diff --git a/.agents/skills/component-refactoring/SKILL.md b/.agents/skills/component-refactoring/SKILL.md index 98a94592ab..a7cae67e8f 100644 --- a/.agents/skills/component-refactoring/SKILL.md +++ b/.agents/skills/component-refactoring/SKILL.md @@ -63,7 +63,7 @@ pnpm analyze-component --json ```typescript // ❌ Before: Complex state logic in component -const Configuration: FC = () => { +function Configuration() { const [modelConfig, setModelConfig] = useState(...) const [datasetConfigs, setDatasetConfigs] = useState(...) const [completionParams, setCompletionParams] = useState({}) @@ -85,7 +85,7 @@ export const useModelConfig = (appId: string) => { } // Component becomes cleaner -const Configuration: FC = () => { +function Configuration() { const { modelConfig, setModelConfig } = useModelConfig(appId) return
...
} @@ -189,8 +189,6 @@ const Template = useMemo(() => { **Dify Convention**: - This skill is for component decomposition, not query/mutation design. -- When refactoring data fetching, follow `web/AGENTS.md`. -- Use `frontend-query-mutation` for contracts, query shape, data-fetching wrappers, query/mutation call-site patterns, conditional queries, invalidation, and mutation error handling. - Do not introduce deprecated `useInvalid` / `useReset`. - Do not add thin passthrough `useQuery` wrappers during refactoring; only extract a custom hook when it truly orchestrates multiple queries/mutations or shared derived state. diff --git a/.agents/skills/component-refactoring/references/complexity-patterns.md b/.agents/skills/component-refactoring/references/complexity-patterns.md index 5a0a268f38..2873630d4b 100644 --- a/.agents/skills/component-refactoring/references/complexity-patterns.md +++ b/.agents/skills/component-refactoring/references/complexity-patterns.md @@ -60,8 +60,10 @@ const Template = useMemo(() => { **After** (complexity: ~3): ```typescript +import type { ComponentType } from 'react' + // Define lookup table outside component -const TEMPLATE_MAP: Record>> = { +const TEMPLATE_MAP: Record>> = { [AppModeEnum.CHAT]: { [LanguagesSupported[1]]: TemplateChatZh, [LanguagesSupported[7]]: TemplateChatJa, diff --git a/.agents/skills/component-refactoring/references/component-splitting.md b/.agents/skills/component-refactoring/references/component-splitting.md index 78a3389100..81c007e005 100644 --- a/.agents/skills/component-refactoring/references/component-splitting.md +++ b/.agents/skills/component-refactoring/references/component-splitting.md @@ -65,10 +65,10 @@ interface ConfigurationHeaderProps { onPublish: () => void } -const ConfigurationHeader: FC = ({ +function ConfigurationHeader({ isAdvancedMode, onPublish, -}) => { +}: ConfigurationHeaderProps) { const { t } = useTranslation() return ( @@ -136,7 +136,7 @@ const AppInfo = () => { } // ✅ After: Separate view 
components -const AppInfoExpanded: FC = ({ appDetail, onAction }) => { +function AppInfoExpanded({ appDetail, onAction }: AppInfoViewProps) { return (
{/* Clean, focused expanded view */} @@ -144,7 +144,7 @@ const AppInfoExpanded: FC = ({ appDetail, onAction }) => { ) } -const AppInfoCollapsed: FC = ({ appDetail, onAction }) => { +function AppInfoCollapsed({ appDetail, onAction }: AppInfoViewProps) { return (
{/* Clean, focused collapsed view */} @@ -203,12 +203,12 @@ interface AppInfoModalsProps { onSuccess: () => void } -const AppInfoModals: FC = ({ +function AppInfoModals({ appDetail, activeModal, onClose, onSuccess, -}) => { +}: AppInfoModalsProps) { const handleEdit = async (data) => { /* logic */ } const handleDuplicate = async (data) => { /* logic */ } const handleDelete = async () => { /* logic */ } @@ -296,7 +296,7 @@ interface OperationItemProps { onAction: (id: string) => void } -const OperationItem: FC = ({ operation, onAction }) => { +function OperationItem({ operation, onAction }: OperationItemProps) { return (
{operation.icon} @@ -435,7 +435,7 @@ interface ChildProps { onSubmit: () => void } -const Child: FC = ({ value, onChange, onSubmit }) => { +function Child({ value, onChange, onSubmit }: ChildProps) { return (
onChange(e.target.value)} /> diff --git a/.agents/skills/component-refactoring/references/hook-extraction.md b/.agents/skills/component-refactoring/references/hook-extraction.md index 0d567eb2a6..6fad2c8885 100644 --- a/.agents/skills/component-refactoring/references/hook-extraction.md +++ b/.agents/skills/component-refactoring/references/hook-extraction.md @@ -112,13 +112,13 @@ export const useModelConfig = ({ ```typescript // Before: 50+ lines of state management -const Configuration: FC = () => { +function Configuration() { const [modelConfig, setModelConfig] = useState(...) // ... lots of related state and effects } // After: Clean component -const Configuration: FC = () => { +function Configuration() { const { modelConfig, setModelConfig, @@ -159,8 +159,6 @@ const Configuration: FC = () => { When hook extraction touches query or mutation code, do not use this reference as the source of truth for data-layer patterns. -- Follow `web/AGENTS.md` first. -- Use `frontend-query-mutation` for contracts, query shape, data-fetching wrappers, query/mutation call-site patterns, conditional queries, invalidation, and mutation error handling. - Do not introduce deprecated `useInvalid` / `useReset`. - Do not extract thin passthrough `useQuery` hooks; only extract orchestration hooks. diff --git a/.agents/skills/e2e-cucumber-playwright/SKILL.md b/.agents/skills/e2e-cucumber-playwright/SKILL.md index de6b58f26d..dd7d204678 100644 --- a/.agents/skills/e2e-cucumber-playwright/SKILL.md +++ b/.agents/skills/e2e-cucumber-playwright/SKILL.md @@ -23,7 +23,7 @@ Use this skill for Dify's repository-level E2E suite in `e2e/`. Use [`e2e/AGENTS - `e2e/scripts/run-cucumber.ts` and `e2e/cucumber.config.ts` when tags or execution flow matter 3. Read [`references/playwright-best-practices.md`](references/playwright-best-practices.md) only when locator, assertion, isolation, or waiting choices are involved. 4. 
Read [`references/cucumber-best-practices.md`](references/cucumber-best-practices.md) only when scenario wording, step granularity, tags, or expression design are involved. -5. Re-check official docs with Context7 before introducing a new Playwright or Cucumber pattern. +5. Re-check official Playwright or Cucumber docs with the available documentation tools before introducing a new framework pattern. ## Local Rules diff --git a/.agents/skills/frontend-code-review/references/performance.md b/.agents/skills/frontend-code-review/references/performance.md index 2d60072f5c..0c33db46d0 100644 --- a/.agents/skills/frontend-code-review/references/performance.md +++ b/.agents/skills/frontend-code-review/references/performance.md @@ -9,18 +9,18 @@ Category: Performance When rendering React Flow, prefer `useNodes`/`useEdges` for UI consumption and rely on `useStoreApi` inside callbacks that mutate or read node/edge state. Avoid manually pulling Flow data outside of these hooks. -## Complex prop memoization +## Complex prop stability -IsUrgent: True +IsUrgent: False Category: Performance ### Description -Wrap complex prop values (objects, arrays, maps) in `useMemo` prior to passing them into child components to guarantee stable references and prevent unnecessary renders. +Only require stable object, array, or map props when there is a clear reason: the child is memoized, the value participates in effect/query dependencies, the value is part of a stable-reference API contract, or profiling/local behavior shows avoidable re-renders. Do not request `useMemo` for every inline object by default; `how-to-write-component` treats memoization as a targeted optimization. Update this file when adding, editing, or removing Performance rules so the catalog remains accurate. 
-Wrong: +Risky: ```tsx ``` -Right: +Better when stable identity matters: ```tsx const config = useMemo(() => ({ diff --git a/.agents/skills/frontend-query-mutation/SKILL.md b/.agents/skills/frontend-query-mutation/SKILL.md deleted file mode 100644 index 49888bdb66..0000000000 --- a/.agents/skills/frontend-query-mutation/SKILL.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -name: frontend-query-mutation -description: Guide for implementing Dify frontend query and mutation patterns with TanStack Query and oRPC. Trigger when creating or updating contracts in web/contract, wiring router composition, consuming consoleQuery or marketplaceQuery in components or services, deciding whether to call queryOptions() directly or extract a helper or use-* hook, handling conditional queries, cache invalidation, mutation error handling, or migrating legacy service calls to contract-first query and mutation helpers. ---- - -# Frontend Query & Mutation - -## Intent - -- Keep contract as the single source of truth in `web/contract/*`. -- Prefer contract-shaped `queryOptions()` and `mutationOptions()`. -- Keep invalidation and mutation flow knowledge in the service layer. -- Keep abstractions minimal to preserve TypeScript inference. - -## Workflow - -1. Identify the change surface. - - Read `references/contract-patterns.md` for contract files, router composition, client helpers, and query or mutation call-site shape. - - Read `references/runtime-rules.md` for conditional queries, invalidation, error handling, and legacy migrations. - - Read both references when a task spans contract shape and runtime behavior. -2. Implement the smallest abstraction that fits the task. - - Default to direct `useQuery(...)` or `useMutation(...)` calls with oRPC helpers at the call site. - - Extract a small shared query helper only when multiple call sites share the same extra options. - - Create `web/service/use-{domain}.ts` only for orchestration or shared domain behavior. -3. Preserve Dify conventions. 
- - Keep contract inputs in `{ params, query?, body? }` shape. - - Bind invalidation in the service-layer mutation definition. - - Prefer `mutate(...)`; use `mutateAsync(...)` only when Promise semantics are required. - -## Files Commonly Touched - -- `web/contract/console/*.ts` -- `web/contract/marketplace.ts` -- `web/contract/router.ts` -- `web/service/client.ts` -- `web/service/use-*.ts` -- component and hook call sites using `consoleQuery` or `marketplaceQuery` - -## References - -- Use `references/contract-patterns.md` for contract shape, router registration, query and mutation helpers, and anti-patterns that degrade inference. -- Use `references/runtime-rules.md` for conditional queries, invalidation, `mutate` versus `mutateAsync`, and legacy migration rules. - -Treat this skill as the single query and mutation entry point for Dify frontend work. Keep detailed rules in the reference files instead of duplicating them in project docs. diff --git a/.agents/skills/frontend-query-mutation/agents/openai.yaml b/.agents/skills/frontend-query-mutation/agents/openai.yaml deleted file mode 100644 index 87f7ae6ea4..0000000000 --- a/.agents/skills/frontend-query-mutation/agents/openai.yaml +++ /dev/null @@ -1,4 +0,0 @@ -interface: - display_name: "Frontend Query & Mutation" - short_description: "Dify TanStack Query and oRPC patterns" - default_prompt: "Use this skill when implementing or reviewing Dify frontend contracts, query and mutation call sites, conditional queries, invalidation, or legacy query/mutation migrations." 
diff --git a/.agents/skills/frontend-query-mutation/references/contract-patterns.md b/.agents/skills/frontend-query-mutation/references/contract-patterns.md deleted file mode 100644 index 08016ed2cc..0000000000 --- a/.agents/skills/frontend-query-mutation/references/contract-patterns.md +++ /dev/null @@ -1,98 +0,0 @@ -# Contract Patterns - -## Table of Contents - -- Intent -- Minimal structure -- Core workflow -- Query usage decision rule -- Mutation usage decision rule -- Anti-patterns -- Contract rules -- Type export - -## Intent - -- Keep contract as the single source of truth in `web/contract/*`. -- Default query usage to call-site `useQuery(consoleQuery|marketplaceQuery.xxx.queryOptions(...))` when endpoint behavior maps 1:1 to the contract. -- Keep abstractions minimal and preserve TypeScript inference. - -## Minimal Structure - -```text -web/contract/ -├── base.ts -├── router.ts -├── marketplace.ts -└── console/ - ├── billing.ts - └── ...other domains -web/service/client.ts -``` - -## Core Workflow - -1. Define contract in `web/contract/console/{domain}.ts` or `web/contract/marketplace.ts`. - - Use `base.route({...}).output(type<...>())` as the baseline. - - Add `.input(type<...>())` only when the request has `params`, `query`, or `body`. - - For `GET` without input, omit `.input(...)`; do not use `.input(type())`. -2. Register contract in `web/contract/router.ts`. - - Import directly from domain files and nest by API prefix. -3. Consume from UI call sites via oRPC query utilities. - -```typescript -import { useQuery } from '@tanstack/react-query' -import { consoleQuery } from '@/service/client' - -const invoiceQuery = useQuery(consoleQuery.billing.invoices.queryOptions({ - staleTime: 5 * 60 * 1000, - throwOnError: true, - select: invoice => invoice.url, -})) -``` - -## Query Usage Decision Rule - -1. Default to direct `*.queryOptions(...)` usage at the call site. -2. 
If 3 or more call sites share the same extra options, extract a small query helper, not a `use-*` passthrough hook. -3. Create `web/service/use-{domain}.ts` only for orchestration. - - Combine multiple queries or mutations. - - Share domain-level derived state or invalidation helpers. - -```typescript -const invoicesBaseQueryOptions = () => - consoleQuery.billing.invoices.queryOptions({ retry: false }) - -const invoiceQuery = useQuery({ - ...invoicesBaseQueryOptions(), - throwOnError: true, -}) -``` - -## Mutation Usage Decision Rule - -1. Default to mutation helpers from `consoleQuery` or `marketplaceQuery`, for example `useMutation(consoleQuery.billing.bindPartnerStack.mutationOptions(...))`. -2. If the mutation flow is heavily custom, use oRPC clients as `mutationFn`, for example `consoleClient.xxx` or `marketplaceClient.xxx`, instead of handwritten non-oRPC mutation logic. - -## Anti-Patterns - -- Do not wrap `useQuery` with `options?: Partial`. -- Do not split local `queryKey` and `queryFn` when oRPC `queryOptions` already exists and fits the use case. -- Do not create thin `use-*` passthrough hooks for a single endpoint. -- These patterns can degrade inference, especially around `throwOnError` and `select`, and add unnecessary indirection. - -## Contract Rules - -- Input structure: always use `{ params, query?, body? }`. -- No-input `GET`: omit `.input(...)`; do not use `.input(type())`. -- Path params: use `{paramName}` in the path and match it in the `params` object. -- Router nesting: group by API prefix, for example `/billing/*` becomes `billing: {}`. -- No barrel files: import directly from specific files. -- Types: import from `@/types/` and use the `type()` helper. -- Mutations: prefer `mutationOptions`; use explicit `mutationKey` mainly for defaults, filtering, and devtools. 
- -## Type Export - -```typescript -export type ConsoleInputs = InferContractRouterInputs -``` diff --git a/.agents/skills/frontend-query-mutation/references/runtime-rules.md b/.agents/skills/frontend-query-mutation/references/runtime-rules.md deleted file mode 100644 index 73d6fbdded..0000000000 --- a/.agents/skills/frontend-query-mutation/references/runtime-rules.md +++ /dev/null @@ -1,130 +0,0 @@ -# Runtime Rules - -## Table of Contents - -- Conditional queries -- Cache invalidation -- Key API guide -- `mutate` vs `mutateAsync` -- Legacy migration - -## Conditional Queries - -Prefer contract-shaped `queryOptions(...)`. -When required input is missing, prefer `input: skipToken` instead of placeholder params or non-null assertions. -Use `enabled` only for extra business gating after the input itself is already valid. - -```typescript -import { skipToken, useQuery } from '@tanstack/react-query' - -// Disable the query by skipping input construction. -function useAccessMode(appId: string | undefined) { - return useQuery(consoleQuery.accessControl.appAccessMode.queryOptions({ - input: appId - ? { params: { appId } } - : skipToken, - })) -} - -// Avoid runtime-only guards that bypass type checking. -function useBadAccessMode(appId: string | undefined) { - return useQuery(consoleQuery.accessControl.appAccessMode.queryOptions({ - input: { params: { appId: appId! } }, - enabled: !!appId, - })) -} -``` - -## Cache Invalidation - -Bind invalidation in the service-layer mutation definition. -Components may add UI feedback in call-site callbacks, but they should not decide which queries to invalidate. - -Use: - -- `.key()` for namespace or prefix invalidation -- `.queryKey(...)` only for exact cache reads or writes such as `getQueryData` and `setQueryData` -- `queryClient.invalidateQueries(...)` in mutation `onSuccess` - -Do not use deprecated `useInvalid` from `use-base.ts`. - -```typescript -// Service layer owns cache invalidation. 
-export const useUpdateAccessMode = () => { - const queryClient = useQueryClient() - - return useMutation(consoleQuery.accessControl.updateAccessMode.mutationOptions({ - onSuccess: () => { - queryClient.invalidateQueries({ - queryKey: consoleQuery.accessControl.appWhitelistSubjects.key(), - }) - }, - })) -} - -// Component only adds UI behavior. -updateAccessMode({ appId, mode }, { - onSuccess: () => toast.success('...'), -}) - -// Avoid putting invalidation knowledge in the component. -mutate({ appId, mode }, { - onSuccess: () => { - queryClient.invalidateQueries({ - queryKey: consoleQuery.accessControl.appWhitelistSubjects.key(), - }) - }, -}) -``` - -## Key API Guide - -- `.key(...)` - - Use for partial matching operations. - - Prefer it for invalidation, refetch, and cancel patterns. - - Example: `queryClient.invalidateQueries({ queryKey: consoleQuery.billing.key() })` -- `.queryKey(...)` - - Use for a specific query's full key. - - Prefer it for exact cache addressing and direct reads or writes. -- `.mutationKey(...)` - - Use for a specific mutation's full key. - - Prefer it for mutation defaults registration, mutation-status filtering, and devtools grouping. - -## `mutate` vs `mutateAsync` - -Prefer `mutate` by default. -Use `mutateAsync` only when Promise semantics are truly required, such as parallel mutations or sequential steps with result dependencies. - -Rules: - -- Event handlers should usually call `mutate(...)` with `onSuccess` or `onError`. -- Every `await mutateAsync(...)` must be wrapped in `try/catch`. -- Do not use `mutateAsync` when callbacks already express the flow clearly. - -```typescript -// Default case. -mutation.mutate(data, { - onSuccess: result => router.push(result.url), -}) - -// Promise semantics are required. -try { - const order = await createOrder.mutateAsync(orderData) - await confirmPayment.mutateAsync({ orderId: order.id, token }) - router.push(`/orders/${order.id}`) -} -catch (error) { - toast.error(error instanceof Error ? 
error.message : 'Unknown error') -} -``` - -## Legacy Migration - -When touching old code, migrate it toward these rules: - -| Old pattern | New pattern | -|---|---| -| `useInvalid(key)` in service layer | `queryClient.invalidateQueries(...)` inside mutation `onSuccess` | -| component-triggered invalidation after mutation | move invalidation into the service-layer mutation definition | -| imperative fetch plus manual invalidation | wrap it in `useMutation(...mutationOptions(...))` | -| `await mutateAsync()` without `try/catch` | switch to `mutate(...)` or add `try/catch` | diff --git a/.agents/skills/frontend-testing/SKILL.md b/.agents/skills/frontend-testing/SKILL.md index 105c979c58..21c46d75bc 100644 --- a/.agents/skills/frontend-testing/SKILL.md +++ b/.agents/skills/frontend-testing/SKILL.md @@ -5,7 +5,7 @@ description: Generate Vitest + React Testing Library tests for Dify frontend com # Dify Frontend Testing Skill -This skill enables Claude to generate high-quality, comprehensive frontend tests for the Dify project following established conventions and best practices. +This skill enables Codex to generate high-quality, comprehensive frontend tests for the Dify project following established conventions and best practices. > **⚠️ Authoritative Source**: This skill is derived from `web/docs/test.md`. Use Vitest mock/timer APIs (`vi.*`). 
@@ -24,35 +24,27 @@ Apply this skill when the user: **Do NOT apply** when: - User is asking about backend/API tests (Python/pytest) -- User is asking about E2E tests (Playwright/Cypress) +- User is asking about E2E tests (Cucumber + Playwright under `e2e/`) - User is only asking conceptual questions without code context ## Quick Reference -### Tech Stack - -| Tool | Version | Purpose | -|------|---------|---------| -| Vitest | 4.0.16 | Test runner | -| React Testing Library | 16.0 | Component testing | -| jsdom | - | Test environment | -| nock | 14.0 | HTTP mocking | -| TypeScript | 5.x | Type safety | - ### Key Commands +Run these commands from `web/`. From the repository root, prefix them with `pnpm -C web`. + ```bash # Run all tests pnpm test # Watch mode -pnpm test:watch +pnpm test --watch # Run specific file pnpm test path/to/file.spec.tsx # Generate coverage report -pnpm test:coverage +pnpm test --coverage # Analyze component complexity pnpm analyze-component @@ -228,7 +220,10 @@ Every test should clearly separate: ### 2. Black-Box Testing - Test observable behavior, not implementation details -- Use semantic queries (getByRole, getByLabelText) +- Use semantic queries (`getByRole` with accessible `name`, `getByLabelText`, `getByPlaceholderText`, `getByText`, and scoped `within(...)`) +- Treat `getByTestId` as a last resort. If a control cannot be found by role/name, label, landmark, or dialog scope, fix the component accessibility first instead of adding or relying on `data-testid`. +- Remove production `data-testid` attributes when semantic selectors can cover the behavior. Keep them only for non-visual mocked boundaries, editor/browser shims such as Monaco, canvas/chart output, or third-party widgets with no accessible DOM in the test environment. +- Do not assert decorative icons by test id. Assert the named control that contains them, or mark decorative icons `aria-hidden`. 
- Avoid testing internal state directly - **Prefer pattern matching over hardcoded strings** in assertions: diff --git a/.agents/skills/frontend-testing/references/mocking.md b/.agents/skills/frontend-testing/references/mocking.md index 8c2f1c0c58..7723e4df21 100644 --- a/.agents/skills/frontend-testing/references/mocking.md +++ b/.agents/skills/frontend-testing/references/mocking.md @@ -56,7 +56,7 @@ See [Zustand Store Testing](#zustand-store-testing) section for full details. | Location | Purpose | |----------|---------| -| `web/vitest.setup.ts` | Global mocks shared by all tests (`react-i18next`, `next/image`, `zustand`) | +| `web/vitest.setup.ts` | Global mocks shared by all tests (`react-i18next`, `zustand`, clipboard, FloatingPortal, Monaco, `localStorage`) | | `web/__mocks__/zustand.ts` | Zustand mock implementation (auto-resets stores after each test) | | `web/__mocks__/` | Reusable mock factories shared across multiple test files | | Test file | Test-specific mocks, inline with `vi.mock()` | @@ -216,28 +216,21 @@ describe('Component', () => { }) ``` -### 5. HTTP Mocking with Nock +### 5. 
HTTP and `fetch` Mocking ```typescript -import nock from 'nock' - -const GITHUB_HOST = 'https://api.github.com' -const GITHUB_PATH = '/repos/owner/repo' - -const mockGithubApi = (status: number, body: Record, delayMs = 0) => { - return nock(GITHUB_HOST) - .get(GITHUB_PATH) - .delay(delayMs) - .reply(status, body) -} - describe('GithubComponent', () => { - afterEach(() => { - nock.cleanAll() + beforeEach(() => { + vi.clearAllMocks() }) it('should display repo info', async () => { - mockGithubApi(200, { name: 'dify', stars: 1000 }) + vi.mocked(globalThis.fetch).mockResolvedValueOnce( + new Response(JSON.stringify({ name: 'dify', stars: 1000 }), { + status: 200, + headers: { 'Content-Type': 'application/json' }, + }), + ) render() @@ -247,7 +240,12 @@ describe('GithubComponent', () => { }) it('should handle API error', async () => { - mockGithubApi(500, { message: 'Server error' }) + vi.mocked(globalThis.fetch).mockResolvedValueOnce( + new Response(JSON.stringify({ message: 'Server error' }), { + status: 500, + headers: { 'Content-Type': 'application/json' }, + }), + ) render() @@ -258,6 +256,8 @@ describe('GithubComponent', () => { }) ``` +Prefer mocking `@/service/*` modules or spying on `global.fetch` / `ky` clients with deterministic responses. Do not introduce an HTTP interception dependency such as `nock` or MSW unless it is already declared in the workspace or adding it is part of the task. + ### 6. Context Providers ```typescript @@ -332,7 +332,7 @@ const renderWithQueryClient = (ui: React.ReactElement) => { 1. **Don't mock Zustand store modules** - Use real stores with `setState()` 1. Don't mock components you can import directly 1. Don't create overly simplified mocks that miss conditional logic -1. Don't forget to clean up nock after each test +1. Don't leave HTTP mocks or service mock state leaking between tests 1. 
Don't use `any` types in mocks without necessity ### Mock Decision Tree diff --git a/.agents/skills/frontend-testing/references/workflow.md b/.agents/skills/frontend-testing/references/workflow.md index bc4ed8285a..27755d42a7 100644 --- a/.agents/skills/frontend-testing/references/workflow.md +++ b/.agents/skills/frontend-testing/references/workflow.md @@ -227,12 +227,12 @@ Failing tests compound: **Fix failures immediately before proceeding.** -## Integration with Claude's Todo Feature +## Integration with Codex's Todo Feature -When using Claude for multi-file testing: +When using Codex for multi-file testing: -1. **Ask Claude to create a todo list** before starting -1. **Request one file at a time** or ensure Claude processes incrementally +1. **Create a todo list** before starting +1. **Process one file at a time** 1. **Verify each test passes** before asking for the next 1. **Mark todos complete** as you progress diff --git a/.agents/skills/how-to-write-component/SKILL.md b/.agents/skills/how-to-write-component/SKILL.md new file mode 100644 index 0000000000..ac77112993 --- /dev/null +++ b/.agents/skills/how-to-write-component/SKILL.md @@ -0,0 +1,71 @@ +--- +name: how-to-write-component +description: React/TypeScript component style guide. Use when writing, refactoring, or reviewing React components, especially around props typing, state boundaries, shared local state with Jotai atoms, API types, query/mutation contracts, navigation, memoization, wrappers, and empty-state handling. +--- + +# How To Write A Component + +Use this as the decision guide for React/TypeScript component structure. Existing code is reference material, not automatic precedent; when it conflicts with these rules, adapt the approach instead of reproducing the violation. + +## Core Defaults + +- Search before adding UI, hooks, helpers, or styling patterns. Reuse existing base components, feature components, hooks, utilities, and design styles when they fit. 
+- Group code by feature workflow, route, or ownership area: components, hooks, local types, query helpers, atoms, constants, and small utilities should live near the code that changes with them. +- Promote code to shared only when multiple verticals need the same stable primitive. Otherwise keep it local and compose shared primitives inside the owning feature. +- Use Tailwind CSS v4.1+ rules via the `tailwind-css-rules` skill. Prefer v4 utilities, `gap`, `text-size/line-height`, `min-h-dvh`, and avoid deprecated utilities and `@apply`. + +## Ownership + +- Put local state, queries, mutations, handlers, and derived UI data in the lowest component that uses them. Extract a purpose-built owner component only when the logic has no natural home. +- Repeated TanStack query calls in sibling components are acceptable when each component independently consumes the data. Do not hoist a query only because it is duplicated; TanStack Query handles deduplication and cache sharing. +- Hoist state, queries, or callbacks to a parent only when the parent consumes the data, coordinates shared loading/error/empty UI, needs one consistent snapshot, or owns a workflow spanning children. +- Avoid prop drilling. One pass-through layer is acceptable; repeated forwarding means ownership should move down or into feature-scoped Jotai UI state. Keep server/cache state in query and API data flow. +- Keep callbacks in a parent only for workflow coordination such as form submission, shared selection, batch behavior, or navigation. Otherwise let the child or row own its action. +- Prefer uncontrolled DOM state and CSS variables before adding controlled props. + +## Components, Props, And Types + +- Type component signatures directly; do not use `FC` or `React.FC`. +- Prefer `function` for top-level components and module helpers. Use arrow functions for local callbacks, handlers, and lambda-style APIs. +- Prefer named exports. 
Use default exports only where the framework requires them, such as Next.js route files. +- Type simple one-off props inline. Use a named `Props` type only when reused, exported, complex, or clearer. +- Use API-generated or API-returned types at component boundaries. Keep small UI conversion helpers beside the component that needs them. +- Name values by their domain role and backend API contract, and keep that name stable across the call chain, especially IDs like `appInstanceId`. Normalize framework or route params at the boundary. +- Keep fallback and invariant checks at the lowest component that already handles that state; callers should pass raw values through instead of duplicating checks. + +## Queries And Mutations + +- Keep `web/contract/*` as the single source of truth for API shape; follow existing domain/router patterns and the `{ params, query?, body? }` input shape. +- Consume queries directly with `useQuery(consoleQuery.xxx.queryOptions(...))` or `useQuery(marketplaceQuery.xxx.queryOptions(...))`. +- Avoid pass-through hooks and thin `web/service/use-*` wrappers that only rename `queryOptions()` or `mutationOptions()`. Extract a small `queryOptions` helper only when repeated call-site options justify it. +- Keep feature hooks for real orchestration, workflow state, or shared domain behavior. +- For missing required query input, use `input: skipToken`; use `enabled` only for extra business gating after the input is valid. +- Consume mutations directly with `useMutation(consoleQuery.xxx.mutationOptions(...))` or `useMutation(marketplaceQuery.xxx.mutationOptions(...))`; use oRPC clients as `mutationFn` only for custom flows. +- Put shared cache behavior in `createTanstackQueryUtils(...experimental_defaults...)`; components may add UI feedback callbacks, but should not own shared invalidation rules. +- Do not use deprecated `useInvalid` or `useReset`. 
+- Prefer `mutate(...)`; use `mutateAsync(...)` only when Promise semantics are required, and wrap awaited calls in `try/catch`. + +## Component Boundaries + +- Use the first level below a page or tab to organize independent page sections when it adds real structure. This layer is layout/semantic first, not automatically the data owner. +- Split deeper components by the data and state each layer actually needs. Each component should access only necessary data, and ownership should stay at the lowest consumer. +- Keep cohesive forms, menu bodies, and one-off helpers local unless they need their own state, reuse, or semantic boundary. +- Separate hidden secondary surfaces from the trigger's main flow. For dialogs, dropdowns, popovers, and similar branches, extract a small local component that owns the trigger, open state, and hidden content when it would obscure the parent flow. +- Preserve composability by separating behavior ownership from layout ownership. A dropdown action may own its trigger, open state, and menu content; the caller owns placement such as slots, offsets, and alignment. +- Avoid unnecessary DOM hierarchy. Do not add wrapper elements unless they provide layout, semantics, accessibility, state ownership, or integration with a library API; prefer fragments or styling an existing element when possible. +- Avoid shallow wrappers and prop renaming unless the wrapper adds validation, orchestration, error handling, state ownership, or a real semantic boundary. + +## You Might Not Need An Effect + +- Use Effects only to synchronize with external systems such as browser APIs, non-React widgets, subscriptions, timers, analytics that must run because the component was shown, or imperative DOM integration. +- Do not use Effects to transform props or state for rendering. Calculate derived values during render, and use `useMemo` only when the calculation is actually expensive. +- Do not use Effects to handle user actions. 
Put action-specific logic in the event handler where the cause is known. +- Do not use Effects to copy one state value into another state value representing the same concept. Pick one source of truth and derive the rest during render. +- Do not reset or adjust state from props with an Effect. Prefer a `key` reset, storing a stable ID and deriving the selected object, or guarded same-component render-time adjustment when truly necessary. +- Prefer framework data APIs or TanStack Query for data fetching instead of writing request Effects in components. +- If an Effect still seems necessary, first name the external system it synchronizes with. If there is no external system, remove the Effect and restructure the state or event flow. + +## Navigation And Performance + +- Prefer `Link` for normal navigation. Use router APIs only for command-flow side effects such as mutation success, guarded redirects, or form submission. +- Avoid `memo`, `useMemo`, and `useCallback` unless there is a clear performance reason. diff --git a/.agents/skills/tailwind-css-rules/SKILL.md b/.agents/skills/tailwind-css-rules/SKILL.md new file mode 100644 index 0000000000..3528548036 --- /dev/null +++ b/.agents/skills/tailwind-css-rules/SKILL.md @@ -0,0 +1,367 @@ +--- +name: tailwind-css-rules +description: Tailwind CSS v4.1+ rules and best practices. Use when writing, reviewing, refactoring, or upgrading Tailwind CSS classes and styles, especially v4 utility migrations, layout spacing, typography, responsive variants, dark mode, gradients, CSS variables, and component styling. 
+--- + +# Tailwind CSS Rules and Best Practices + +## Core Principles + +- **Always use Tailwind CSS v4.1+** - Ensure the codebase is using the latest version +- **Do not use deprecated or removed utilities** - ALWAYS use the replacement +- **Never use `@apply`** - Use CSS variables, the `--spacing()` function, or framework components instead +- **Check for redundant classes** - Remove any classes that aren't necessary +- **Group elements logically** to simplify responsive tweaks later + +## Upgrading to Tailwind CSS v4 + +### Before Upgrading + +- **Always read the upgrade documentation first** - Read https://tailwindcss.com/docs/upgrade-guide and https://tailwindcss.com/blog/tailwindcss-v4 before starting an upgrade. +- Ensure the git repository is in a clean state before starting + +### Upgrade Process + +1. Run the upgrade command: `npx @tailwindcss/upgrade@latest` for both major and minor updates +2. The tool will convert JavaScript config files to the new CSS format +3. Review all changes extensively to clean up any false positives +4. 
Test thoroughly across your application + +## Breaking Changes Reference + +### Removed Utilities (NEVER use these in v4) + +| ❌ Deprecated | ✅ Replacement | +| ----------------------- | ------------------------------------------------- | +| `bg-opacity-*` | Use opacity modifiers like `bg-black/50` | +| `text-opacity-*` | Use opacity modifiers like `text-black/50` | +| `border-opacity-*` | Use opacity modifiers like `border-black/50` | +| `divide-opacity-*` | Use opacity modifiers like `divide-black/50` | +| `ring-opacity-*` | Use opacity modifiers like `ring-black/50` | +| `placeholder-opacity-*` | Use opacity modifiers like `placeholder-black/50` | +| `flex-shrink-*` | `shrink-*` | +| `flex-grow-*` | `grow-*` | +| `overflow-ellipsis` | `text-ellipsis` | +| `decoration-slice` | `box-decoration-slice` | +| `decoration-clone` | `box-decoration-clone` | + +### Renamed Utilities + +Use the v4 name when migrating code that still carries Tailwind v3 semantics. Do not blanket-replace existing v4 classes: classes such as `rounded-sm`, `shadow-sm`, `ring-1`, and `ring-2` are valid in this codebase when they intentionally represent the current design scale. + +| ❌ v3 pattern | ✅ v4 pattern | +| ------------------- | -------------------------------------------------- | +| `bg-gradient-*` | `bg-linear-*` | +| old shadow scale | verify against the current Tailwind/design scale | +| old blur scale | verify against the current Tailwind/design scale | +| old radius scale | use the Dify radius token mapping when applicable | +| `outline-none` | `outline-hidden` | +| bare `ring` utility | use an explicit ring width such as `ring-1`/`ring-2`/`ring-3` | + +For Figma radius tokens, follow `packages/dify-ui/AGENTS.md`. For example, `--radius/xs` maps to `rounded-sm`; do not rewrite it to `rounded-xs`. 
+ +## Layout and Spacing Rules + +### Flexbox and Grid Spacing + +#### Always use gap utilities for internal spacing + +Gap provides consistent spacing without edge cases (no extra space on last items). It's cleaner and more maintainable than margins on children. + +```html + +
+
Item 1
+
Item 2
+
Item 3
+ +
+ + +
+
Item 1
+
Item 2
+
Item 3
+
+``` + +#### Gap vs Space utilities + +- **Never use `space-x-*` or `space-y-*` in flex/grid layouts** - always use gap +- Space utilities add margins to children and have issues with wrapped items +- Gap works correctly with flex-wrap and all flex directions + +```html + +
+ +
+ + +
+ +
+``` + +### General Spacing Guidelines + +- **Prefer top and left margins** over bottom and right margins (unless conditionally rendered) +- **Use padding on parent containers** instead of bottom margins on the last child +- **Always use `min-h-dvh` instead of `min-h-screen`** - `min-h-screen` is buggy on mobile Safari +- **Prefer `size-*` utilities** over separate `w-*` and `h-*` when setting equal dimensions +- For max-widths, prefer the container scale (e.g., `max-w-2xs` over `max-w-72`) + +## Typography Rules + +### Line Heights + +- **Never use `leading-*` classes** - Always use line height modifiers with text size +- **Always use fixed line heights from the spacing scale** - Don't use named values + +```html + +

Text with separate line height

+

Text with named line height

+ + +

Text with line height modifier

+

Text with specific line height

+``` + +### Font Size Reference + +Be precise with font sizes - know the actual pixel values: + +- `text-xs` = 12px +- `text-sm` = 14px +- `text-base` = 16px +- `text-lg` = 18px +- `text-xl` = 20px + +## Color and Opacity + +### Opacity Modifiers + +**Never use `bg-opacity-*`, `text-opacity-*`, etc.** - use the opacity modifier syntax: + +```html + +
Old opacity syntax
+ + +
Modern opacity syntax
+``` + +## Responsive Design + +### Breakpoint Optimization + +- **Check for redundant classes across breakpoints** +- **Only add breakpoint variants when values change** + +```html + +
+ +
+ + +
+ +
+``` + +## Dark Mode + +### Dark Mode Best Practices + +- Use the plain `dark:` variant pattern +- Put light mode styles first, then dark mode styles +- Ensure `dark:` variant comes before other variants + +```html + +
+ +
+``` + +## Gradient Utilities + +- **ALWAYS Use `bg-linear-*` instead of `bg-gradient-*` utilities** - The gradient utilities were renamed in v4 +- Use the new `bg-radial` or `bg-radial-[]` to create radial gradients +- Use the new `bg-conic` or `bg-conic-*` to create conic gradients + +```html + +
+
+
+ + +
+``` + +## Working with CSS Variables + +### Accessing Theme Values + +Tailwind CSS v4 exposes all theme values as CSS variables: + +```css +/* Access colors, and other theme values */ +.custom-element { + background: var(--color-red-500); + border-radius: var(--radius-lg); +} +``` + +### The `--spacing()` Function + +Use the dedicated `--spacing()` function for spacing calculations: + +```css +.custom-class { + margin-top: calc(100vh - --spacing(16)); +} +``` + +### Extending theme values + +Use CSS to extend theme values: + +```css +@import "tailwindcss"; + +@theme { + --color-mint-500: oklch(0.72 0.11 178); +} +``` + +```html +
+ +
+``` + +## New v4 Features + +### Container Queries + +Use the `@container` class and size variants: + +```html +
+
+ +
+ +
+
+
+``` + +### Container Query Units + +Use container-based units like `cqw` for responsive sizing: + +```html +
+

Responsive to container width

+
+``` + +### Text Shadows (v4.1) + +Use text-shadow-\* utilities from text-shadow-2xs to text-shadow-lg: + +```html + +

Large shadow

+

Small shadow with opacity

+``` + +### Masking (v4.1) + +Use the new composable mask utilities for image and gradient masks: + +```html + +
Top fade
+
Bottom gradient
+
+ Fade from white to black +
+ + +
+ Radial mask +
+``` + +## Component Patterns + +### Avoiding Utility Inheritance + +Don't add utilities to parents that you override in children: + +```html + +
+

Centered Heading

+
Left-aligned content
+
+ + +
+

Centered Heading

+
Left-aligned content
+
+``` + +### Component Extraction + +- Extract repeated patterns into framework components, not CSS classes +- Keep utility classes in templates/JSX +- Use data attributes for complex state-based styling + +## CSS Best Practices + +### Nesting Guidelines + +- Use nesting when styling both parent and children +- Avoid empty parent selectors + +```css +/* ✅ Good nesting - parent has styles */ +.card { + padding: --spacing(4); + + > .card-title { + font-weight: bold; + } +} + +/* ❌ Avoid empty parents */ +ul { + > li { + /* Parent has no styles */ + } +} +``` + +## Common Pitfalls to Avoid + +1. **Using old opacity utilities** - Always use `/opacity` syntax like `bg-red-500/60` +2. **Redundant breakpoint classes** - Only specify changes +3. **Space utilities in flex/grid** - Always use gap +4. **Leading utilities** - Use line-height modifiers like `text-sm/6` +5. **Arbitrary values** - Use the design scale +6. **@apply directive** - Use components or CSS variables +7. **min-h-screen on mobile** - Use min-h-dvh +8. **Separate width/height** - Use size utilities when equal +9. **Arbitrary values** - Always use Tailwind's predefined scale whenever possible (e.g., use `ml-4` over `ml-[16px]`) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 94e857f93a..98b7e9f119 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -6,6 +6,9 @@ * @crazywoola @laipz8200 @Yeuoly +# ESLint suppression file is maintained by autofix.ci pruning. 
+/eslint-suppressions.json + # CODEOWNERS file /.github/CODEOWNERS @laipz8200 @crazywoola diff --git a/.github/actions/setup-web/action.yml b/.github/actions/setup-web/action.yml index 673155bcf7..085b39ebfb 100644 --- a/.github/actions/setup-web/action.yml +++ b/.github/actions/setup-web/action.yml @@ -4,7 +4,7 @@ runs: using: composite steps: - name: Setup Vite+ - uses: voidzero-dev/setup-vp@20553a7a7429c429a74894104a2835d7fed28a72 # v1.3.0 + uses: voidzero-dev/setup-vp@4f5aa3e38c781f1b01e78fb9255527cee8a6efa6 # v1.8.0 with: node-version-file: .nvmrc cache: true diff --git a/.github/labeler.yml b/.github/labeler.yml index 3b9dc24749..e226bafccc 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -6,5 +6,4 @@ web: - 'package.json' - 'pnpm-lock.yaml' - 'pnpm-workspace.yaml' - - '.npmrc' - '.nvmrc' diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml index bd47abc710..a08e7aacae 100644 --- a/.github/workflows/api-tests.yml +++ b/.github/workflows/api-tests.yml @@ -99,7 +99,7 @@ jobs: - name: Set up dotenvs run: | cp docker/.env.example docker/.env - cp docker/middleware.env.example docker/middleware.env + cp docker/envs/middleware.env.example docker/middleware.env - name: Expose Service Ports run: sh .github/workflows/expose_service_ports.sh diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index 8a1719da3c..9c2c6e2ca9 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -43,7 +43,6 @@ jobs: package.json pnpm-lock.yaml pnpm-workspace.yaml - .npmrc .nvmrc - name: Check api inputs if: github.event_name != 'merge_group' @@ -114,9 +113,15 @@ jobs: find . 
-name "*.py.bak" -type f -delete - name: Setup web environment - if: github.event_name != 'merge_group' && steps.web-changes.outputs.any_changed == 'true' + if: github.event_name != 'merge_group' uses: ./.github/actions/setup-web + - name: Generate API docs + if: github.event_name != 'merge_group' && steps.api-changes.outputs.any_changed == 'true' + run: | + cd api + uv run dev/generate_swagger_markdown_docs.py --swagger-dir openapi --markdown-dir openapi/markdown + - name: ESLint autofix if: github.event_name != 'merge_group' && steps.web-changes.outputs.any_changed == 'true' run: | diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index 2d8bde8080..915ed6cfe8 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -74,7 +74,7 @@ jobs: password: ${{ env.DOCKERHUB_TOKEN }} - name: Set up Depot CLI - uses: depot/setup-action@v1 + uses: depot/setup-action@15c09a5f77a0840ad4bce955686522a257853461 # v1.7.1 - name: Extract metadata for Docker id: meta @@ -84,7 +84,7 @@ jobs: - name: Build Docker image id: build - uses: depot/build-push-action@v1 + uses: depot/build-push-action@5f3b3c2e5a00f0093de47f657aeaefcedff27d18 # v1.17.0 with: project: ${{ vars.DEPOT_PROJECT_ID }} context: ${{ matrix.build_context }} @@ -124,10 +124,10 @@ jobs: file: "web/Dockerfile" steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@98e3b2c9eab4f4f98a95c0c0a3ea5e5e672fd2a8 # v3.10.0 + uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0 - name: Validate Docker image - uses: docker/build-push-action@5cd29d66b4a8d8e6f4d5dfe2e9329f0b1d446289 # v6.18.0 + uses: docker/build-push-action@bcafcacb16a39f128d818304e6c9c0c18556b85f # v7.1.0 with: push: false context: ${{ matrix.build_context }} diff --git a/.github/workflows/db-migration-test.yml b/.github/workflows/db-migration-test.yml index 65f0149a74..9d3ccb34b2 100644 --- a/.github/workflows/db-migration-test.yml +++ 
b/.github/workflows/db-migration-test.yml @@ -37,7 +37,7 @@ jobs: - name: Prepare middleware env run: | cd docker - cp middleware.env.example middleware.env + cp envs/middleware.env.example middleware.env - name: Set up Middlewares uses: hoverkraft-tech/compose-action@d2bee4f07e8ca410d6b196d00f90c12e7d48c33a # v2.6.0 @@ -87,7 +87,7 @@ jobs: - name: Prepare middleware env for MySQL run: | cd docker - cp middleware.env.example middleware.env + cp envs/middleware.env.example middleware.env sed -i 's/DB_TYPE=postgresql/DB_TYPE=mysql/' middleware.env sed -i 's/DB_HOST=db_postgres/DB_HOST=db_mysql/' middleware.env sed -i 's/DB_PORT=5432/DB_PORT=3306/' middleware.env diff --git a/.github/workflows/docker-build.yml b/.github/workflows/docker-build.yml index b0022b863b..99c35b24eb 100644 --- a/.github/workflows/docker-build.yml +++ b/.github/workflows/docker-build.yml @@ -44,10 +44,10 @@ jobs: file: "web/Dockerfile" steps: - name: Set up Depot CLI - uses: depot/setup-action@v1 + uses: depot/setup-action@15c09a5f77a0840ad4bce955686522a257853461 # v1.7.1 - name: Build Docker Image - uses: depot/build-push-action@v1 + uses: depot/build-push-action@5f3b3c2e5a00f0093de47f657aeaefcedff27d18 # v1.17.0 with: project: ${{ vars.DEPOT_PROJECT_ID }} push: false diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index f59cc6be48..aefcf1b5ac 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -9,6 +9,6 @@ jobs: pull-requests: write runs-on: depot-ubuntu-24.04 steps: - - uses: actions/labeler@634933edcd8ababfe52f92936142cc22ac488b1b # v6.0.1 + - uses: actions/labeler@f27b608878404679385c85cfa523b85ccb86e213 # v6.1.0 with: sync-labels: true diff --git a/.github/workflows/main-ci.yml b/.github/workflows/main-ci.yml index 278f2ed8d1..f624e8f872 100644 --- a/.github/workflows/main-ci.yml +++ b/.github/workflows/main-ci.yml @@ -57,7 +57,7 @@ jobs: - '.github/workflows/api-tests.yml' - '.github/workflows/expose_service_ports.sh' - 
'docker/.env.example' - - 'docker/middleware.env.example' + - 'docker/envs/middleware.env.example' - 'docker/docker-compose.middleware.yaml' - 'docker/docker-compose-template.yaml' - 'docker/generate_docker_compose' @@ -69,7 +69,6 @@ jobs: - 'package.json' - 'pnpm-lock.yaml' - 'pnpm-workspace.yaml' - - '.npmrc' - '.nvmrc' - '.github/workflows/web-tests.yml' - '.github/actions/setup-web/**' @@ -83,10 +82,9 @@ jobs: - 'package.json' - 'pnpm-lock.yaml' - 'pnpm-workspace.yaml' - - '.npmrc' - '.nvmrc' - 'docker/docker-compose.middleware.yaml' - - 'docker/middleware.env.example' + - 'docker/envs/middleware.env.example' - '.github/workflows/web-e2e.yml' - '.github/actions/setup-web/**' vdb: @@ -96,7 +94,7 @@ jobs: - '.github/workflows/vdb-tests.yml' - '.github/workflows/expose_service_ports.sh' - 'docker/.env.example' - - 'docker/middleware.env.example' + - 'docker/envs/middleware.env.example' - 'docker/docker-compose.yaml' - 'docker/docker-compose-template.yaml' - 'docker/generate_docker_compose' @@ -118,7 +116,7 @@ jobs: - '.github/workflows/db-migration-test.yml' - '.github/workflows/expose_service_ports.sh' - 'docker/.env.example' - - 'docker/middleware.env.example' + - 'docker/envs/middleware.env.example' - 'docker/docker-compose.middleware.yaml' - 'docker/docker-compose-template.yaml' - 'docker/generate_docker_compose' diff --git a/.github/workflows/pyrefly-diff-comment.yml b/.github/workflows/pyrefly-diff-comment.yml index 7f82942e7e..8e16baf933 100644 --- a/.github/workflows/pyrefly-diff-comment.yml +++ b/.github/workflows/pyrefly-diff-comment.yml @@ -77,10 +77,28 @@ jobs: } if (diff.trim()) { - await github.rest.issues.createComment({ + const body = '### Pyrefly Diff\n
\nbase → PR\n\n```diff\n' + diff + '\n```\n
'; + const marker = '### Pyrefly Diff'; + const { data: comments } = await github.rest.issues.listComments({ issue_number: prNumber, owner: context.repo.owner, repo: context.repo.repo, - body: '### Pyrefly Diff\n
\nbase → PR\n\n```diff\n' + diff + '\n```\n
', }); + const existing = comments.find((comment) => comment.body.startsWith(marker)); + + if (existing) { + await github.rest.issues.updateComment({ + comment_id: existing.id, + owner: context.repo.owner, + repo: context.repo.repo, + body, + }); + } else { + await github.rest.issues.createComment({ + issue_number: prNumber, + owner: context.repo.owner, + repo: context.repo.repo, + body, + }); + } } diff --git a/.github/workflows/pyrefly-diff.yml b/.github/workflows/pyrefly-diff.yml index 0cf54e3585..386bd25751 100644 --- a/.github/workflows/pyrefly-diff.yml +++ b/.github/workflows/pyrefly-diff.yml @@ -103,9 +103,26 @@ jobs: ].join('\n') : '### Pyrefly Diff\nNo changes detected.'; - await github.rest.issues.createComment({ + const marker = '### Pyrefly Diff'; + const { data: comments } = await github.rest.issues.listComments({ issue_number: prNumber, owner: context.repo.owner, repo: context.repo.repo, - body, }); + const existing = comments.find((comment) => comment.body.startsWith(marker)); + + if (existing) { + await github.rest.issues.updateComment({ + comment_id: existing.id, + owner: context.repo.owner, + repo: context.repo.repo, + body, + }); + } else { + await github.rest.issues.createComment({ + issue_number: prNumber, + owner: context.repo.owner, + repo: context.repo.repo, + body, + }); + } diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index 6b00899cf0..4ce121ba60 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -83,7 +83,6 @@ jobs: package.json pnpm-lock.yaml pnpm-workspace.yaml - .npmrc .nvmrc .github/workflows/style.yml .github/actions/setup-web/** diff --git a/.github/workflows/tool-test-sdks.yaml b/.github/workflows/tool-test-sdks.yaml index 79fddb1853..adaf99f33a 100644 --- a/.github/workflows/tool-test-sdks.yaml +++ b/.github/workflows/tool-test-sdks.yaml @@ -9,7 +9,6 @@ on: - package.json - pnpm-lock.yaml - pnpm-workspace.yaml - - .npmrc concurrency: group: sdk-tests-${{ github.head_ref || 
github.run_id }} diff --git a/.github/workflows/translate-i18n-claude.yml b/.github/workflows/translate-i18n-claude.yml index 5f48c22c56..4e738df684 100644 --- a/.github/workflows/translate-i18n-claude.yml +++ b/.github/workflows/translate-i18n-claude.yml @@ -158,7 +158,7 @@ jobs: - name: Run Claude Code for Translation Sync if: steps.context.outputs.CHANGED_FILES != '' - uses: anthropics/claude-code-action@567fe954a4527e81f132d87d1bdbcc94f7737434 # v1.0.107 + uses: anthropics/claude-code-action@476e359e6203e73dad705c8b322e333fabbd7416 # v1.0.119 with: anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }} github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/vdb-tests-full.yml b/.github/workflows/vdb-tests-full.yml index 5c241af5c5..1405eb4eeb 100644 --- a/.github/workflows/vdb-tests-full.yml +++ b/.github/workflows/vdb-tests-full.yml @@ -51,7 +51,7 @@ jobs: - name: Set up dotenvs run: | cp docker/.env.example docker/.env - cp docker/middleware.env.example docker/middleware.env + cp docker/envs/middleware.env.example docker/middleware.env - name: Expose Service Ports run: sh .github/workflows/expose_service_ports.sh diff --git a/.github/workflows/vdb-tests.yml b/.github/workflows/vdb-tests.yml index 38ec96f00f..cdcdcb27d7 100644 --- a/.github/workflows/vdb-tests.yml +++ b/.github/workflows/vdb-tests.yml @@ -48,7 +48,7 @@ jobs: - name: Set up dotenvs run: | cp docker/.env.example docker/.env - cp docker/middleware.env.example docker/middleware.env + cp docker/envs/middleware.env.example docker/middleware.env - name: Expose Service Ports run: sh .github/workflows/expose_service_ports.sh diff --git a/.gitignore b/.gitignore index 836bddbb49..dc3b3f284f 100644 --- a/.gitignore +++ b/.gitignore @@ -219,6 +219,9 @@ node_modules # plugin migrate plugins.jsonl +# generated API OpenAPI specs +packages/contracts/openapi/ + # mise mise.toml diff --git a/.npmrc b/.npmrc deleted file mode 100644 index cffe8cdef1..0000000000 --- a/.npmrc +++ /dev/null @@ -1 +0,0 
@@ -save-exact=true diff --git a/Makefile b/Makefile index d8c9df5208..3b5024683f 100644 --- a/Makefile +++ b/Makefile @@ -3,6 +3,10 @@ DOCKER_REGISTRY=langgenius WEB_IMAGE=$(DOCKER_REGISTRY)/dify-web API_IMAGE=$(DOCKER_REGISTRY)/dify-api VERSION=latest +DOCKER_DIR=docker +DOCKER_MIDDLEWARE_ENV=$(DOCKER_DIR)/middleware.env +DOCKER_MIDDLEWARE_ENV_EXAMPLE=$(DOCKER_DIR)/envs/middleware.env.example +DOCKER_MIDDLEWARE_PROJECT=dify-middlewares-dev # Default target - show help .DEFAULT_GOAL := help @@ -17,8 +21,13 @@ dev-setup: prepare-docker prepare-web prepare-api # Step 1: Prepare Docker middleware prepare-docker: @echo "🐳 Setting up Docker middleware..." - @cp -n docker/middleware.env.example docker/middleware.env 2>/dev/null || echo "Docker middleware.env already exists" - @cd docker && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p dify-middlewares-dev up -d + @if [ ! -f "$(DOCKER_MIDDLEWARE_ENV)" ]; then \ + cp "$(DOCKER_MIDDLEWARE_ENV_EXAMPLE)" "$(DOCKER_MIDDLEWARE_ENV)"; \ + echo "Docker middleware.env created"; \ + else \ + echo "Docker middleware.env already exists"; \ + fi + @cd $(DOCKER_DIR) && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p $(DOCKER_MIDDLEWARE_PROJECT) up -d @echo "✅ Docker middleware started" # Step 2: Prepare web environment @@ -39,12 +48,18 @@ prepare-api: # Clean dev environment dev-clean: @echo "⚠️ Stopping Docker containers..." - @cd docker && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p dify-middlewares-dev down + @if [ -f "$(DOCKER_MIDDLEWARE_ENV)" ]; then \ + cd $(DOCKER_DIR) && docker compose -f docker-compose.middleware.yaml --env-file middleware.env -p $(DOCKER_MIDDLEWARE_PROJECT) down; \ + else \ + echo "Docker middleware.env does not exist, skipping compose down"; \ + fi @echo "🗑️ Removing volumes..." 
@rm -rf docker/volumes/db + @rm -rf docker/volumes/mysql @rm -rf docker/volumes/redis @rm -rf docker/volumes/plugin_daemon @rm -rf docker/volumes/weaviate + @rm -rf docker/volumes/sandbox/dependencies @rm -rf api/storage @echo "✅ Cleanup complete" @@ -71,13 +86,13 @@ type-check: @echo "📝 Running type checks (basedpyright + pyrefly + mypy)..." @./dev/basedpyright-check $(PATH_TO_CHECK) @./dev/pyrefly-check-local - @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --check-untyped-defs --disable-error-code=import-untyped . + @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --exclude 'dev/generate_swagger_specs.py' --check-untyped-defs --disable-error-code=import-untyped . @echo "✅ Type checks complete" type-check-core: @echo "📝 Running core type checks (basedpyright + mypy)..." @./dev/basedpyright-check $(PATH_TO_CHECK) - @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --check-untyped-defs --disable-error-code=import-untyped . + @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --exclude 'dev/generate_swagger_specs.py' --exclude 'dev/generate_fastopenapi_specs.py' --check-untyped-defs --disable-error-code=import-untyped . @echo "✅ Core type checks complete" test: @@ -132,7 +147,7 @@ help: @echo " make prepare-docker - Set up Docker middleware" @echo " make prepare-web - Set up web environment" @echo " make prepare-api - Set up API environment" - @echo " make dev-clean - Stop Docker middleware containers" + @echo " make dev-clean - Stop Docker middleware containers and remove dev data" @echo "" @echo "Backend Code Quality:" @echo " make format - Format code with ruff" diff --git a/README.md b/README.md index 778028fc76..b6cbb0e126 100644 --- a/README.md +++ b/README.md @@ -137,7 +137,7 @@ Star Dify on GitHub and be instantly notified of new releases. 
### Custom configurations -If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments). +If you need to customize the configuration, edit `docker/.env`. The essential startup defaults live in [`docker/.env.example`](docker/.env.example), and optional advanced variables are split under `docker/envs/` by theme. After making any changes, re-run `docker compose up -d` from the `docker` directory. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments). 
### Metrics Monitoring with Grafana diff --git a/api/.env.example b/api/.env.example index f6f65011ea..40fed7403c 100644 --- a/api/.env.example +++ b/api/.env.example @@ -34,7 +34,7 @@ TRIGGER_URL=http://localhost:5001 FILES_ACCESS_TIMEOUT=300 # Collaboration mode toggle -ENABLE_COLLABORATION_MODE=false +ENABLE_COLLABORATION_MODE=true # Access token expiration time in minutes ACCESS_TOKEN_EXPIRE_MINUTES=60 @@ -88,6 +88,10 @@ REDIS_HEALTH_CHECK_INTERVAL=30 CELERY_BROKER_URL=redis://:difyai123456@localhost:${REDIS_PORT}/1 CELERY_BACKEND=redis +# Ops trace retry configuration +OPS_TRACE_RETRYABLE_DISPATCH_MAX_RETRIES=60 +OPS_TRACE_RETRYABLE_DISPATCH_DELAY_SECONDS=5 + # Database configuration DB_TYPE=postgresql DB_USERNAME=postgres @@ -98,6 +102,8 @@ DB_DATABASE=dify SQLALCHEMY_POOL_PRE_PING=true SQLALCHEMY_POOL_TIMEOUT=30 +# Connection pool reset behavior on return +SQLALCHEMY_POOL_RESET_ON_RETURN=rollback # Storage configuration # use for store upload files, private keys... @@ -381,7 +387,7 @@ VIKINGDB_ACCESS_KEY=your-ak VIKINGDB_SECRET_KEY=your-sk VIKINGDB_REGION=cn-shanghai VIKINGDB_HOST=api-vikingdb.xxx.volces.com -VIKINGDB_SCHEMA=http +VIKINGDB_SCHEME=http VIKINGDB_CONNECTION_TIMEOUT=30 VIKINGDB_SOCKET_TIMEOUT=30 @@ -432,8 +438,6 @@ UPLOAD_FILE_EXTENSION_BLACKLIST= # Model configuration MULTIMODAL_SEND_FORMAT=base64 -PROMPT_GENERATION_MAX_TOKENS=512 -CODE_GENERATION_MAX_TOKENS=1024 PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false # Mail configuration, support: resend, smtp, sendgrid diff --git a/api/AGENTS.md b/api/AGENTS.md index 8e5d9f600d..eb4404509d 100644 --- a/api/AGENTS.md +++ b/api/AGENTS.md @@ -193,6 +193,10 @@ Before opening a PR / submitting: - Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic. - Services: coordinate repositories, providers, background tasks; keep side effects explicit. - Document non-obvious behaviour with concise docstrings and comments. 
+- For Flask-RESTX controller request, query, and response schemas, follow `controllers/API_SCHEMA_GUIDE.md`. + In short: use Pydantic models, document GET query params with `query_params_from_model(...)`, register response + DTOs with `register_response_schema_models(...)`, serialize with `ResponseModel.model_validate(...).model_dump(...)`, + and avoid adding new legacy `ns.model(...)`, `@marshal_with(...)`, or GET `@ns.expect(...)` patterns. ### Miscellaneous diff --git a/api/commands/account.py b/api/commands/account.py index 761323a73d..0d99ce7a0f 100644 --- a/api/commands/account.py +++ b/api/commands/account.py @@ -113,8 +113,18 @@ def create_tenant(email: str, language: str | None = None, name: str | None = No # Validates name encoding for non-Latin characters. name = name.strip().encode("utf-8").decode("utf-8") if name else None - # generate random password - new_password = secrets.token_urlsafe(16) + # Generate a random password that satisfies the password policy. + # The iteration limit guards against infinite loops caused by unexpected bugs in valid_password. + for _ in range(100): + new_password = secrets.token_urlsafe(16) + try: + valid_password(new_password) + break + except Exception: + continue + else: + click.echo(click.style("Failed to generate a valid password. 
Please try again.", fg="red")) + return # register account account = RegisterService.register( diff --git a/api/configs/feature/__init__.py b/api/configs/feature/__init__.py index 52e33c1789..26b8ea670b 100644 --- a/api/configs/feature/__init__.py +++ b/api/configs/feature/__init__.py @@ -1137,6 +1137,18 @@ class MultiModalTransferConfig(BaseSettings): ) +class OpsTraceConfig(BaseSettings): + OPS_TRACE_RETRYABLE_DISPATCH_MAX_RETRIES: PositiveInt = Field( + description="Maximum retry attempts for transient ops trace provider dispatch failures.", + default=60, + ) + + OPS_TRACE_RETRYABLE_DISPATCH_DELAY_SECONDS: PositiveInt = Field( + description="Delay in seconds between transient ops trace provider dispatch retry attempts.", + default=5, + ) + + class CeleryBeatConfig(BaseSettings): CELERY_BEAT_SCHEDULER_TIME: int = Field( description="Interval in days for Celery Beat scheduler execution, default to 1 day", @@ -1298,7 +1310,7 @@ class PositionConfig(BaseSettings): class CollaborationConfig(BaseSettings): ENABLE_COLLABORATION_MODE: bool = Field( description="Whether to enable collaboration mode features across the workspace", - default=False, + default=True, ) @@ -1417,6 +1429,7 @@ class FeatureConfig( ModelLoadBalanceConfig, ModerationConfig, MultiModalTransferConfig, + OpsTraceConfig, PositionConfig, RagEtlConfig, RepositoryConfig, diff --git a/api/configs/middleware/__init__.py b/api/configs/middleware/__init__.py index c392b8840f..ee8b93aa9f 100644 --- a/api/configs/middleware/__init__.py +++ b/api/configs/middleware/__init__.py @@ -114,7 +114,7 @@ class SQLAlchemyEngineOptionsDict(TypedDict): pool_pre_ping: bool connect_args: dict[str, str] pool_use_lifo: bool - pool_reset_on_return: None + pool_reset_on_return: Literal["commit", "rollback", None] pool_timeout: int @@ -223,6 +223,11 @@ class DatabaseConfig(BaseSettings): default=30, ) + SQLALCHEMY_POOL_RESET_ON_RETURN: Literal["commit", "rollback", None] = Field( + description="Connection pool reset behavior on 
return. Options: 'commit', 'rollback', or None", + default="rollback", + ) + RETRIEVAL_SERVICE_EXECUTORS: NonNegativeInt = Field( description="Number of processes for the retrieval service, default to CPU cores.", default=os.cpu_count() or 1, @@ -252,7 +257,7 @@ class DatabaseConfig(BaseSettings): "pool_pre_ping": self.SQLALCHEMY_POOL_PRE_PING, "connect_args": connect_args, "pool_use_lifo": self.SQLALCHEMY_POOL_USE_LIFO, - "pool_reset_on_return": None, + "pool_reset_on_return": self.SQLALCHEMY_POOL_RESET_ON_RETURN, "pool_timeout": self.SQLALCHEMY_POOL_TIMEOUT, } return result diff --git a/api/constants/recommended_apps.json b/api/constants/recommended_apps.json index 3779fb0180..3d728f1b2e 100644 --- a/api/constants/recommended_apps.json +++ b/api/constants/recommended_apps.json @@ -19,7 +19,7 @@ "name": "Website Generator" }, "app_id": "b53545b1-79ea-4da3-b31a-c39391c6f041", - "category": "Programming", + "categories": ["Programming"], "copyright": null, "description": null, "is_listed": true, @@ -35,7 +35,7 @@ "name": "Investment Analysis Report Copilot" }, "app_id": "a23b57fa-85da-49c0-a571-3aff375976c1", - "category": "Agent", + "categories": ["Agent"], "copyright": "Dify.AI", "description": "Welcome to your personalized Investment Analysis Copilot service, where we delve into the depths of stock analysis to provide you with comprehensive insights. \n", "is_listed": true, @@ -51,7 +51,7 @@ "name": "Workflow Planning Assistant " }, "app_id": "f3303a7d-a81c-404e-b401-1f8711c998c1", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "An assistant that helps you plan and select the right node for a workflow (V0.6.0). ", "is_listed": true, @@ -67,7 +67,7 @@ "name": "Automated Email Reply " }, "app_id": "e9d92058-7d20-4904-892f-75d90bef7587", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Reply emails using Gmail API. 
It will automatically retrieve email in your inbox and create a response in Gmail. \nConfigure your Gmail API in Google Cloud Console. ", "is_listed": true, @@ -83,7 +83,7 @@ "name": "Book Translation " }, "app_id": "98b87f88-bd22-4d86-8b74-86beba5e0ed4", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "A workflow designed to translate a full book up to 15000 tokens per run. Uses Code node to separate text into chunks and Iteration to translate each chunk. ", "is_listed": true, @@ -99,7 +99,7 @@ "name": "Python bug fixer" }, "app_id": "cae337e6-aec5-4c7b-beca-d6f1a808bd5e", - "category": "Programming", + "categories": ["Programming"], "copyright": null, "description": null, "is_listed": true, @@ -115,7 +115,7 @@ "name": "Code Interpreter" }, "app_id": "d077d587-b072-4f2c-b631-69ed1e7cdc0f", - "category": "Programming", + "categories": ["Programming"], "copyright": "Copyright 2023 Dify", "description": "Code interpreter, clarifying the syntax and semantics of the code.", "is_listed": true, @@ -131,7 +131,7 @@ "name": "SVG Logo Design " }, "app_id": "73fbb5f1-c15d-4d74-9cc8-46d9db9b2cca", - "category": "Agent", + "categories": ["Agent"], "copyright": "Dify.AI", "description": "Hello, I am your creative partner in bringing ideas to vivid life! I can assist you in creating stunning designs by leveraging abilities of DALL·E 3. ", "is_listed": true, @@ -147,7 +147,7 @@ "name": "Long Story Generator (Iteration) " }, "app_id": "5efb98d7-176b-419c-b6ef-50767391ab62", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "A workflow demonstrating how to use Iteration node to generate long article that is longer than the context length of LLMs. 
", "is_listed": true, @@ -163,7 +163,7 @@ "name": "Text Summarization Workflow" }, "app_id": "f00c4531-6551-45ee-808f-1d7903099515", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Based on users' choice, retrieve external knowledge to more accurately summarize articles.", "is_listed": true, @@ -179,7 +179,7 @@ "name": "YouTube Channel Data Analysis" }, "app_id": "be591209-2ca8-410f-8f3b-ca0e530dd638", - "category": "Agent", + "categories": ["Agent"], "copyright": "Dify.AI", "description": "I am a YouTube Channel Data Analysis Copilot, I am here to provide expert data analysis tailored to your needs. ", "is_listed": true, @@ -195,7 +195,7 @@ "name": "Article Grading Bot" }, "app_id": "a747f7b4-c48b-40d6-b313-5e628232c05f", - "category": "Writing", + "categories": ["Writing"], "copyright": null, "description": "Assess the quality of articles and text based on user defined criteria. ", "is_listed": true, @@ -211,7 +211,7 @@ "name": "SEO Blog Generator" }, "app_id": "18f3bd03-524d-4d7a-8374-b30dbe7c69d5", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Workflow for retrieving information from the internet, followed by segmented generation of SEO blogs.", "is_listed": true, @@ -227,7 +227,7 @@ "name": "SQL Creator" }, "app_id": "050ef42e-3e0c-40c1-a6b6-a64f2c49d744", - "category": "Programming", + "categories": ["Programming"], "copyright": "Copyright 2023 Dify", "description": "Write SQL from natural language by pasting in your schema with the request.Please describe your query requirements in natural language and select the target database type.", "is_listed": true, @@ -243,7 +243,7 @@ "name": "Sentiment Analysis " }, "app_id": "f06bf86b-d50c-4895-a942-35112dbe4189", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Batch sentiment analysis of text, followed by JSON output of sentiment classification along with scores.", "is_listed": true, @@ 
-259,7 +259,7 @@ "name": "Strategic Consulting Expert" }, "app_id": "7e8ca1ae-02f2-4b5f-979e-62d19133bee2", - "category": "Assistant", + "categories": ["Assistant"], "copyright": "Copyright 2023 Dify", "description": "I can answer your questions related to strategic marketing.", "is_listed": true, @@ -275,7 +275,7 @@ "name": "Code Converter" }, "app_id": "4006c4b2-0735-4f37-8dbb-fb1a8c5bd87a", - "category": "Programming", + "categories": ["Programming"], "copyright": "Copyright 2023 Dify", "description": "This is an application that provides the ability to convert code snippets in multiple programming languages. You can input the code you wish to convert, select the target programming language, and get the desired output.", "is_listed": true, @@ -291,7 +291,7 @@ "name": "Question Classifier + Knowledge + Chatbot " }, "app_id": "d9f6b733-e35d-4a40-9f38-ca7bbfa009f7", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Basic Workflow Template, a chatbot capable of identifying intents alongside with a knowledge base.", "is_listed": true, @@ -307,7 +307,7 @@ "name": "AI Front-end interviewer" }, "app_id": "127efead-8944-4e20-ba9d-12402eb345e0", - "category": "HR", + "categories": ["HR"], "copyright": "Copyright 2023 Dify", "description": "A simulated front-end interviewer that tests the skill level of front-end development through questioning.", "is_listed": true, @@ -323,7 +323,7 @@ "name": "Knowledge Retrieval + Chatbot " }, "app_id": "e9870913-dd01-4710-9f06-15d4180ca1ce", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Basic Workflow Template, A chatbot with a knowledge base. 
", "is_listed": true, @@ -339,7 +339,7 @@ "name": "Email Assistant Workflow " }, "app_id": "dd5b6353-ae9b-4bce-be6a-a681a12cf709", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "A multifunctional email assistant capable of summarizing, replying, composing, proofreading, and checking grammar.", "is_listed": true, @@ -355,7 +355,7 @@ "name": "Customer Review Analysis Workflow " }, "app_id": "9c0cd31f-4b62-4005-adf5-e3888d08654a", - "category": "Workflow", + "categories": ["Workflow"], "copyright": null, "description": "Utilize LLM (Large Language Models) to classify customer reviews and forward them to the internal system.", "is_listed": true, diff --git a/api/controllers/API_SCHEMA_GUIDE.md b/api/controllers/API_SCHEMA_GUIDE.md new file mode 100644 index 0000000000..5b1b055b09 --- /dev/null +++ b/api/controllers/API_SCHEMA_GUIDE.md @@ -0,0 +1,193 @@ +# API Schema Guide + +This guide describes the expected Flask-RESTX + Pydantic pattern for controller request payloads, query +parameters, response schemas, and Swagger documentation. + +## Principles + +- Use Pydantic `BaseModel` for request bodies and query parameters. +- Use `fields.base.ResponseModel` for response DTOs. +- Keep runtime validation and Swagger documentation wired to the same Pydantic model. +- Prefer explicit validation and serialization in controller methods over Flask-RESTX marshalling. +- Do not add new Flask-RESTX `fields.*` dictionaries, `Namespace.model(...)` exports, or `@marshal_with(...)` for migrated or new endpoints. +- Do not use `@ns.expect(...)` for GET query parameters. Flask-RESTX documents that as a request body. + +## Naming + +- Request body models: use a `Payload` suffix. + - Example: `WorkflowRunPayload`, `DatasourceVariablesPayload`. +- Query parameter models: use a `Query` suffix. + - Example: `WorkflowRunListQuery`, `MessageListQuery`. +- Response models: use a `Response` suffix and inherit from `ResponseModel`. 
+ - Example: `WorkflowRunDetailResponse`, `WorkflowRunNodeExecutionListResponse`. +- Use `ListResponse` or `PaginationResponse` for wrapper responses. + - Example: `WorkflowRunNodeExecutionListResponse`, `WorkflowRunPaginationResponse`. +- Keep these models near the controller when they are endpoint-specific. Move them to `fields/*_fields.py` only when shared by multiple controllers. + +## Registering Models For Swagger + +Use helpers from `controllers.common.schema`. + +```python +from controllers.common.schema import ( + query_params_from_model, + register_response_schema_models, + register_schema_models, +) +``` + +Register request payload and query models with `register_schema_models(...)`: + +```python +register_schema_models( + console_ns, + WorkflowRunPayload, + WorkflowRunListQuery, +) +``` + +Register response models with `register_response_schema_models(...)`: + +```python +register_response_schema_models( + console_ns, + WorkflowRunDetailResponse, + WorkflowRunPaginationResponse, +) +``` + +Response models are registered in Pydantic serialization mode. This matters when a response model uses +`validation_alias` to read internal object attributes but emits public API field names. For example, a response model +can validate from `inputs_dict` while documenting and serializing `inputs`. + +## Request Bodies + +For non-GET request bodies: + +1. Define a Pydantic `Payload` model. +2. Register it with `register_schema_models(...)`. +3. Use `@ns.expect(ns.models[Payload.__name__])` for Swagger documentation. +4. Validate from `ns.payload or {}` inside the controller. 
+ +```python +class DraftWorkflowNodeRunPayload(BaseModel): + inputs: dict[str, Any] + query: str = "" + + +register_schema_models(console_ns, DraftWorkflowNodeRunPayload) + + +@console_ns.expect(console_ns.models[DraftWorkflowNodeRunPayload.__name__]) +def post(self, app_model: App, node_id: str): + payload = DraftWorkflowNodeRunPayload.model_validate(console_ns.payload or {}) + result = service.run(..., inputs=payload.inputs, query=payload.query) + return WorkflowRunNodeExecutionResponse.model_validate(result, from_attributes=True).model_dump(mode="json") +``` + +## Query Parameters + +For GET query parameters: + +1. Define a Pydantic `Query` model. +2. Register it with `register_schema_models(...)` if it is referenced elsewhere in docs, or only use + `query_params_from_model(...)` if a body schema is not needed. +3. Use `@ns.doc(params=query_params_from_model(QueryModel))`. +4. Validate from `request.args.to_dict(flat=True)` or an explicit dict when type coercion is needed. + +```python +class WorkflowRunListQuery(BaseModel): + last_id: str | None = Field(default=None, description="Last run ID for pagination") + limit: int = Field(default=20, ge=1, le=100, description="Number of items per page (1-100)") + + +@console_ns.doc(params=query_params_from_model(WorkflowRunListQuery)) +def get(self, app_model: App): + query = WorkflowRunListQuery.model_validate(request.args.to_dict(flat=True)) + result = service.list(..., limit=query.limit, last_id=query.last_id) + return WorkflowRunPaginationResponse.model_validate(result, from_attributes=True).model_dump(mode="json") +``` + +Do not do this for GET query parameters: + +```python +@console_ns.expect(console_ns.models[WorkflowRunListQuery.__name__]) +def get(...): + ... +``` + +That documents a GET request body and is not the expected contract. 
+ +## Responses + +Response models should inherit from `ResponseModel`: + +```python +class WorkflowRunNodeExecutionResponse(ResponseModel): + id: str + inputs: Any = Field(default=None, validation_alias="inputs_dict") + process_data: Any = Field(default=None, validation_alias="process_data_dict") + outputs: Any = Field(default=None, validation_alias="outputs_dict") +``` + +Document response models with `@ns.response(...)`: + +```python +@console_ns.response( + 200, + "Node run started successfully", + console_ns.models[WorkflowRunNodeExecutionResponse.__name__], +) +def post(...): + ... +``` + +Serialize explicitly: + +```python +return WorkflowRunNodeExecutionResponse.model_validate( + workflow_node_execution, + from_attributes=True, +).model_dump(mode="json") +``` + +If the service can return `None`, translate that into the expected HTTP error before validation: + +```python +workflow_run = service.get_workflow_run(...) +if workflow_run is None: + raise NotFound("Workflow run not found") + +return WorkflowRunDetailResponse.model_validate(workflow_run, from_attributes=True).model_dump(mode="json") +``` + +## Legacy Flask-RESTX Patterns + +Avoid adding these patterns to new or migrated endpoints: + +- `ns.model(...)` for new request/response DTOs. +- Module-level exported RESTX model objects such as `workflow_run_detail_model`. +- `fields.Nested({...})` with raw inline dict field maps. +- `@marshal_with(...)` for response serialization. +- `@ns.expect(...)` for GET query params. + +Existing legacy field dictionaries may remain where an endpoint has not yet been migrated. Keep that compatibility local +to the legacy area and avoid importing RESTX model objects from controllers. + +## Verifying Swagger + +For schema and documentation changes, run focused tests and generate Swagger JSON: + +```bash +uv run --project . pytest tests/unit_tests/controllers/common/test_schema.py +uv run --project . 
pytest tests/unit_tests/commands/test_generate_swagger_specs.py tests/unit_tests/controllers/test_swagger.py +uv run --project . dev/generate_swagger_specs.py --output-dir /tmp/dify-openapi-check +``` + +Inspect affected endpoints with `jq`. Check that: + +- GET parameters are `in: query`. +- Request bodies appear only where the endpoint has a body. +- Responses reference the expected `*Response` schema. +- Response schemas use public serialized names, not internal validation aliases like `inputs_dict`. + diff --git a/api/controllers/common/helpers.py b/api/controllers/common/helpers.py index ef89e66980..84903733b5 100644 --- a/api/controllers/common/helpers.py +++ b/api/controllers/common/helpers.py @@ -41,7 +41,8 @@ def guess_file_info_from_response(response: httpx.Response): # Try to extract filename from URL parsed_url = urllib.parse.urlparse(url) url_path = parsed_url.path - filename = os.path.basename(url_path) + # Decode percent-encoded characters in the path segment + filename = urllib.parse.unquote(os.path.basename(url_path)) # If filename couldn't be extracted, use Content-Disposition header if not filename: diff --git a/api/controllers/common/schema.py b/api/controllers/common/schema.py index 8d112c203b..58140f3ac8 100644 --- a/api/controllers/common/schema.py +++ b/api/controllers/common/schema.py @@ -1,6 +1,14 @@ -"""Helpers for registering Pydantic models with Flask-RESTX namespaces.""" +"""Helpers for registering Pydantic models with Flask-RESTX namespaces. +Flask-RESTX treats `SchemaModel` bodies as opaque JSON schemas; it does not +promote Pydantic's nested `$defs` into top-level Swagger `definitions`. +These helpers keep that translation centralized so models registered through +`register_schema_models` emit resolvable Swagger 2.0 references. 
+""" + +from collections.abc import Mapping from enum import StrEnum +from typing import Any, Literal, NotRequired, TypedDict from flask_restx import Namespace from pydantic import BaseModel, TypeAdapter @@ -8,10 +16,59 @@ from pydantic import BaseModel, TypeAdapter DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" -def register_schema_model(namespace: Namespace, model: type[BaseModel]) -> None: - """Register a single BaseModel with a namespace for Swagger documentation.""" +QueryParamDoc = TypedDict( + "QueryParamDoc", + { + "in": NotRequired[str], + "type": NotRequired[str], + "items": NotRequired[dict[str, object]], + "required": NotRequired[bool], + "description": NotRequired[str], + "enum": NotRequired[list[object]], + "default": NotRequired[object], + "minimum": NotRequired[int | float], + "maximum": NotRequired[int | float], + "minLength": NotRequired[int], + "maxLength": NotRequired[int], + "minItems": NotRequired[int], + "maxItems": NotRequired[int], + }, +) - namespace.schema_model(model.__name__, model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) + +def _register_json_schema(namespace: Namespace, name: str, schema: dict) -> None: + """Register a JSON schema and promote any nested Pydantic `$defs`.""" + + nested_definitions = schema.get("$defs") + schema_to_register = dict(schema) + if isinstance(nested_definitions, dict): + schema_to_register.pop("$defs") + + namespace.schema_model(name, schema_to_register) + + if not isinstance(nested_definitions, dict): + return + + for nested_name, nested_schema in nested_definitions.items(): + if isinstance(nested_schema, dict): + _register_json_schema(namespace, nested_name, nested_schema) + + +JsonSchemaMode = Literal["validation", "serialization"] + + +def _register_schema_model(namespace: Namespace, model: type[BaseModel], *, mode: JsonSchemaMode) -> None: + _register_json_schema( + namespace, + model.__name__, + model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0, 
mode=mode), + ) + + +def register_schema_model(namespace: Namespace, model: type[BaseModel]) -> None: + """Register a BaseModel and its nested schema definitions for Swagger documentation.""" + + _register_schema_model(namespace, model, mode="validation") def register_schema_models(namespace: Namespace, *models: type[BaseModel]) -> None: @@ -21,6 +78,19 @@ def register_schema_models(namespace: Namespace, *models: type[BaseModel]) -> No register_schema_model(namespace, model) +def register_response_schema_model(namespace: Namespace, model: type[BaseModel]) -> None: + """Register a BaseModel using its serialized response shape.""" + + _register_schema_model(namespace, model, mode="serialization") + + +def register_response_schema_models(namespace: Namespace, *models: type[BaseModel]) -> None: + """Register multiple response BaseModels using their serialized response shape.""" + + for model in models: + register_response_schema_model(namespace, model) + + def get_or_create_model(model_name: str, field_def): # Import lazily to avoid circular imports between console controllers and schema helpers. from controllers.console import console_ns @@ -34,15 +104,114 @@ def get_or_create_model(model_name: str, field_def): def register_enum_models(namespace: Namespace, *models: type[StrEnum]) -> None: """Register multiple StrEnum with a namespace.""" for model in models: - namespace.schema_model( - model.__name__, TypeAdapter(model).json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) + _register_json_schema( + namespace, + model.__name__, + TypeAdapter(model).json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), ) +def query_params_from_model(model: type[BaseModel]) -> dict[str, QueryParamDoc]: + """Build Flask-RESTX query parameter docs from a flat Pydantic model. 
+ + `Namespace.expect()` treats Pydantic schema models as request bodies, so GET + endpoints should keep runtime validation on the Pydantic model and feed this + derived mapping to `Namespace.doc(params=...)` for Swagger documentation. + """ + + schema = model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) + properties = schema.get("properties", {}) + if not isinstance(properties, Mapping): + return {} + + required = schema.get("required", []) + required_names = set(required) if isinstance(required, list) else set() + + params: dict[str, QueryParamDoc] = {} + for name, property_schema in properties.items(): + if not isinstance(name, str) or not isinstance(property_schema, Mapping): + continue + + params[name] = _query_param_from_property(property_schema, required=name in required_names) + + return params + + +def _query_param_from_property(property_schema: Mapping[str, Any], *, required: bool) -> QueryParamDoc: + param_schema = _nullable_property_schema(property_schema) + param_doc: QueryParamDoc = {"in": "query", "required": required} + + description = param_schema.get("description") + if isinstance(description, str): + param_doc["description"] = description + + schema_type = param_schema.get("type") + if isinstance(schema_type, str) and schema_type in {"array", "boolean", "integer", "number", "string"}: + param_doc["type"] = schema_type + if schema_type == "array": + items = param_schema.get("items") + if isinstance(items, Mapping): + item_type = items.get("type") + if isinstance(item_type, str): + param_doc["items"] = {"type": item_type} + + enum = param_schema.get("enum") + if isinstance(enum, list): + param_doc["enum"] = enum + + default = param_schema.get("default") + if default is not None: + param_doc["default"] = default + + minimum = param_schema.get("minimum") + if isinstance(minimum, int | float): + param_doc["minimum"] = minimum + + maximum = param_schema.get("maximum") + if isinstance(maximum, int | float): + param_doc["maximum"] = 
maximum + + min_length = param_schema.get("minLength") + if isinstance(min_length, int): + param_doc["minLength"] = min_length + + max_length = param_schema.get("maxLength") + if isinstance(max_length, int): + param_doc["maxLength"] = max_length + + min_items = param_schema.get("minItems") + if isinstance(min_items, int): + param_doc["minItems"] = min_items + + max_items = param_schema.get("maxItems") + if isinstance(max_items, int): + param_doc["maxItems"] = max_items + + return param_doc + + +def _nullable_property_schema(property_schema: Mapping[str, Any]) -> Mapping[str, Any]: + any_of = property_schema.get("anyOf") + if not isinstance(any_of, list): + return property_schema + + non_null_candidates = [ + candidate for candidate in any_of if isinstance(candidate, Mapping) and candidate.get("type") != "null" + ] + + if len(non_null_candidates) == 1: + return {**property_schema, **non_null_candidates[0]} + + return property_schema + + __all__ = [ "DEFAULT_REF_TEMPLATE_SWAGGER_2_0", "get_or_create_model", + "query_params_from_model", "register_enum_models", + "register_response_schema_model", + "register_response_schema_models", "register_schema_model", "register_schema_models", ] diff --git a/api/controllers/console/admin.py b/api/controllers/console/admin.py index dce394be97..ae2b1007dd 100644 --- a/api/controllers/console/admin.py +++ b/api/controllers/console/admin.py @@ -3,6 +3,7 @@ import io from collections.abc import Callable from functools import wraps from typing import cast +from uuid import UUID from flask import request from flask_restx import Resource @@ -12,6 +13,7 @@ from werkzeug.exceptions import BadRequest, NotFound, Unauthorized from configs import dify_config from constants.languages import supported_language +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.wraps import only_edition_cloud from core.db.session_factory import session_factory @@ -20,8 +22,6 @@ from 
libs.token import extract_access_token from models.model import App, ExporleBanner, InstalledApp, RecommendedApp, TrialApp from services.billing_service import BillingService, LangContentDict -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class InsertExploreAppPayload(BaseModel): app_id: str = Field(...) @@ -58,15 +58,7 @@ class InsertExploreBannerPayload(BaseModel): model_config = {"populate_by_name": True} -console_ns.schema_model( - InsertExploreAppPayload.__name__, - InsertExploreAppPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) - -console_ns.schema_model( - InsertExploreBannerPayload.__name__, - InsertExploreBannerPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) +register_schema_models(console_ns, InsertExploreAppPayload, InsertExploreBannerPayload) def admin_required[**P, R](view: Callable[P, R]) -> Callable[P, R]: @@ -190,7 +182,7 @@ class InsertExploreAppApi(Resource): @console_ns.response(204, "App removed successfully") @only_edition_cloud @admin_required - def delete(self, app_id): + def delete(self, app_id: UUID): with session_factory.create_session() as session: recommended_app = session.execute( select(RecommendedApp).where(RecommendedApp.app_id == str(app_id)) @@ -301,15 +293,7 @@ class BatchAddNotificationAccountsPayload(BaseModel): user_email: list[str] = Field(..., description="List of account email addresses") -console_ns.schema_model( - UpsertNotificationPayload.__name__, - UpsertNotificationPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) - -console_ns.schema_model( - BatchAddNotificationAccountsPayload.__name__, - BatchAddNotificationAccountsPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) +register_schema_models(console_ns, UpsertNotificationPayload, BatchAddNotificationAccountsPayload) @console_ns.route("/admin/upsert_notification") @@ -411,11 +395,11 @@ class BatchAddNotificationAccountsApi(Resource): raise 
BadRequest("Invalid file type. Only CSV (.csv) and TXT (.txt) files are allowed.") try: - content = file.read().decode("utf-8") + content = file.stream.read().decode("utf-8") except UnicodeDecodeError: try: - file.seek(0) - content = file.read().decode("gbk") + file.stream.seek(0) + content = file.stream.read().decode("gbk") except UnicodeDecodeError: raise BadRequest("Unable to decode the file. Please use UTF-8 or GBK encoding.") diff --git a/api/controllers/console/app/advanced_prompt_template.py b/api/controllers/console/app/advanced_prompt_template.py index ed66da1be5..ad21671176 100644 --- a/api/controllers/console/app/advanced_prompt_template.py +++ b/api/controllers/console/app/advanced_prompt_template.py @@ -34,7 +34,7 @@ class AdvancedPromptTemplateList(Resource): @login_required @account_initialization_required def get(self): - args = AdvancedPromptTemplateQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = AdvancedPromptTemplateQuery.model_validate(request.args.to_dict(flat=True)) prompt_args: AdvancedPromptTemplateArgs = { "app_mode": args.app_mode, "model_mode": args.model_mode, diff --git a/api/controllers/console/app/agent.py b/api/controllers/console/app/agent.py index cfdb9cf417..c05600ced5 100644 --- a/api/controllers/console/app/agent.py +++ b/api/controllers/console/app/agent.py @@ -2,6 +2,7 @@ from flask import request from flask_restx import Resource, fields from pydantic import BaseModel, Field, field_validator +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.app.wraps import get_app_model from controllers.console.wraps import account_initialization_required, setup_required @@ -10,8 +11,6 @@ from libs.login import login_required from models.model import AppMode from services.agent_service import AgentService -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class AgentLogQuery(BaseModel): message_id: str = Field(..., 
description="Message UUID") @@ -23,9 +22,7 @@ class AgentLogQuery(BaseModel): return uuid_value(value) -console_ns.schema_model( - AgentLogQuery.__name__, AgentLogQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) +register_schema_models(console_ns, AgentLogQuery) @console_ns.route("/apps//agent/logs") @@ -44,6 +41,6 @@ class AgentLogApi(Resource): @get_app_model(mode=[AppMode.AGENT_CHAT]) def get(self, app_model): """Get agent logs""" - args = AgentLogQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = AgentLogQuery.model_validate(request.args.to_dict(flat=True)) return AgentService.get_agent_logs(app_model, args.conversation_id, args.message_id) diff --git a/api/controllers/console/app/annotation.py b/api/controllers/console/app/annotation.py index 528785931e..cfeaec4af9 100644 --- a/api/controllers/console/app/annotation.py +++ b/api/controllers/console/app/annotation.py @@ -1,4 +1,5 @@ from typing import Any, Literal +from uuid import UUID from flask import abort, make_response, request from flask_restx import Resource @@ -33,8 +34,6 @@ from services.annotation_service import ( UpsertAnnotationArgs, ) -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class AnnotationReplyPayload(BaseModel): score_threshold: float = Field(..., description="Score threshold for annotation matching") @@ -87,17 +86,6 @@ class AnnotationFilePayload(BaseModel): return uuid_value(value) -def reg(model: type[BaseModel]) -> None: - console_ns.schema_model(model.__name__, model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) - - -reg(AnnotationReplyPayload) -reg(AnnotationSettingUpdatePayload) -reg(AnnotationListQuery) -reg(CreateAnnotationPayload) -reg(UpdateAnnotationPayload) -reg(AnnotationReplyStatusQuery) -reg(AnnotationFilePayload) register_schema_models( console_ns, Annotation, @@ -105,6 +93,13 @@ register_schema_models( AnnotationExportList, AnnotationHitHistory, AnnotationHitHistoryList, + 
AnnotationReplyPayload, + AnnotationSettingUpdatePayload, + AnnotationListQuery, + CreateAnnotationPayload, + UpdateAnnotationPayload, + AnnotationReplyStatusQuery, + AnnotationFilePayload, ) @@ -121,8 +116,7 @@ class AnnotationReplyActionApi(Resource): @account_initialization_required @cloud_edition_billing_resource_check("annotation") @edit_permission_required - def post(self, app_id, action: Literal["enable", "disable"]): - app_id = str(app_id) + def post(self, app_id: UUID, action: Literal["enable", "disable"]): args = AnnotationReplyPayload.model_validate(console_ns.payload) match action: case "enable": @@ -131,9 +125,9 @@ class AnnotationReplyActionApi(Resource): "embedding_provider_name": args.embedding_provider_name, "embedding_model_name": args.embedding_model_name, } - result = AppAnnotationService.enable_app_annotation(enable_args, app_id) + result = AppAnnotationService.enable_app_annotation(enable_args, str(app_id)) case "disable": - result = AppAnnotationService.disable_app_annotation(app_id) + result = AppAnnotationService.disable_app_annotation(str(app_id)) return result, 200 @@ -148,9 +142,8 @@ class AppAnnotationSettingDetailApi(Resource): @login_required @account_initialization_required @edit_permission_required - def get(self, app_id): - app_id = str(app_id) - result = AppAnnotationService.get_app_annotation_setting_by_app_id(app_id) + def get(self, app_id: UUID): + result = AppAnnotationService.get_app_annotation_setting_by_app_id(str(app_id)) return result, 200 @@ -166,14 +159,13 @@ class AppAnnotationSettingUpdateApi(Resource): @login_required @account_initialization_required @edit_permission_required - def post(self, app_id, annotation_setting_id): - app_id = str(app_id) + def post(self, app_id: UUID, annotation_setting_id): annotation_setting_id = str(annotation_setting_id) args = AnnotationSettingUpdatePayload.model_validate(console_ns.payload) setting_args: UpdateAnnotationSettingArgs = {"score_threshold": args.score_threshold} - result = 
AppAnnotationService.update_app_annotation_setting(app_id, annotation_setting_id, setting_args) + result = AppAnnotationService.update_app_annotation_setting(str(app_id), annotation_setting_id, setting_args) return result, 200 @@ -189,7 +181,7 @@ class AnnotationReplyActionStatusApi(Resource): @account_initialization_required @cloud_edition_billing_resource_check("annotation") @edit_permission_required - def get(self, app_id, job_id, action): + def get(self, app_id: UUID, job_id, action): job_id = str(job_id) app_annotation_job_key = f"{action}_app_annotation_job_{str(job_id)}" cache_result = redis_client.get(app_annotation_job_key) @@ -217,14 +209,13 @@ class AnnotationApi(Resource): @login_required @account_initialization_required @edit_permission_required - def get(self, app_id): - args = AnnotationListQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + def get(self, app_id: UUID): + args = AnnotationListQuery.model_validate(request.args.to_dict(flat=True)) page = args.page limit = args.limit keyword = args.keyword - app_id = str(app_id) - annotation_list, total = AppAnnotationService.get_annotation_list_by_app_id(app_id, page, limit, keyword) + annotation_list, total = AppAnnotationService.get_annotation_list_by_app_id(str(app_id), page, limit, keyword) annotation_models = TypeAdapter(list[Annotation]).validate_python(annotation_list, from_attributes=True) response = AnnotationList( data=annotation_models, @@ -246,8 +237,7 @@ class AnnotationApi(Resource): @account_initialization_required @cloud_edition_billing_resource_check("annotation") @edit_permission_required - def post(self, app_id): - app_id = str(app_id) + def post(self, app_id: UUID): args = CreateAnnotationPayload.model_validate(console_ns.payload) upsert_args: UpsertAnnotationArgs = {} if args.answer is not None: @@ -258,15 +248,14 @@ class AnnotationApi(Resource): upsert_args["message_id"] = args.message_id if args.question is not None: upsert_args["question"] = args.question - 
annotation = AppAnnotationService.up_insert_app_annotation_from_message(upsert_args, app_id) + annotation = AppAnnotationService.up_insert_app_annotation_from_message(upsert_args, str(app_id)) return Annotation.model_validate(annotation, from_attributes=True).model_dump(mode="json") @setup_required @login_required @account_initialization_required @edit_permission_required - def delete(self, app_id): - app_id = str(app_id) + def delete(self, app_id: UUID): # Use request.args.getlist to get annotation_ids array directly annotation_ids = request.args.getlist("annotation_id") @@ -280,11 +269,11 @@ class AnnotationApi(Resource): "message": "annotation_ids are required if the parameter is provided.", }, 400 - result = AppAnnotationService.delete_app_annotations_in_batch(app_id, annotation_ids) + result = AppAnnotationService.delete_app_annotations_in_batch(str(app_id), annotation_ids) return result, 204 # If no annotation_ids are provided, handle clearing all annotations else: - AppAnnotationService.clear_all_annotations(app_id) + AppAnnotationService.clear_all_annotations(str(app_id)) return {"result": "success"}, 204 @@ -303,9 +292,8 @@ class AnnotationExportApi(Resource): @login_required @account_initialization_required @edit_permission_required - def get(self, app_id): - app_id = str(app_id) - annotation_list = AppAnnotationService.export_annotation_list_by_app_id(app_id) + def get(self, app_id: UUID): + annotation_list = AppAnnotationService.export_annotation_list_by_app_id(str(app_id)) annotation_models = TypeAdapter(list[Annotation]).validate_python(annotation_list, from_attributes=True) response_data = AnnotationExportList(data=annotation_models).model_dump(mode="json") @@ -331,26 +319,22 @@ class AnnotationUpdateDeleteApi(Resource): @account_initialization_required @cloud_edition_billing_resource_check("annotation") @edit_permission_required - def post(self, app_id, annotation_id): - app_id = str(app_id) - annotation_id = str(annotation_id) + def post(self, 
app_id: UUID, annotation_id: UUID): args = UpdateAnnotationPayload.model_validate(console_ns.payload) update_args: UpdateAnnotationArgs = {} if args.answer is not None: update_args["answer"] = args.answer if args.question is not None: update_args["question"] = args.question - annotation = AppAnnotationService.update_app_annotation_directly(update_args, app_id, annotation_id) + annotation = AppAnnotationService.update_app_annotation_directly(update_args, str(app_id), str(annotation_id)) return Annotation.model_validate(annotation, from_attributes=True).model_dump(mode="json") @setup_required @login_required @account_initialization_required @edit_permission_required - def delete(self, app_id, annotation_id): - app_id = str(app_id) - annotation_id = str(annotation_id) - AppAnnotationService.delete_app_annotation(app_id, annotation_id) + def delete(self, app_id: UUID, annotation_id: UUID): + AppAnnotationService.delete_app_annotation(str(app_id), str(annotation_id)) return {"result": "success"}, 204 @@ -371,11 +355,9 @@ class AnnotationBatchImportApi(Resource): @annotation_import_rate_limit @annotation_import_concurrency_limit @edit_permission_required - def post(self, app_id): + def post(self, app_id: UUID): from configs import dify_config - app_id = str(app_id) - # check file if "file" not in request.files: raise NoFileUploadedError() @@ -391,9 +373,9 @@ class AnnotationBatchImportApi(Resource): raise ValueError("Invalid file type. 
Only CSV files are allowed") # Check file size before processing - file.seek(0, 2) # Seek to end of file - file_size = file.tell() - file.seek(0) # Reset to beginning + file.stream.seek(0, 2) # Seek to end of file + file_size = file.stream.tell() + file.stream.seek(0) # Reset to beginning max_size_bytes = dify_config.ANNOTATION_IMPORT_FILE_SIZE_LIMIT * 1024 * 1024 if file_size > max_size_bytes: @@ -406,7 +388,7 @@ class AnnotationBatchImportApi(Resource): if file_size == 0: raise ValueError("The uploaded file is empty") - return AppAnnotationService.batch_import_app_annotations(app_id, file) + return AppAnnotationService.batch_import_app_annotations(str(app_id), file) @console_ns.route("/apps//annotations/batch-import-status/") @@ -421,8 +403,7 @@ class AnnotationBatchImportStatusApi(Resource): @account_initialization_required @cloud_edition_billing_resource_check("annotation") @edit_permission_required - def get(self, app_id, job_id): - job_id = str(job_id) + def get(self, app_id: UUID, job_id: UUID): indexing_cache_key = f"app_annotation_batch_import_{str(job_id)}" cache_result = redis_client.get(indexing_cache_key) if cache_result is None: @@ -456,13 +437,11 @@ class AnnotationHitHistoryListApi(Resource): @login_required @account_initialization_required @edit_permission_required - def get(self, app_id, annotation_id): + def get(self, app_id: UUID, annotation_id: UUID): page = request.args.get("page", default=1, type=int) limit = request.args.get("limit", default=20, type=int) - app_id = str(app_id) - annotation_id = str(annotation_id) annotation_hit_history_list, total = AppAnnotationService.get_annotation_hit_histories( - app_id, annotation_id, page, limit + str(app_id), str(annotation_id), page, limit ) history_models = TypeAdapter(list[AnnotationHitHistory]).validate_python( annotation_hit_history_list, from_attributes=True diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py index f938483c70..a8aaecefca 100644 --- 
a/api/controllers/console/app/app.py +++ b/api/controllers/console/app/app.py @@ -1,13 +1,16 @@ import logging +import re import uuid from datetime import datetime from typing import Any, Literal +from uuid import UUID from flask import request from flask_restx import Resource from pydantic import AliasChoices, BaseModel, Field, computed_field, field_validator from sqlalchemy import select from sqlalchemy.orm import Session +from werkzeug.datastructures import MultiDict from werkzeug.exceptions import BadRequest from controllers.common.helpers import FileInfo @@ -23,6 +26,7 @@ from controllers.console.wraps import ( is_admin_or_owner_required, setup_required, ) +from core.db.session_factory import session_factory from core.ops.ops_trace_manager import OpsTraceManager from core.rag.entities import PreProcessingRule, Rule, Segmentation from core.rag.retrieval.retrieval_methods import RetrievalMethod @@ -36,7 +40,7 @@ from libs.login import current_account_with_tenant, login_required from models import App, DatasetPermissionEnum, Workflow from models.model import IconType from services.app_dsl_service import AppDslService -from services.app_service import AppService +from services.app_service import AppListParams, AppService, CreateAppParams from services.enterprise.enterprise_service import EnterpriseService from services.enterprise import rbac_service as enterprise_rbac_service from services.entities.dsl_entities import ImportMode, ImportStatus @@ -59,6 +63,7 @@ ALLOW_CREATE_APP_MODES = ["chat", "agent-chat", "advanced-chat", "workflow", "co register_enum_models(console_ns, IconType) _logger = logging.getLogger(__name__) +_TAG_IDS_BRACKET_PATTERN = re.compile(r"^tag_ids\[(\d+)\]$") class AppListQuery(BaseModel): @@ -68,22 +73,19 @@ class AppListQuery(BaseModel): default="all", description="App mode filter" ) name: str | None = Field(default=None, description="Filter by app name") - tag_ids: list[str] | None = Field(default=None, description="Comma-separated tag 
IDs") + tag_ids: list[str] | None = Field(default=None, description="Filter by tag IDs") is_created_by_me: bool | None = Field(default=None, description="Filter by creator") @field_validator("tag_ids", mode="before") @classmethod - def validate_tag_ids(cls, value: str | list[str] | None) -> list[str] | None: + def validate_tag_ids(cls, value: list[str] | None) -> list[str] | None: if not value: return None - if isinstance(value, str): - items = [item.strip() for item in value.split(",") if item.strip()] - elif isinstance(value, list): - items = [str(item).strip() for item in value if item and str(item).strip()] - else: - raise TypeError("Unsupported tag_ids type.") + if not isinstance(value, list): + raise ValueError("Unsupported tag_ids type.") + items = [str(item).strip() for item in value if item and str(item).strip()] if not items: return None @@ -93,6 +95,26 @@ class AppListQuery(BaseModel): raise ValueError("Invalid UUID format in tag_ids.") from exc +def _normalize_app_list_query_args(query_args: MultiDict[str, str]) -> dict[str, str | list[str]]: + normalized: dict[str, str | list[str]] = {} + indexed_tag_ids: list[tuple[int, str]] = [] + + for key in query_args: + match = _TAG_IDS_BRACKET_PATTERN.fullmatch(key) + if match: + indexed_tag_ids.extend((int(match.group(1)), value) for value in query_args.getlist(key)) + continue + + value = query_args.get(key) + if value is not None: + normalized[key] = value + + if indexed_tag_ids: + normalized["tag_ids"] = [value for _, value in sorted(indexed_tag_ids)] + + return normalized + + class CreateAppPayload(BaseModel): name: str = Field(..., min_length=1, description="App name") description: str | None = Field(default=None, description="App description (max 400 chars)", max_length=400) @@ -458,12 +480,19 @@ class AppListApi(Resource): """Get app list""" current_user, current_tenant_id = current_account_with_tenant() - args = AppListQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore - args_dict = 
args.model_dump() + args = AppListQuery.model_validate(_normalize_app_list_query_args(request.args)) + params = AppListParams( + page=args.page, + limit=args.limit, + mode=args.mode, + name=args.name, + tag_ids=args.tag_ids, + is_created_by_me=args.is_created_by_me, + ) # get app list app_service = AppService() - app_pagination = app_service.get_paginate_apps(current_user.id, current_tenant_id, args_dict) + app_pagination = app_service.get_paginate_apps(current_user.id, current_tenant_id, params) if not app_pagination: empty = AppPagination(page=args.page, limit=args.limit, total=0, has_more=False, data=[]) return empty.model_dump(mode="json"), 200 @@ -541,9 +570,17 @@ class AppListApi(Resource): """Create app""" current_user, current_tenant_id = current_account_with_tenant() args = CreateAppPayload.model_validate(console_ns.payload) + params = CreateAppParams( + name=args.name, + description=args.description, + mode=args.mode, + icon_type=args.icon_type, + icon=args.icon, + icon_background=args.icon_background, + ) app_service = AppService() - app = app_service.create_app(current_tenant_id, args.model_dump(), current_user) + app = app_service.create_app(current_tenant_id, params, current_user) app_detail = AppDetail.model_validate(app, from_attributes=True) return app_detail.model_dump(mode="json"), 201 @@ -697,7 +734,7 @@ class AppExportApi(Resource): @edit_permission_required def get(self, app_model): """Export app""" - args = AppExportQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = AppExportQuery.model_validate(request.args.to_dict(flat=True)) payload = AppExportResponse( data=AppDslService.export_dsl( @@ -836,9 +873,10 @@ class AppTraceApi(Resource): @setup_required @login_required @account_initialization_required - def get(self, app_id): + def get(self, app_id: UUID): """Get app trace""" - app_trace_config = OpsTraceManager.get_app_tracing_config(app_id=app_id) + with session_factory.create_session() as session: + 
app_trace_config = OpsTraceManager.get_app_tracing_config(str(app_id), session) return app_trace_config @@ -852,12 +890,12 @@ class AppTraceApi(Resource): @login_required @account_initialization_required @edit_permission_required - def post(self, app_id): + def post(self, app_id: UUID): # add app trace args = AppTracePayload.model_validate(console_ns.payload) OpsTraceManager.update_app_tracing_config( - app_id=app_id, + app_id=str(app_id), enabled=args.enabled, tracing_provider=args.tracing_provider, ) diff --git a/api/controllers/console/app/app_import.py b/api/controllers/console/app/app_import.py index e91dc9cfe5..b653016319 100644 --- a/api/controllers/console/app/app_import.py +++ b/api/controllers/console/app/app_import.py @@ -2,7 +2,7 @@ from flask_restx import Resource from pydantic import BaseModel, Field from sqlalchemy.orm import Session -from controllers.common.schema import register_schema_models +from controllers.common.schema import register_enum_models, register_schema_models from controllers.console.app.wraps import get_app_model from controllers.console.wraps import ( account_initialization_required, @@ -33,6 +33,7 @@ class AppImportPayload(BaseModel): app_id: str | None = Field(None) +register_enum_models(console_ns, ImportStatus) register_schema_models(console_ns, AppImportPayload, Import, CheckDependenciesResult) diff --git a/api/controllers/console/app/audio.py b/api/controllers/console/app/audio.py index 91fbe4a85a..5b673f3394 100644 --- a/api/controllers/console/app/audio.py +++ b/api/controllers/console/app/audio.py @@ -173,7 +173,7 @@ class TextModesApi(Resource): @account_initialization_required def get(self, app_model): try: - args = TextToSpeechVoiceQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = TextToSpeechVoiceQuery.model_validate(request.args.to_dict(flat=True)) response = AudioService.transcript_tts_voices( tenant_id=app_model.tenant_id, diff --git a/api/controllers/console/app/completion.py 
b/api/controllers/console/app/completion.py index fe274e4c9a..6a20296cff 100644 --- a/api/controllers/console/app/completion.py +++ b/api/controllers/console/app/completion.py @@ -7,6 +7,7 @@ from pydantic import BaseModel, Field, field_validator from werkzeug.exceptions import InternalServerError, NotFound import services +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.app.error import ( AppUnavailableError, @@ -37,7 +38,6 @@ from services.app_task_service import AppTaskService from services.errors.llm import InvokeRateLimitError logger = logging.getLogger(__name__) -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" class BaseMessagePayload(BaseModel): @@ -65,13 +65,7 @@ class ChatMessagePayload(BaseMessagePayload): return uuid_value(value) -console_ns.schema_model( - CompletionMessagePayload.__name__, - CompletionMessagePayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) -console_ns.schema_model( - ChatMessagePayload.__name__, ChatMessagePayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) +register_schema_models(console_ns, CompletionMessagePayload, ChatMessagePayload) # define completion message api for user diff --git a/api/controllers/console/app/conversation.py b/api/controllers/console/app/conversation.py index b2b1049f0c..c7347933cb 100644 --- a/api/controllers/console/app/conversation.py +++ b/api/controllers/console/app/conversation.py @@ -39,8 +39,6 @@ from models.model import AppMode from services.conversation_service import ConversationService from services.errors.conversation import ConversationNotExistsError -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class BaseConversationQuery(BaseModel): keyword: str | None = Field(default=None, description="Search keyword") @@ -70,15 +68,6 @@ class ChatConversationQuery(BaseConversationQuery): ) -console_ns.schema_model( - 
CompletionConversationQuery.__name__, - CompletionConversationQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) -console_ns.schema_model( - ChatConversationQuery.__name__, - ChatConversationQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) - register_schema_models( console_ns, CompletionConversationQuery, @@ -89,6 +78,8 @@ register_schema_models( ConversationWithSummaryPaginationResponse, ConversationDetailResponse, ResultResponse, + CompletionConversationQuery, + ChatConversationQuery, ) @@ -107,7 +98,7 @@ class CompletionConversationApi(Resource): @edit_permission_required def get(self, app_model): current_user, _ = current_account_with_tenant() - args = CompletionConversationQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = CompletionConversationQuery.model_validate(request.args.to_dict(flat=True)) query = sa.select(Conversation).where( Conversation.app_id == app_model.id, Conversation.mode == "completion", Conversation.is_deleted.is_(False) @@ -221,7 +212,7 @@ class ChatConversationApi(Resource): @edit_permission_required def get(self, app_model): current_user, _ = current_account_with_tenant() - args = ChatConversationQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ChatConversationQuery.model_validate(request.args.to_dict(flat=True)) subquery = ( sa.select(Conversation.id.label("conversation_id"), EndUser.session_id.label("from_end_user_session_id")) diff --git a/api/controllers/console/app/conversation_variables.py b/api/controllers/console/app/conversation_variables.py index 9c8b095b9f..60a2bfc799 100644 --- a/api/controllers/console/app/conversation_variables.py +++ b/api/controllers/console/app/conversation_variables.py @@ -100,7 +100,7 @@ class ConversationVariablesApi(Resource): @account_initialization_required @get_app_model(mode=AppMode.ADVANCED_CHAT) def get(self, app_model): - args = 
ConversationVariablesQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ConversationVariablesQuery.model_validate(request.args.to_dict(flat=True)) stmt = ( select(ConversationVariable) diff --git a/api/controllers/console/app/generator.py b/api/controllers/console/app/generator.py index c720a5e074..d4f501d34c 100644 --- a/api/controllers/console/app/generator.py +++ b/api/controllers/console/app/generator.py @@ -3,6 +3,7 @@ from collections.abc import Sequence from flask_restx import Resource from pydantic import BaseModel, Field +from controllers.common.schema import register_enum_models, register_schema_models from controllers.console import console_ns from controllers.console.app.error import ( CompletionRequestError, @@ -19,13 +20,12 @@ from core.helper.code_executor.python3.python3_code_provider import Python3CodeP from core.llm_generator.entities import RuleCodeGeneratePayload, RuleGeneratePayload, RuleStructuredOutputPayload from core.llm_generator.llm_generator import LLMGenerator from extensions.ext_database import db +from graphon.model_runtime.entities.llm_entities import LLMMode from graphon.model_runtime.errors.invoke import InvokeError from libs.login import current_account_with_tenant, login_required from models import App from services.workflow_service import WorkflowService -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class InstructionGeneratePayload(BaseModel): flow_id: str = Field(..., description="Workflow/Flow ID") @@ -41,16 +41,16 @@ class InstructionTemplatePayload(BaseModel): type: str = Field(..., description="Instruction template type") -def reg(cls: type[BaseModel]): - console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) - - -reg(RuleGeneratePayload) -reg(RuleCodeGeneratePayload) -reg(RuleStructuredOutputPayload) -reg(InstructionGeneratePayload) -reg(InstructionTemplatePayload) -reg(ModelConfig) +register_enum_models(console_ns, LLMMode) 
+register_schema_models( + console_ns, + RuleGeneratePayload, + RuleCodeGeneratePayload, + RuleStructuredOutputPayload, + InstructionGeneratePayload, + InstructionTemplatePayload, + ModelConfig, +) @console_ns.route("/rule-generate") diff --git a/api/controllers/console/app/ops_trace.py b/api/controllers/console/app/ops_trace.py index cbcf513162..9227d00a21 100644 --- a/api/controllers/console/app/ops_trace.py +++ b/api/controllers/console/app/ops_trace.py @@ -1,18 +1,18 @@ from typing import Any +from uuid import UUID from flask import request from flask_restx import Resource, fields from pydantic import BaseModel, Field from werkzeug.exceptions import BadRequest +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.app.error import TracingConfigCheckError, TracingConfigIsExist, TracingConfigNotExist from controllers.console.wraps import account_initialization_required, setup_required from libs.login import login_required from services.ops_service import OpsService -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class TraceProviderQuery(BaseModel): tracing_provider: str = Field(..., description="Tracing provider name") @@ -23,13 +23,7 @@ class TraceConfigPayload(BaseModel): tracing_config: dict[str, Any] = Field(..., description="Tracing configuration data") -console_ns.schema_model( - TraceProviderQuery.__name__, - TraceProviderQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) -console_ns.schema_model( - TraceConfigPayload.__name__, TraceConfigPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) +register_schema_models(console_ns, TraceProviderQuery, TraceConfigPayload) @console_ns.route("/apps//trace-config") @@ -49,11 +43,11 @@ class TraceAppConfigApi(Resource): @setup_required @login_required @account_initialization_required - def get(self, app_id): - args = TraceProviderQuery.model_validate(request.args.to_dict(flat=True)) 
# type: ignore + def get(self, app_id: UUID): + args = TraceProviderQuery.model_validate(request.args.to_dict(flat=True)) try: - trace_config = OpsService.get_tracing_app_config(app_id=app_id, tracing_provider=args.tracing_provider) + trace_config = OpsService.get_tracing_app_config(app_id=str(app_id), tracing_provider=args.tracing_provider) if not trace_config: return {"has_not_configured": True} return trace_config @@ -71,13 +65,13 @@ class TraceAppConfigApi(Resource): @setup_required @login_required @account_initialization_required - def post(self, app_id): + def post(self, app_id: UUID): """Create a new trace app configuration""" args = TraceConfigPayload.model_validate(console_ns.payload) try: result = OpsService.create_tracing_app_config( - app_id=app_id, tracing_provider=args.tracing_provider, tracing_config=args.tracing_config + app_id=str(app_id), tracing_provider=args.tracing_provider, tracing_config=args.tracing_config ) if not result: raise TracingConfigIsExist() @@ -96,13 +90,13 @@ class TraceAppConfigApi(Resource): @setup_required @login_required @account_initialization_required - def patch(self, app_id): + def patch(self, app_id: UUID): """Update an existing trace app configuration""" args = TraceConfigPayload.model_validate(console_ns.payload) try: result = OpsService.update_tracing_app_config( - app_id=app_id, tracing_provider=args.tracing_provider, tracing_config=args.tracing_config + app_id=str(app_id), tracing_provider=args.tracing_provider, tracing_config=args.tracing_config ) if not result: raise TracingConfigNotExist() @@ -119,12 +113,12 @@ class TraceAppConfigApi(Resource): @setup_required @login_required @account_initialization_required - def delete(self, app_id): + def delete(self, app_id: UUID): """Delete an existing trace app configuration""" - args = TraceProviderQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = TraceProviderQuery.model_validate(request.args.to_dict(flat=True)) try: - result = 
OpsService.delete_tracing_app_config(app_id=app_id, tracing_provider=args.tracing_provider) + result = OpsService.delete_tracing_app_config(app_id=str(app_id), tracing_provider=args.tracing_provider) if not result: raise TracingConfigNotExist() return {"result": "success"}, 204 diff --git a/api/controllers/console/app/statistic.py b/api/controllers/console/app/statistic.py index ffa28b1c95..d23b2837c9 100644 --- a/api/controllers/console/app/statistic.py +++ b/api/controllers/console/app/statistic.py @@ -5,6 +5,7 @@ from flask import abort, jsonify, request from flask_restx import Resource, fields from pydantic import BaseModel, Field, field_validator +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.app.wraps import get_app_model from controllers.console.wraps import account_initialization_required, setup_required @@ -15,8 +16,6 @@ from libs.helper import convert_datetime_to_date from libs.login import current_account_with_tenant, login_required from models import AppMode -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class StatisticTimeRangeQuery(BaseModel): start: str | None = Field(default=None, description="Start date (YYYY-MM-DD HH:MM)") @@ -30,10 +29,7 @@ class StatisticTimeRangeQuery(BaseModel): return value -console_ns.schema_model( - StatisticTimeRangeQuery.__name__, - StatisticTimeRangeQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) +register_schema_models(console_ns, StatisticTimeRangeQuery) @console_ns.route("/apps//statistics/daily-messages") @@ -54,7 +50,7 @@ class DailyMessageStatistic(Resource): def get(self, app_model): account, _ = current_account_with_tenant() - args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) converted_created_at = convert_datetime_to_date("created_at") sql_query = f"""SELECT @@ 
-111,7 +107,7 @@ class DailyConversationStatistic(Resource): def get(self, app_model): account, _ = current_account_with_tenant() - args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) converted_created_at = convert_datetime_to_date("created_at") sql_query = f"""SELECT @@ -167,7 +163,7 @@ class DailyTerminalsStatistic(Resource): def get(self, app_model): account, _ = current_account_with_tenant() - args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) converted_created_at = convert_datetime_to_date("created_at") sql_query = f"""SELECT @@ -224,7 +220,7 @@ class DailyTokenCostStatistic(Resource): def get(self, app_model): account, _ = current_account_with_tenant() - args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) converted_created_at = convert_datetime_to_date("created_at") sql_query = f"""SELECT @@ -284,7 +280,7 @@ class AverageSessionInteractionStatistic(Resource): def get(self, app_model): account, _ = current_account_with_tenant() - args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) converted_created_at = convert_datetime_to_date("c.created_at") sql_query = f"""SELECT @@ -360,7 +356,7 @@ class UserSatisfactionRateStatistic(Resource): def get(self, app_model): account, _ = current_account_with_tenant() - args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) converted_created_at = convert_datetime_to_date("m.created_at") sql_query = f"""SELECT @@ -426,7 +422,7 
@@ class AverageResponseTimeStatistic(Resource): def get(self, app_model): account, _ = current_account_with_tenant() - args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) converted_created_at = convert_datetime_to_date("created_at") sql_query = f"""SELECT @@ -482,7 +478,7 @@ class TokensPerSecondStatistic(Resource): @account_initialization_required def get(self, app_model): account, _ = current_account_with_tenant() - args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = StatisticTimeRangeQuery.model_validate(request.args.to_dict(flat=True)) converted_created_at = convert_datetime_to_date("created_at") sql_query = f"""SELECT diff --git a/api/controllers/console/app/workflow.py b/api/controllers/console/app/workflow.py index 478f783eb0..4f532b437c 100644 --- a/api/controllers/console/app/workflow.py +++ b/api/controllers/console/app/workflow.py @@ -11,9 +11,9 @@ from werkzeug.exceptions import BadRequest, Forbidden, InternalServerError, NotF import services from controllers.common.controller_schemas import DefaultBlockConfigQuery, WorkflowListQuery, WorkflowUpdatePayload +from controllers.common.schema import register_response_schema_model, register_schema_models from controllers.console import console_ns from controllers.console.app.error import ConversationCompletedError, DraftWorkflowNotExist, DraftWorkflowNotSync -from controllers.console.app.workflow_run import workflow_run_node_execution_model from controllers.console.app.wraps import get_app_model from controllers.console.wraps import account_initialization_required, edit_permission_required, setup_required from controllers.web.error import InvokeRateLimitError as InvokeRateLimitHttpError @@ -37,6 +37,7 @@ from factories import file_factory, variable_factory from fields.member_fields import simple_account_fields from 
fields.online_user_fields import online_user_list_fields from fields.workflow_fields import workflow_fields, workflow_pagination_fields +from fields.workflow_run_fields import WorkflowRunNodeExecutionResponse from graphon.enums import NodeType from graphon.file import File from graphon.file import helpers as file_helpers @@ -56,11 +57,13 @@ from services.errors.llm import InvokeRateLimitError from services.workflow_service import DraftWorkflowDeletionError, WorkflowInUseError, WorkflowService logger = logging.getLogger(__name__) + _file_access_controller = DatabaseFileAccessController() LISTENING_RETRY_IN = 2000 -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" + RESTORE_SOURCE_WORKFLOW_MUST_BE_PUBLISHED_MESSAGE = "source workflow must be published" -MAX_WORKFLOW_ONLINE_USERS_QUERY_IDS = 50 +MAX_WORKFLOW_ONLINE_USERS_REQUEST_IDS = 1000 +WORKFLOW_ONLINE_USERS_REDIS_BATCH_SIZE = 50 # Register models for flask_restx to avoid dict type issues in Swagger # Register in dependency order: base models first, then dependent models @@ -158,8 +161,13 @@ class WorkflowFeaturesPayload(BaseModel): features: dict[str, Any] = Field(..., description="Workflow feature configuration") -class WorkflowOnlineUsersQuery(BaseModel): - app_ids: str = Field(..., description="Comma-separated app IDs") +class WorkflowOnlineUsersPayload(BaseModel): + app_ids: list[str] = Field(default_factory=list, description="App IDs") + + @field_validator("app_ids") + @classmethod + def normalize_app_ids(cls, app_ids: list[str]) -> list[str]: + return list(dict.fromkeys(app_id.strip() for app_id in app_ids if app_id.strip())) class DraftWorkflowTriggerRunPayload(BaseModel): @@ -170,25 +178,25 @@ class DraftWorkflowTriggerRunAllPayload(BaseModel): node_ids: list[str] -def reg(cls: type[BaseModel]): - console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) - - -reg(SyncDraftWorkflowPayload) -reg(AdvancedChatWorkflowRunPayload) 
-reg(IterationNodeRunPayload) -reg(LoopNodeRunPayload) -reg(DraftWorkflowRunPayload) -reg(DraftWorkflowNodeRunPayload) -reg(PublishWorkflowPayload) -reg(DefaultBlockConfigQuery) -reg(ConvertToWorkflowPayload) -reg(WorkflowListQuery) -reg(WorkflowUpdatePayload) -reg(WorkflowFeaturesPayload) -reg(WorkflowOnlineUsersQuery) -reg(DraftWorkflowTriggerRunPayload) -reg(DraftWorkflowTriggerRunAllPayload) +register_schema_models( + console_ns, + SyncDraftWorkflowPayload, + AdvancedChatWorkflowRunPayload, + IterationNodeRunPayload, + LoopNodeRunPayload, + DraftWorkflowRunPayload, + DraftWorkflowNodeRunPayload, + PublishWorkflowPayload, + DefaultBlockConfigQuery, + ConvertToWorkflowPayload, + WorkflowListQuery, + WorkflowUpdatePayload, + WorkflowFeaturesPayload, + WorkflowOnlineUsersPayload, + DraftWorkflowTriggerRunPayload, + DraftWorkflowTriggerRunAllPayload, +) +register_response_schema_model(console_ns, WorkflowRunNodeExecutionResponse) # TODO(QuantumGhost): Refactor existing node run API to handle file parameter parsing @@ -534,9 +542,12 @@ class HumanInputDeliveryTestPayload(BaseModel): ) -reg(HumanInputFormPreviewPayload) -reg(HumanInputFormSubmitPayload) -reg(HumanInputDeliveryTestPayload) +register_schema_models( + console_ns, + HumanInputFormPreviewPayload, + HumanInputFormSubmitPayload, + HumanInputDeliveryTestPayload, +) @console_ns.route("/apps//advanced-chat/workflows/draft/human-input/nodes//form/preview") @@ -754,14 +765,17 @@ class DraftWorkflowNodeRunApi(Resource): @console_ns.doc(description="Run draft workflow node") @console_ns.doc(params={"app_id": "Application ID", "node_id": "Node ID"}) @console_ns.expect(console_ns.models[DraftWorkflowNodeRunPayload.__name__]) - @console_ns.response(200, "Node run started successfully", workflow_run_node_execution_model) + @console_ns.response( + 200, + "Node run started successfully", + console_ns.models[WorkflowRunNodeExecutionResponse.__name__], + ) @console_ns.response(403, "Permission denied") 
@console_ns.response(404, "Node not found") @setup_required @login_required @account_initialization_required @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW]) - @marshal_with(workflow_run_node_execution_model) @edit_permission_required def post(self, app_model: App, node_id: str): """ @@ -793,7 +807,9 @@ class DraftWorkflowNodeRunApi(Resource): files=files, ) - return workflow_node_execution + return WorkflowRunNodeExecutionResponse.model_validate( + workflow_node_execution, from_attributes=True + ).model_dump(mode="json") @console_ns.route("/apps//workflows/publish") @@ -896,7 +912,7 @@ class DefaultBlockConfigApi(Resource): """ Get default block config """ - args = DefaultBlockConfigQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = DefaultBlockConfigQuery.model_validate(request.args.to_dict(flat=True)) filters = None if args.q: @@ -989,7 +1005,7 @@ class PublishedAllWorkflowApi(Resource): """ current_user, _ = current_account_with_tenant() - args = WorkflowListQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = WorkflowListQuery.model_validate(request.args.to_dict(flat=True)) page = args.page limit = args.limit user_id = args.user_id @@ -1137,14 +1153,17 @@ class DraftWorkflowNodeLastRunApi(Resource): @console_ns.doc("get_draft_workflow_node_last_run") @console_ns.doc(description="Get last run result for draft workflow node") @console_ns.doc(params={"app_id": "Application ID", "node_id": "Node ID"}) - @console_ns.response(200, "Node last run retrieved successfully", workflow_run_node_execution_model) + @console_ns.response( + 200, + "Node last run retrieved successfully", + console_ns.models[WorkflowRunNodeExecutionResponse.__name__], + ) @console_ns.response(404, "Node last run not found") @console_ns.response(403, "Permission denied") @setup_required @login_required @account_initialization_required @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW]) - 
@marshal_with(workflow_run_node_execution_model) def get(self, app_model: App, node_id: str): srv = WorkflowService() workflow = srv.get_draft_workflow(app_model) @@ -1157,7 +1176,7 @@ class DraftWorkflowNodeLastRunApi(Resource): ) if node_exec is None: raise NotFound("last run not found") - return node_exec + return WorkflowRunNodeExecutionResponse.model_validate(node_exec, from_attributes=True).model_dump(mode="json") @console_ns.route("/apps//workflows/draft/trigger/run") @@ -1384,19 +1403,19 @@ class DraftWorkflowTriggerRunAllApi(Resource): @console_ns.route("/apps/workflows/online-users") class WorkflowOnlineUsersApi(Resource): - @console_ns.expect(console_ns.models[WorkflowOnlineUsersQuery.__name__]) + @console_ns.expect(console_ns.models[WorkflowOnlineUsersPayload.__name__]) @console_ns.doc("get_workflow_online_users") @console_ns.doc(description="Get workflow online users") @setup_required @login_required @account_initialization_required @marshal_with(online_user_list_fields) - def get(self): - args = WorkflowOnlineUsersQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + def post(self): + args = WorkflowOnlineUsersPayload.model_validate(console_ns.payload or {}) - app_ids = list(dict.fromkeys(app_id.strip() for app_id in args.app_ids.split(",") if app_id.strip())) - if len(app_ids) > MAX_WORKFLOW_ONLINE_USERS_QUERY_IDS: - raise BadRequest(f"Maximum {MAX_WORKFLOW_ONLINE_USERS_QUERY_IDS} app_ids are allowed per request.") + app_ids = args.app_ids + if len(app_ids) > MAX_WORKFLOW_ONLINE_USERS_REQUEST_IDS: + raise BadRequest(f"Maximum {MAX_WORKFLOW_ONLINE_USERS_REQUEST_IDS} app_ids are allowed per request.") if not app_ids: return {"data": []} @@ -1404,13 +1423,24 @@ class WorkflowOnlineUsersApi(Resource): _, current_tenant_id = current_account_with_tenant() workflow_service = WorkflowService() accessible_app_ids = workflow_service.get_accessible_app_ids(app_ids, current_tenant_id) + ordered_accessible_app_ids = [app_id for app_id in app_ids 
if app_id in accessible_app_ids] + + users_json_by_app_id: dict[str, Any] = {} + for start_index in range(0, len(ordered_accessible_app_ids), WORKFLOW_ONLINE_USERS_REDIS_BATCH_SIZE): + app_id_batch = ordered_accessible_app_ids[ + start_index : start_index + WORKFLOW_ONLINE_USERS_REDIS_BATCH_SIZE + ] + pipe = redis_client.pipeline(transaction=False) + for app_id in app_id_batch: + pipe.hgetall(f"{WORKFLOW_ONLINE_USERS_PREFIX}{app_id}") + + users_json_batch = pipe.execute() + for app_id, users_json in zip(app_id_batch, users_json_batch): + users_json_by_app_id[app_id] = users_json results = [] - for app_id in app_ids: - if app_id not in accessible_app_ids: - continue - - users_json = redis_client.hgetall(f"{WORKFLOW_ONLINE_USERS_PREFIX}{app_id}") + for app_id in ordered_accessible_app_ids: + users_json = users_json_by_app_id.get(app_id, {}) users = [] for _, user_info_json in users_json.items(): diff --git a/api/controllers/console/app/workflow_app_log.py b/api/controllers/console/app/workflow_app_log.py index 4b39590235..ddc900eb2d 100644 --- a/api/controllers/console/app/workflow_app_log.py +++ b/api/controllers/console/app/workflow_app_log.py @@ -185,7 +185,7 @@ class WorkflowAppLogApi(Resource): """ Get workflow app logs """ - args = WorkflowAppLogQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = WorkflowAppLogQuery.model_validate(request.args.to_dict(flat=True)) # get paginate workflow app logs workflow_app_service = WorkflowAppService() @@ -228,7 +228,7 @@ class WorkflowArchivedLogApi(Resource): """ Get workflow archived logs """ - args = WorkflowAppLogQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = WorkflowAppLogQuery.model_validate(request.args.to_dict(flat=True)) workflow_app_service = WorkflowAppService() with sessionmaker(db.engine, expire_on_commit=False).begin() as session: diff --git a/api/controllers/console/app/workflow_comment.py b/api/controllers/console/app/workflow_comment.py index 
e7c3e982a6..c003be1303 100644 --- a/api/controllers/console/app/workflow_comment.py +++ b/api/controllers/console/app/workflow_comment.py @@ -23,7 +23,6 @@ from services.account_service import TenantService from services.workflow_comment_service import WorkflowCommentService logger = logging.getLogger(__name__) -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" class WorkflowCommentCreatePayload(BaseModel): @@ -52,13 +51,14 @@ class WorkflowCommentMentionUsersPayload(BaseModel): users: list[AccountWithRole] -for model in ( +register_schema_models( + console_ns, + AccountWithRole, + WorkflowCommentMentionUsersPayload, WorkflowCommentCreatePayload, WorkflowCommentUpdatePayload, WorkflowCommentReplyPayload, -): - console_ns.schema_model(model.__name__, model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) -register_schema_models(console_ns, AccountWithRole, WorkflowCommentMentionUsersPayload) +) workflow_comment_basic_model = console_ns.model("WorkflowCommentBasic", workflow_comment_basic_fields) workflow_comment_detail_model = console_ns.model("WorkflowCommentDetail", workflow_comment_detail_fields) diff --git a/api/controllers/console/app/workflow_draft_variable.py b/api/controllers/console/app/workflow_draft_variable.py index e32ba5f66c..3c887c33dc 100644 --- a/api/controllers/console/app/workflow_draft_variable.py +++ b/api/controllers/console/app/workflow_draft_variable.py @@ -8,6 +8,7 @@ from flask_restx import Resource, fields, marshal, marshal_with from pydantic import BaseModel, Field from sqlalchemy.orm import sessionmaker +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.app.error import ( DraftWorkflowNotExist, @@ -33,7 +34,6 @@ from services.workflow_service import WorkflowService logger = logging.getLogger(__name__) _file_access_controller = DatabaseFileAccessController() -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" class 
WorkflowDraftVariableListQuery(BaseModel): @@ -56,33 +56,25 @@ class EnvironmentVariableUpdatePayload(BaseModel): environment_variables: list[dict[str, Any]] = Field(..., description="Environment variables for the draft workflow") -console_ns.schema_model( - WorkflowDraftVariableListQuery.__name__, - WorkflowDraftVariableListQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) -console_ns.schema_model( - WorkflowDraftVariableUpdatePayload.__name__, - WorkflowDraftVariableUpdatePayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) -console_ns.schema_model( - ConversationVariableUpdatePayload.__name__, - ConversationVariableUpdatePayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) -console_ns.schema_model( - EnvironmentVariableUpdatePayload.__name__, - EnvironmentVariableUpdatePayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), +register_schema_models( + console_ns, + WorkflowDraftVariableListQuery, + WorkflowDraftVariableUpdatePayload, + ConversationVariableUpdatePayload, + EnvironmentVariableUpdatePayload, ) def _convert_values_to_json_serializable_object(value: Segment): - if isinstance(value, FileSegment): - return value.value.model_dump() - elif isinstance(value, ArrayFileSegment): - return [i.model_dump() for i in value.value] - elif isinstance(value, SegmentGroup): - return [_convert_values_to_json_serializable_object(i) for i in value.value] - else: - return value.value + match value: + case FileSegment(): + return value.value.model_dump() + case ArrayFileSegment(): + return [i.model_dump() for i in value.value] + case SegmentGroup(): + return [_convert_values_to_json_serializable_object(i) for i in value.value] + case _: + return value.value def _serialize_var_value(variable: WorkflowDraftVariable): @@ -259,7 +251,7 @@ class WorkflowVariableCollectionApi(Resource): """ Get draft workflow """ - args = 
WorkflowDraftVariableListQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = WorkflowDraftVariableListQuery.model_validate(request.args.to_dict(flat=True)) # fetch draft workflow by app_model workflow_service = WorkflowService() diff --git a/api/controllers/console/app/workflow_run.py b/api/controllers/console/app/workflow_run.py index 6748d95d6b..97d2003209 100644 --- a/api/controllers/console/app/workflow_run.py +++ b/api/controllers/console/app/workflow_run.py @@ -1,30 +1,28 @@ from datetime import UTC, datetime, timedelta -from typing import Literal, TypedDict, cast +from typing import Literal, cast from flask import request -from flask_restx import Resource, fields, marshal_with +from flask_restx import Resource from pydantic import BaseModel, Field, field_validator from sqlalchemy import select from sqlalchemy.orm import sessionmaker from configs import dify_config +from controllers.common.schema import query_params_from_model, register_response_schema_models, register_schema_models from controllers.console import console_ns from controllers.console.app.wraps import get_app_model from controllers.console.wraps import account_initialization_required, setup_required from controllers.web.error import NotFoundError from core.workflow.human_input_forms import load_form_tokens_by_form_id as _load_form_tokens_by_form_id from extensions.ext_database import db -from fields.end_user_fields import simple_end_user_fields -from fields.member_fields import simple_account_fields +from fields.base import ResponseModel from fields.workflow_run_fields import ( - advanced_chat_workflow_run_for_list_fields, - advanced_chat_workflow_run_pagination_fields, - workflow_run_count_fields, - workflow_run_detail_fields, - workflow_run_for_list_fields, - workflow_run_node_execution_fields, - workflow_run_node_execution_list_fields, - workflow_run_pagination_fields, + AdvancedChatWorkflowRunPaginationResponse, + WorkflowRunCountResponse, + 
WorkflowRunDetailResponse, + WorkflowRunNodeExecutionListResponse, + WorkflowRunNodeExecutionResponse, + WorkflowRunPaginationResponse, ) from graphon.entities.pause_reason import HumanInputRequired from graphon.enums import WorkflowExecutionStatus @@ -52,82 +50,6 @@ def _build_backstage_input_url(form_token: str | None) -> str | None: WORKFLOW_RUN_STATUS_CHOICES = ["running", "succeeded", "failed", "stopped", "partial-succeeded"] EXPORT_SIGNED_URL_EXPIRE_SECONDS = 3600 -# Register models for flask_restx to avoid dict type issues in Swagger -# Register in dependency order: base models first, then dependent models - -# Base models -simple_account_model = console_ns.model("SimpleAccount", simple_account_fields) - -simple_end_user_model = console_ns.model("SimpleEndUser", simple_end_user_fields) - -# Models that depend on simple_account_fields -workflow_run_for_list_fields_copy = workflow_run_for_list_fields.copy() -workflow_run_for_list_fields_copy["created_by_account"] = fields.Nested( - simple_account_model, attribute="created_by_account", allow_null=True -) -workflow_run_for_list_model = console_ns.model("WorkflowRunForList", workflow_run_for_list_fields_copy) - -advanced_chat_workflow_run_for_list_fields_copy = advanced_chat_workflow_run_for_list_fields.copy() -advanced_chat_workflow_run_for_list_fields_copy["created_by_account"] = fields.Nested( - simple_account_model, attribute="created_by_account", allow_null=True -) -advanced_chat_workflow_run_for_list_model = console_ns.model( - "AdvancedChatWorkflowRunForList", advanced_chat_workflow_run_for_list_fields_copy -) - -workflow_run_detail_fields_copy = workflow_run_detail_fields.copy() -workflow_run_detail_fields_copy["created_by_account"] = fields.Nested( - simple_account_model, attribute="created_by_account", allow_null=True -) -workflow_run_detail_fields_copy["created_by_end_user"] = fields.Nested( - simple_end_user_model, attribute="created_by_end_user", allow_null=True -) -workflow_run_detail_model = 
console_ns.model("WorkflowRunDetail", workflow_run_detail_fields_copy) - -workflow_run_node_execution_fields_copy = workflow_run_node_execution_fields.copy() -workflow_run_node_execution_fields_copy["created_by_account"] = fields.Nested( - simple_account_model, attribute="created_by_account", allow_null=True -) -workflow_run_node_execution_fields_copy["created_by_end_user"] = fields.Nested( - simple_end_user_model, attribute="created_by_end_user", allow_null=True -) -workflow_run_node_execution_model = console_ns.model( - "WorkflowRunNodeExecution", workflow_run_node_execution_fields_copy -) - -# Simple models without nested dependencies -workflow_run_count_model = console_ns.model("WorkflowRunCount", workflow_run_count_fields) - -# Pagination models that depend on list models -advanced_chat_workflow_run_pagination_fields_copy = advanced_chat_workflow_run_pagination_fields.copy() -advanced_chat_workflow_run_pagination_fields_copy["data"] = fields.List( - fields.Nested(advanced_chat_workflow_run_for_list_model), attribute="data" -) -advanced_chat_workflow_run_pagination_model = console_ns.model( - "AdvancedChatWorkflowRunPagination", advanced_chat_workflow_run_pagination_fields_copy -) - -workflow_run_pagination_fields_copy = workflow_run_pagination_fields.copy() -workflow_run_pagination_fields_copy["data"] = fields.List(fields.Nested(workflow_run_for_list_model), attribute="data") -workflow_run_pagination_model = console_ns.model("WorkflowRunPagination", workflow_run_pagination_fields_copy) - -workflow_run_node_execution_list_fields_copy = workflow_run_node_execution_list_fields.copy() -workflow_run_node_execution_list_fields_copy["data"] = fields.List(fields.Nested(workflow_run_node_execution_model)) -workflow_run_node_execution_list_model = console_ns.model( - "WorkflowRunNodeExecutionList", workflow_run_node_execution_list_fields_copy -) - -workflow_run_export_fields = console_ns.model( - "WorkflowRunExport", - { - "status": fields.String(description="Export 
status: success/failed"), - "presigned_url": fields.String(description="Pre-signed URL for download", required=False), - "presigned_url_expires_at": fields.String(description="Pre-signed URL expiration time", required=False), - }, -) - -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class WorkflowRunListQuery(BaseModel): last_id: str | None = Field(default=None, description="Last run ID for pagination") @@ -136,7 +58,7 @@ class WorkflowRunListQuery(BaseModel): default=None, description="Workflow run status filter" ) triggered_from: Literal["debugging", "app-run"] | None = Field( - default=None, description="Filter by trigger source: debugging or app-run" + default=None, description="Filter by trigger source: debugging or app-run. Default: debugging" ) @field_validator("last_id") @@ -151,9 +73,15 @@ class WorkflowRunCountQuery(BaseModel): status: Literal["running", "succeeded", "failed", "stopped", "partial-succeeded"] | None = Field( default=None, description="Workflow run status filter" ) - time_range: str | None = Field(default=None, description="Time range filter (e.g., 7d, 4h, 30m, 30s)") + time_range: str | None = Field( + default=None, + description=( + "Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), " + "30m (30 minutes), 30s (30 seconds). Filters by created_at field." + ), + ) triggered_from: Literal["debugging", "app-run"] | None = Field( - default=None, description="Filter by trigger source: debugging or app-run" + default=None, description="Filter by trigger source: debugging or app-run. 
Default: debugging" ) @field_validator("time_range") @@ -164,56 +92,69 @@ class WorkflowRunCountQuery(BaseModel): return time_duration(value) -console_ns.schema_model( - WorkflowRunListQuery.__name__, WorkflowRunListQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) -console_ns.schema_model( - WorkflowRunCountQuery.__name__, - WorkflowRunCountQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) +class WorkflowRunExportResponse(ResponseModel): + status: str = Field(description="Export status: success/failed") + presigned_url: str | None = Field(default=None, description="Pre-signed URL for download") + presigned_url_expires_at: str | None = Field(default=None, description="Pre-signed URL expiration time") -class HumanInputPauseTypeResponse(TypedDict): +class HumanInputPauseTypeResponse(ResponseModel): type: Literal["human_input"] form_id: str - backstage_input_url: str | None + backstage_input_url: str | None = None -class PausedNodeResponse(TypedDict): +class PausedNodeResponse(ResponseModel): node_id: str node_title: str pause_type: HumanInputPauseTypeResponse -class WorkflowPauseDetailsResponse(TypedDict): - paused_at: str | None +class WorkflowPauseDetailsResponse(ResponseModel): + paused_at: str | None = None paused_nodes: list[PausedNodeResponse] +register_schema_models( + console_ns, + WorkflowRunListQuery, + WorkflowRunCountQuery, +) +register_response_schema_models( + console_ns, + AdvancedChatWorkflowRunPaginationResponse, + WorkflowRunPaginationResponse, + WorkflowRunCountResponse, + WorkflowRunDetailResponse, + WorkflowRunNodeExecutionResponse, + WorkflowRunNodeExecutionListResponse, + WorkflowRunExportResponse, + HumanInputPauseTypeResponse, + PausedNodeResponse, + WorkflowPauseDetailsResponse, +) + + @console_ns.route("/apps//advanced-chat/workflow-runs") class AdvancedChatAppWorkflowRunListApi(Resource): @console_ns.doc("get_advanced_chat_workflow_runs") @console_ns.doc(description="Get advanced chat 
workflow run list") @console_ns.doc(params={"app_id": "Application ID"}) - @console_ns.doc(params={"last_id": "Last run ID for pagination", "limit": "Number of items per page (1-100)"}) - @console_ns.doc( - params={"status": "Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded"} + @console_ns.doc(params=query_params_from_model(WorkflowRunListQuery)) + @console_ns.response( + 200, + "Workflow runs retrieved successfully", + console_ns.models[AdvancedChatWorkflowRunPaginationResponse.__name__], ) - @console_ns.doc( - params={"triggered_from": "Filter by trigger source (optional): debugging or app-run. Default: debugging"} - ) - @console_ns.expect(console_ns.models[WorkflowRunListQuery.__name__]) - @console_ns.response(200, "Workflow runs retrieved successfully", advanced_chat_workflow_run_pagination_model) @setup_required @login_required @account_initialization_required @get_app_model(mode=[AppMode.ADVANCED_CHAT]) - @marshal_with(advanced_chat_workflow_run_pagination_model) def get(self, app_model: App): """ Get advanced chat app workflow run list """ - args_model = WorkflowRunListQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args_model = WorkflowRunListQuery.model_validate(request.args.to_dict(flat=True)) args: WorkflowRunListArgs = {"limit": args_model.limit} if args_model.last_id is not None: args["last_id"] = args_model.last_id @@ -232,7 +173,9 @@ class AdvancedChatAppWorkflowRunListApi(Resource): app_model=app_model, args=args, triggered_from=triggered_from ) - return result + return AdvancedChatWorkflowRunPaginationResponse.model_validate(result, from_attributes=True).model_dump( + mode="json" + ) @console_ns.route("/apps//workflow-runs//export") @@ -240,7 +183,7 @@ class WorkflowRunExportApi(Resource): @console_ns.doc("get_workflow_run_export_url") @console_ns.doc(description="Generate a download URL for an archived workflow run.") @console_ns.doc(params={"app_id": "Application ID", "run_id": "Workflow 
run ID"}) - @console_ns.response(200, "Export URL generated", workflow_run_export_fields) + @console_ns.response(200, "Export URL generated", console_ns.models[WorkflowRunExportResponse.__name__]) @setup_required @login_required @account_initialization_required @@ -278,11 +221,14 @@ class WorkflowRunExportApi(Resource): expires_in=EXPORT_SIGNED_URL_EXPIRE_SECONDS, ) expires_at = datetime.now(UTC) + timedelta(seconds=EXPORT_SIGNED_URL_EXPIRE_SECONDS) - return { - "status": "success", - "presigned_url": presigned_url, - "presigned_url_expires_at": expires_at.isoformat(), - }, 200 + response = WorkflowRunExportResponse.model_validate( + { + "status": "success", + "presigned_url": presigned_url, + "presigned_url_expires_at": expires_at.isoformat(), + } + ) + return response.model_dump(mode="json"), 200 @console_ns.route("/apps//advanced-chat/workflow-runs/count") @@ -290,32 +236,21 @@ class AdvancedChatAppWorkflowRunCountApi(Resource): @console_ns.doc("get_advanced_chat_workflow_runs_count") @console_ns.doc(description="Get advanced chat workflow runs count statistics") @console_ns.doc(params={"app_id": "Application ID"}) - @console_ns.doc( - params={"status": "Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded"} + @console_ns.doc(params=query_params_from_model(WorkflowRunCountQuery)) + @console_ns.response( + 200, + "Workflow runs count retrieved successfully", + console_ns.models[WorkflowRunCountResponse.__name__], ) - @console_ns.doc( - params={ - "time_range": ( - "Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), " - "30m (30 minutes), 30s (30 seconds). Filters by created_at field." - ) - } - ) - @console_ns.doc( - params={"triggered_from": "Filter by trigger source (optional): debugging or app-run. 
Default: debugging"} - ) - @console_ns.response(200, "Workflow runs count retrieved successfully", workflow_run_count_model) - @console_ns.expect(console_ns.models[WorkflowRunCountQuery.__name__]) @setup_required @login_required @account_initialization_required @get_app_model(mode=[AppMode.ADVANCED_CHAT]) - @marshal_with(workflow_run_count_model) def get(self, app_model: App): """ Get advanced chat workflow runs count statistics """ - args_model = WorkflowRunCountQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args_model = WorkflowRunCountQuery.model_validate(request.args.to_dict(flat=True)) args = args_model.model_dump(exclude_none=True) # Default to DEBUGGING if not specified @@ -333,7 +268,7 @@ class AdvancedChatAppWorkflowRunCountApi(Resource): triggered_from=triggered_from, ) - return result + return WorkflowRunCountResponse.model_validate(result).model_dump(mode="json") @console_ns.route("/apps//workflow-runs") @@ -341,25 +276,21 @@ class WorkflowRunListApi(Resource): @console_ns.doc("get_workflow_runs") @console_ns.doc(description="Get workflow run list") @console_ns.doc(params={"app_id": "Application ID"}) - @console_ns.doc(params={"last_id": "Last run ID for pagination", "limit": "Number of items per page (1-100)"}) - @console_ns.doc( - params={"status": "Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded"} + @console_ns.doc(params=query_params_from_model(WorkflowRunListQuery)) + @console_ns.response( + 200, + "Workflow runs retrieved successfully", + console_ns.models[WorkflowRunPaginationResponse.__name__], ) - @console_ns.doc( - params={"triggered_from": "Filter by trigger source (optional): debugging or app-run. 
Default: debugging"} - ) - @console_ns.response(200, "Workflow runs retrieved successfully", workflow_run_pagination_model) - @console_ns.expect(console_ns.models[WorkflowRunListQuery.__name__]) @setup_required @login_required @account_initialization_required @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW]) - @marshal_with(workflow_run_pagination_model) def get(self, app_model: App): """ Get workflow run list """ - args_model = WorkflowRunListQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args_model = WorkflowRunListQuery.model_validate(request.args.to_dict(flat=True)) args: WorkflowRunListArgs = {"limit": args_model.limit} if args_model.last_id is not None: args["last_id"] = args_model.last_id @@ -378,7 +309,7 @@ class WorkflowRunListApi(Resource): app_model=app_model, args=args, triggered_from=triggered_from ) - return result + return WorkflowRunPaginationResponse.model_validate(result, from_attributes=True).model_dump(mode="json") @console_ns.route("/apps//workflow-runs/count") @@ -386,32 +317,21 @@ class WorkflowRunCountApi(Resource): @console_ns.doc("get_workflow_runs_count") @console_ns.doc(description="Get workflow runs count statistics") @console_ns.doc(params={"app_id": "Application ID"}) - @console_ns.doc( - params={"status": "Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded"} + @console_ns.doc(params=query_params_from_model(WorkflowRunCountQuery)) + @console_ns.response( + 200, + "Workflow runs count retrieved successfully", + console_ns.models[WorkflowRunCountResponse.__name__], ) - @console_ns.doc( - params={ - "time_range": ( - "Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), " - "30m (30 minutes), 30s (30 seconds). Filters by created_at field." - ) - } - ) - @console_ns.doc( - params={"triggered_from": "Filter by trigger source (optional): debugging or app-run. 
Default: debugging"} - ) - @console_ns.response(200, "Workflow runs count retrieved successfully", workflow_run_count_model) - @console_ns.expect(console_ns.models[WorkflowRunCountQuery.__name__]) @setup_required @login_required @account_initialization_required @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW]) - @marshal_with(workflow_run_count_model) def get(self, app_model: App): """ Get workflow runs count statistics """ - args_model = WorkflowRunCountQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args_model = WorkflowRunCountQuery.model_validate(request.args.to_dict(flat=True)) args = args_model.model_dump(exclude_none=True) # Default to DEBUGGING for workflow if not specified (backward compatibility) @@ -429,7 +349,7 @@ class WorkflowRunCountApi(Resource): triggered_from=triggered_from, ) - return result + return WorkflowRunCountResponse.model_validate(result).model_dump(mode="json") @console_ns.route("/apps//workflow-runs/") @@ -437,13 +357,16 @@ class WorkflowRunDetailApi(Resource): @console_ns.doc("get_workflow_run_detail") @console_ns.doc(description="Get workflow run detail") @console_ns.doc(params={"app_id": "Application ID", "run_id": "Workflow run ID"}) - @console_ns.response(200, "Workflow run detail retrieved successfully", workflow_run_detail_model) + @console_ns.response( + 200, + "Workflow run detail retrieved successfully", + console_ns.models[WorkflowRunDetailResponse.__name__], + ) @console_ns.response(404, "Workflow run not found") @setup_required @login_required @account_initialization_required @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW]) - @marshal_with(workflow_run_detail_model) def get(self, app_model: App, run_id): """ Get workflow run detail @@ -452,8 +375,10 @@ class WorkflowRunDetailApi(Resource): workflow_run_service = WorkflowRunService() workflow_run = workflow_run_service.get_workflow_run(app_model=app_model, run_id=run_id) + if workflow_run is None: + raise 
NotFoundError("Workflow run not found") - return workflow_run + return WorkflowRunDetailResponse.model_validate(workflow_run, from_attributes=True).model_dump(mode="json") @console_ns.route("/apps//workflow-runs//node-executions") @@ -461,13 +386,16 @@ class WorkflowRunNodeExecutionListApi(Resource): @console_ns.doc("get_workflow_run_node_executions") @console_ns.doc(description="Get workflow run node execution list") @console_ns.doc(params={"app_id": "Application ID", "run_id": "Workflow run ID"}) - @console_ns.response(200, "Node executions retrieved successfully", workflow_run_node_execution_list_model) + @console_ns.response( + 200, + "Node executions retrieved successfully", + console_ns.models[WorkflowRunNodeExecutionListResponse.__name__], + ) @console_ns.response(404, "Workflow run not found") @setup_required @login_required @account_initialization_required @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW]) - @marshal_with(workflow_run_node_execution_list_model) def get(self, app_model: App, run_id): """ Get workflow run node execution list @@ -482,13 +410,24 @@ class WorkflowRunNodeExecutionListApi(Resource): user=user, ) - return {"data": node_executions} + return WorkflowRunNodeExecutionListResponse.model_validate( + {"data": node_executions}, from_attributes=True + ).model_dump(mode="json") @console_ns.route("/workflow//pause-details") class ConsoleWorkflowPauseDetailsApi(Resource): """Console API for getting workflow pause details.""" + @console_ns.doc("get_workflow_pause_details") + @console_ns.doc(description="Get workflow pause details") + @console_ns.doc(params={"workflow_run_id": "Workflow run ID"}) + @console_ns.response( + 200, + "Workflow pause details retrieved successfully", + console_ns.models[WorkflowPauseDetailsResponse.__name__], + ) + @console_ns.response(404, "Workflow run not found") @setup_required @login_required @account_initialization_required @@ -515,11 +454,8 @@ class ConsoleWorkflowPauseDetailsApi(Resource): # Check 
if workflow is suspended is_paused = workflow_run.status == WorkflowExecutionStatus.PAUSED if not is_paused: - empty_response: WorkflowPauseDetailsResponse = { - "paused_at": None, - "paused_nodes": [], - } - return empty_response, 200 + empty_response = WorkflowPauseDetailsResponse(paused_at=None, paused_nodes=[]) + return empty_response.model_dump(mode="json"), 200 pause_entity = workflow_run_repo.get_workflow_pause(workflow_run_id) pause_reasons = pause_entity.get_pause_reasons() if pause_entity else [] @@ -530,27 +466,25 @@ class ConsoleWorkflowPauseDetailsApi(Resource): # Build response paused_at = pause_entity.paused_at if pause_entity else None paused_nodes: list[PausedNodeResponse] = [] - response: WorkflowPauseDetailsResponse = { - "paused_at": paused_at.isoformat() + "Z" if paused_at else None, - "paused_nodes": paused_nodes, - } for reason in pause_reasons: if isinstance(reason, HumanInputRequired): paused_nodes.append( - { - "node_id": reason.node_id, - "node_title": reason.node_title, - "pause_type": { - "type": "human_input", - "form_id": reason.form_id, - "backstage_input_url": _build_backstage_input_url( - form_tokens_by_form_id.get(reason.form_id) - ), - }, - } + PausedNodeResponse( + node_id=reason.node_id, + node_title=reason.node_title, + pause_type=HumanInputPauseTypeResponse( + type="human_input", + form_id=reason.form_id, + backstage_input_url=_build_backstage_input_url(form_tokens_by_form_id.get(reason.form_id)), + ), + ) ) else: raise AssertionError("unimplemented.") - return response, 200 + response = WorkflowPauseDetailsResponse( + paused_at=paused_at.isoformat() + "Z" if paused_at else None, + paused_nodes=paused_nodes, + ) + return response.model_dump(mode="json"), 200 diff --git a/api/controllers/console/app/workflow_statistic.py b/api/controllers/console/app/workflow_statistic.py index e48cf42762..ca899d8784 100644 --- a/api/controllers/console/app/workflow_statistic.py +++ b/api/controllers/console/app/workflow_statistic.py @@ -3,6 
+3,7 @@ from flask_restx import Resource from pydantic import BaseModel, Field, field_validator from sqlalchemy.orm import sessionmaker +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.app.wraps import get_app_model from controllers.console.wraps import account_initialization_required, setup_required @@ -13,8 +14,6 @@ from models.enums import WorkflowRunTriggeredFrom from models.model import AppMode from repositories.factory import DifyAPIRepositoryFactory -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class WorkflowStatisticQuery(BaseModel): start: str | None = Field(default=None, description="Start date and time (YYYY-MM-DD HH:MM)") @@ -28,10 +27,7 @@ class WorkflowStatisticQuery(BaseModel): return value -console_ns.schema_model( - WorkflowStatisticQuery.__name__, - WorkflowStatisticQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) +register_schema_models(console_ns, WorkflowStatisticQuery) @console_ns.route("/apps//workflow/statistics/daily-conversations") @@ -53,7 +49,7 @@ class WorkflowDailyRunsStatistic(Resource): def get(self, app_model): account, _ = current_account_with_tenant() - args = WorkflowStatisticQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = WorkflowStatisticQuery.model_validate(request.args.to_dict(flat=True)) assert account.timezone is not None @@ -93,7 +89,7 @@ class WorkflowDailyTerminalsStatistic(Resource): def get(self, app_model): account, _ = current_account_with_tenant() - args = WorkflowStatisticQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = WorkflowStatisticQuery.model_validate(request.args.to_dict(flat=True)) assert account.timezone is not None @@ -133,7 +129,7 @@ class WorkflowDailyTokenCostStatistic(Resource): def get(self, app_model): account, _ = current_account_with_tenant() - args = 
WorkflowStatisticQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = WorkflowStatisticQuery.model_validate(request.args.to_dict(flat=True)) assert account.timezone is not None @@ -173,7 +169,7 @@ class WorkflowAverageAppInteractionStatistic(Resource): def get(self, app_model): account, _ = current_account_with_tenant() - args = WorkflowStatisticQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = WorkflowStatisticQuery.model_validate(request.args.to_dict(flat=True)) assert account.timezone is not None diff --git a/api/controllers/console/app/workflow_trigger.py b/api/controllers/console/app/workflow_trigger.py index a6715fa200..a80b4f5d0c 100644 --- a/api/controllers/console/app/workflow_trigger.py +++ b/api/controllers/console/app/workflow_trigger.py @@ -94,7 +94,7 @@ class WebhookTriggerApi(Resource): @console_ns.response(200, "Success", console_ns.models[WebhookTriggerResponse.__name__]) def get(self, app_model: App): """Get webhook trigger for a node""" - args = Parser.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = Parser.model_validate(request.args.to_dict(flat=True)) node_id = args.node_id diff --git a/api/controllers/console/auth/activate.py b/api/controllers/console/auth/activate.py index f7061f820f..0c05cf2fe3 100644 --- a/api/controllers/console/auth/activate.py +++ b/api/controllers/console/auth/activate.py @@ -63,7 +63,7 @@ class ActivateCheckApi(Resource): console_ns.models[ActivationCheckResponse.__name__], ) def get(self): - args = ActivateCheckQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ActivateCheckQuery.model_validate(request.args.to_dict(flat=True)) workspaceId = args.workspace_id token = args.token diff --git a/api/controllers/console/auth/data_source_bearer_auth.py b/api/controllers/console/auth/data_source_bearer_auth.py index 905d0daef0..db0d36af6e 100644 --- a/api/controllers/console/auth/data_source_bearer_auth.py +++ 
b/api/controllers/console/auth/data_source_bearer_auth.py @@ -1,6 +1,7 @@ from flask_restx import Resource from pydantic import BaseModel, Field +from controllers.common.schema import register_schema_models from libs.login import current_account_with_tenant, login_required from services.auth.api_key_auth_service import ApiKeyAuthService @@ -8,8 +9,6 @@ from .. import console_ns from ..auth.error import ApiKeyAuthFailedError from ..wraps import account_initialization_required, is_admin_or_owner_required, setup_required -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class ApiKeyAuthBindingPayload(BaseModel): category: str = Field(...) @@ -17,10 +16,7 @@ class ApiKeyAuthBindingPayload(BaseModel): credentials: dict = Field(...) -console_ns.schema_model( - ApiKeyAuthBindingPayload.__name__, - ApiKeyAuthBindingPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) +register_schema_models(console_ns, ApiKeyAuthBindingPayload) @console_ns.route("/api-key-auth/data-source") diff --git a/api/controllers/console/auth/email_register.py b/api/controllers/console/auth/email_register.py index 1fd781b4fc..f6b8aedf22 100644 --- a/api/controllers/console/auth/email_register.py +++ b/api/controllers/console/auth/email_register.py @@ -4,6 +4,7 @@ from pydantic import BaseModel, Field, field_validator from configs import dify_config from constants.languages import languages +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.auth.error import ( EmailAlreadyInUseError, @@ -23,8 +24,6 @@ from services.errors.account import AccountNotFoundError, AccountRegisterError from ..error import AccountInFreezeError, EmailSendIpLimitError from ..wraps import email_password_login_enabled, email_register_enabled, setup_required -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class EmailRegisterSendPayload(BaseModel): email: EmailStr = Field(..., description="Email address") 
@@ -48,8 +47,7 @@ class EmailRegisterResetPayload(BaseModel): return valid_password(value) -for model in (EmailRegisterSendPayload, EmailRegisterValidityPayload, EmailRegisterResetPayload): - console_ns.schema_model(model.__name__, model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) +register_schema_models(console_ns, EmailRegisterSendPayload, EmailRegisterValidityPayload, EmailRegisterResetPayload) @console_ns.route("/email-register/send-email") diff --git a/api/controllers/console/auth/forgot_password.py b/api/controllers/console/auth/forgot_password.py index ed390a5f89..c34dd1ac85 100644 --- a/api/controllers/console/auth/forgot_password.py +++ b/api/controllers/console/auth/forgot_password.py @@ -28,8 +28,6 @@ from services.entities.auth_entities import ( ) from services.feature_service import FeatureService -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class ForgotPasswordEmailResponse(BaseModel): result: str = Field(description="Operation result") diff --git a/api/controllers/console/auth/login.py b/api/controllers/console/auth/login.py index 8216b3d0da..19c98f3a1a 100644 --- a/api/controllers/console/auth/login.py +++ b/api/controllers/console/auth/login.py @@ -9,6 +9,7 @@ from werkzeug.exceptions import Unauthorized import services from configs import dify_config from constants.languages import get_valid_language +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.auth.error import ( AuthenticationFailedError, @@ -50,7 +51,6 @@ from services.errors.account import AccountRegisterError from services.errors.workspace import WorkSpaceNotAllowedCreateError, WorkspacesLimitExceededError from services.feature_service import FeatureService -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" logger = logging.getLogger(__name__) @@ -71,13 +71,7 @@ class EmailCodeLoginPayload(BaseModel): language: str | None = Field(default=None) -def reg(cls: 
type[BaseModel]): - console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) - - -reg(LoginPayload) -reg(EmailPayload) -reg(EmailCodeLoginPayload) +register_schema_models(console_ns, LoginPayload, EmailPayload, EmailCodeLoginPayload) @console_ns.route("/login") diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py index 91384d8785..0451409fdb 100644 --- a/api/controllers/console/datasets/datasets.py +++ b/api/controllers/console/datasets/datasets.py @@ -631,63 +631,63 @@ class DatasetIndexingEstimateApi(Resource): # validate args DocumentService.estimate_args_validate(args) extract_settings = [] - if args["info_list"]["data_source_type"] == "upload_file": - file_ids = args["info_list"]["file_info_list"]["file_ids"] - file_details = db.session.scalars( - select(UploadFile).where(UploadFile.tenant_id == current_tenant_id, UploadFile.id.in_(file_ids)) - ).all() + match args["info_list"]["data_source_type"]: + case "upload_file": + file_ids = args["info_list"]["file_info_list"]["file_ids"] + file_details = db.session.scalars( + select(UploadFile).where(UploadFile.tenant_id == current_tenant_id, UploadFile.id.in_(file_ids)) + ).all() + if file_details is None: + raise NotFound("File not found.") - if file_details is None: - raise NotFound("File not found.") - - if file_details: - for file_detail in file_details: + if file_details: + for file_detail in file_details: + extract_setting = ExtractSetting( + datasource_type=DatasourceType.FILE, + upload_file=file_detail, + document_model=args["doc_form"], + ) + extract_settings.append(extract_setting) + case "notion_import": + notion_info_list = args["info_list"]["notion_info_list"] + for notion_info in notion_info_list: + workspace_id = notion_info["workspace_id"] + credential_id = notion_info.get("credential_id") + for page in notion_info["pages"]: + extract_setting = ExtractSetting( + datasource_type=DatasourceType.NOTION, 
+ notion_info=NotionInfo.model_validate( + { + "credential_id": credential_id, + "notion_workspace_id": workspace_id, + "notion_obj_id": page["page_id"], + "notion_page_type": page["type"], + "tenant_id": current_tenant_id, + } + ), + document_model=args["doc_form"], + ) + extract_settings.append(extract_setting) + case "website_crawl": + website_info_list = args["info_list"]["website_info_list"] + for url in website_info_list["urls"]: extract_setting = ExtractSetting( - datasource_type=DatasourceType.FILE, - upload_file=file_detail, - document_model=args["doc_form"], - ) - extract_settings.append(extract_setting) - elif args["info_list"]["data_source_type"] == "notion_import": - notion_info_list = args["info_list"]["notion_info_list"] - for notion_info in notion_info_list: - workspace_id = notion_info["workspace_id"] - credential_id = notion_info.get("credential_id") - for page in notion_info["pages"]: - extract_setting = ExtractSetting( - datasource_type=DatasourceType.NOTION, - notion_info=NotionInfo.model_validate( + datasource_type=DatasourceType.WEBSITE, + website_info=WebsiteInfo.model_validate( { - "credential_id": credential_id, - "notion_workspace_id": workspace_id, - "notion_obj_id": page["page_id"], - "notion_page_type": page["type"], + "provider": website_info_list["provider"], + "job_id": website_info_list["job_id"], + "url": url, "tenant_id": current_tenant_id, + "mode": "crawl", + "only_main_content": website_info_list["only_main_content"], } ), document_model=args["doc_form"], ) extract_settings.append(extract_setting) - elif args["info_list"]["data_source_type"] == "website_crawl": - website_info_list = args["info_list"]["website_info_list"] - for url in website_info_list["urls"]: - extract_setting = ExtractSetting( - datasource_type=DatasourceType.WEBSITE, - website_info=WebsiteInfo.model_validate( - { - "provider": website_info_list["provider"], - "job_id": website_info_list["job_id"], - "url": url, - "tenant_id": current_tenant_id, - "mode": 
"crawl", - "only_main_content": website_info_list["only_main_content"], - } - ), - document_model=args["doc_form"], - ) - extract_settings.append(extract_setting) - else: - raise ValueError("Data source type not support") + case _: + raise ValueError("Data source type not support") indexing_runner = IndexingRunner() try: response = indexing_runner.indexing_estimate( diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index 3372a967d9..c4e13c41a5 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -369,28 +369,31 @@ class DatasetDocumentListApi(Resource): else: sort_logic = asc - if sort == "hit_count": - sub_query = ( - sa.select(DocumentSegment.document_id, sa.func.sum(DocumentSegment.hit_count).label("total_hit_count")) - .where(DocumentSegment.dataset_id == str(dataset_id)) - .group_by(DocumentSegment.document_id) - .subquery() - ) + match sort: + case "hit_count": + sub_query = ( + sa.select( + DocumentSegment.document_id, sa.func.sum(DocumentSegment.hit_count).label("total_hit_count") + ) + .where(DocumentSegment.dataset_id == str(dataset_id)) + .group_by(DocumentSegment.document_id) + .subquery() + ) - query = query.outerjoin(sub_query, sub_query.c.document_id == Document.id).order_by( - sort_logic(sa.func.coalesce(sub_query.c.total_hit_count, 0)), - sort_logic(Document.position), - ) - elif sort == "created_at": - query = query.order_by( - sort_logic(Document.created_at), - sort_logic(Document.position), - ) - else: - query = query.order_by( - desc(Document.created_at), - desc(Document.position), - ) + query = query.outerjoin(sub_query, sub_query.c.document_id == Document.id).order_by( + sort_logic(sa.func.coalesce(sub_query.c.total_hit_count, 0)), + sort_logic(Document.position), + ) + case "created_at": + query = query.order_by( + sort_logic(Document.created_at), + sort_logic(Document.position), + ) + case _: 
+ query = query.order_by( + desc(Document.created_at), + desc(Document.position), + ) paginated_documents = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False) documents = paginated_documents.items diff --git a/api/controllers/console/datasets/rag_pipeline/datasource_content_preview.py b/api/controllers/console/datasets/rag_pipeline/datasource_content_preview.py index 7caf5b52ed..a43caa8f56 100644 --- a/api/controllers/console/datasets/rag_pipeline/datasource_content_preview.py +++ b/api/controllers/console/datasets/rag_pipeline/datasource_content_preview.py @@ -4,6 +4,7 @@ from flask_restx import ( # type: ignore from pydantic import BaseModel from werkzeug.exceptions import Forbidden +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.datasets.wraps import get_rag_pipeline from controllers.console.wraps import account_initialization_required, setup_required @@ -12,8 +13,6 @@ from models import Account from models.dataset import Pipeline from services.rag_pipeline.rag_pipeline import RagPipelineService -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class Parser(BaseModel): inputs: dict @@ -21,7 +20,7 @@ class Parser(BaseModel): credential_id: str | None = None -console_ns.schema_model(Parser.__name__, Parser.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) +register_schema_models(console_ns, Parser) @console_ns.route("/rag/pipelines//workflows/published/datasource/nodes//preview") diff --git a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py index ee146e8287..8eff32c555 100644 --- a/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py +++ b/api/controllers/console/datasets/rag_pipeline/rag_pipeline_workflow.py @@ -10,7 +10,7 @@ from werkzeug.exceptions import BadRequest, Forbidden, InternalServerError, NotF 
import services from controllers.common.controller_schemas import DefaultBlockConfigQuery, WorkflowListQuery, WorkflowUpdatePayload -from controllers.common.schema import register_schema_models +from controllers.common.schema import register_response_schema_models, register_schema_models from controllers.console import console_ns from controllers.console.app.error import ( ConversationCompletedError, @@ -22,12 +22,6 @@ from controllers.console.app.workflow import ( workflow_model, workflow_pagination_model, ) -from controllers.console.app.workflow_run import ( - workflow_run_detail_model, - workflow_run_node_execution_list_model, - workflow_run_node_execution_model, - workflow_run_pagination_model, -) from controllers.console.datasets.wraps import get_rag_pipeline from controllers.console.wraps import ( account_initialization_required, @@ -40,6 +34,12 @@ from core.app.apps.pipeline.pipeline_generator import PipelineGenerator from core.app.entities.app_invoke_entities import InvokeFrom from extensions.ext_database import db from factories import variable_factory +from fields.workflow_run_fields import ( + WorkflowRunDetailResponse, + WorkflowRunNodeExecutionListResponse, + WorkflowRunNodeExecutionResponse, + WorkflowRunPaginationResponse, +) from graphon.model_runtime.utils.encoders import jsonable_encoder from libs import helper from libs.helper import TimestampField, UUIDStrOrEmpty @@ -131,6 +131,13 @@ register_schema_models( DatasourceVariablesPayload, RagPipelineRecommendedPluginQuery, ) +register_response_schema_models( + console_ns, + WorkflowRunDetailResponse, + WorkflowRunNodeExecutionListResponse, + WorkflowRunNodeExecutionResponse, + WorkflowRunPaginationResponse, +) @console_ns.route("/rag/pipelines//workflows/draft") @@ -415,12 +422,16 @@ class RagPipelineDraftDatasourceNodeRunApi(Resource): @console_ns.route("/rag/pipelines//workflows/draft/nodes//run") class RagPipelineDraftNodeRunApi(Resource): 
@console_ns.expect(console_ns.models[NodeRunRequiredPayload.__name__]) + @console_ns.response( + 200, + "Node run started successfully", + console_ns.models[WorkflowRunNodeExecutionResponse.__name__], + ) @setup_required @login_required @edit_permission_required @account_initialization_required @get_rag_pipeline - @marshal_with(workflow_run_node_execution_model) def post(self, pipeline: Pipeline, node_id: str): """ Run draft workflow node @@ -439,7 +450,9 @@ class RagPipelineDraftNodeRunApi(Resource): if workflow_node_execution is None: raise ValueError("Workflow node execution not found") - return workflow_node_execution + return WorkflowRunNodeExecutionResponse.model_validate( + workflow_node_execution, from_attributes=True + ).model_dump(mode="json") @console_ns.route("/rag/pipelines//workflow-runs/tasks//stop") @@ -778,11 +791,15 @@ class DraftRagPipelineSecondStepApi(Resource): @console_ns.route("/rag/pipelines//workflow-runs") class RagPipelineWorkflowRunListApi(Resource): + @console_ns.response( + 200, + "Workflow runs retrieved successfully", + console_ns.models[WorkflowRunPaginationResponse.__name__], + ) @setup_required @login_required @account_initialization_required @get_rag_pipeline - @marshal_with(workflow_run_pagination_model) def get(self, pipeline: Pipeline): """ Get workflow run list @@ -801,16 +818,20 @@ class RagPipelineWorkflowRunListApi(Resource): rag_pipeline_service = RagPipelineService() result = rag_pipeline_service.get_rag_pipeline_paginate_workflow_runs(pipeline=pipeline, args=args) - return result + return WorkflowRunPaginationResponse.model_validate(result, from_attributes=True).model_dump(mode="json") @console_ns.route("/rag/pipelines//workflow-runs/") class RagPipelineWorkflowRunDetailApi(Resource): + @console_ns.response( + 200, + "Workflow run detail retrieved successfully", + console_ns.models[WorkflowRunDetailResponse.__name__], + ) @setup_required @login_required @account_initialization_required @get_rag_pipeline - 
@marshal_with(workflow_run_detail_model) def get(self, pipeline: Pipeline, run_id): """ Get workflow run detail @@ -819,17 +840,23 @@ class RagPipelineWorkflowRunDetailApi(Resource): rag_pipeline_service = RagPipelineService() workflow_run = rag_pipeline_service.get_rag_pipeline_workflow_run(pipeline=pipeline, run_id=run_id) + if workflow_run is None: + raise NotFound("Workflow run not found") - return workflow_run + return WorkflowRunDetailResponse.model_validate(workflow_run, from_attributes=True).model_dump(mode="json") @console_ns.route("/rag/pipelines//workflow-runs//node-executions") class RagPipelineWorkflowRunNodeExecutionListApi(Resource): + @console_ns.response( + 200, + "Node executions retrieved successfully", + console_ns.models[WorkflowRunNodeExecutionListResponse.__name__], + ) @setup_required @login_required @account_initialization_required @get_rag_pipeline - @marshal_with(workflow_run_node_execution_list_model) def get(self, pipeline: Pipeline, run_id: str): """ Get workflow run node execution list @@ -844,7 +871,9 @@ class RagPipelineWorkflowRunNodeExecutionListApi(Resource): user=user, ) - return {"data": node_executions} + return WorkflowRunNodeExecutionListResponse.model_validate( + {"data": node_executions}, from_attributes=True + ).model_dump(mode="json") @console_ns.route("/rag/pipelines/datasource-plugins") @@ -859,11 +888,15 @@ class DatasourceListApi(Resource): @console_ns.route("/rag/pipelines//workflows/draft/nodes//last-run") class RagPipelineWorkflowLastRunApi(Resource): + @console_ns.response( + 200, + "Node last run retrieved successfully", + console_ns.models[WorkflowRunNodeExecutionResponse.__name__], + ) @setup_required @login_required @account_initialization_required @get_rag_pipeline - @marshal_with(workflow_run_node_execution_model) def get(self, pipeline: Pipeline, node_id: str): rag_pipeline_service = RagPipelineService() workflow = rag_pipeline_service.get_draft_workflow(pipeline=pipeline) @@ -876,7 +909,7 @@ class 
RagPipelineWorkflowLastRunApi(Resource): ) if node_exec is None: raise NotFound("last run not found") - return node_exec + return WorkflowRunNodeExecutionResponse.model_validate(node_exec, from_attributes=True).model_dump(mode="json") @console_ns.route("/rag/pipelines/transform/datasets/") @@ -899,12 +932,16 @@ class RagPipelineTransformApi(Resource): @console_ns.route("/rag/pipelines//workflows/draft/datasource/variables-inspect") class RagPipelineDatasourceVariableApi(Resource): @console_ns.expect(console_ns.models[DatasourceVariablesPayload.__name__]) + @console_ns.response( + 200, + "Datasource variables set successfully", + console_ns.models[WorkflowRunNodeExecutionResponse.__name__], + ) @setup_required @login_required @account_initialization_required @get_rag_pipeline @edit_permission_required - @marshal_with(workflow_run_node_execution_model) def post(self, pipeline: Pipeline): """ Set datasource variables @@ -918,7 +955,9 @@ class RagPipelineDatasourceVariableApi(Resource): args=args, current_user=current_user, ) - return workflow_node_execution + return WorkflowRunNodeExecutionResponse.model_validate( + workflow_node_execution, from_attributes=True + ).model_dump(mode="json") @console_ns.route("/rag/pipelines/recommended-plugins") diff --git a/api/controllers/console/explore/recommended_app.py b/api/controllers/console/explore/recommended_app.py index 55bd679b48..5821b91489 100644 --- a/api/controllers/console/explore/recommended_app.py +++ b/api/controllers/console/explore/recommended_app.py @@ -1,11 +1,12 @@ from typing import Any +from uuid import UUID from flask import request from flask_restx import Resource from pydantic import BaseModel, Field, computed_field, field_validator from constants.languages import languages -from controllers.common.schema import register_schema_models +from controllers.common.schema import query_params_from_model, register_schema_models from controllers.console import console_ns from controllers.console.wraps import 
account_initialization_required from fields.base import ResponseModel @@ -15,7 +16,7 @@ from services.recommended_app_service import RecommendedAppService class RecommendedAppsQuery(BaseModel): - language: str | None = Field(default=None) + language: str | None = Field(default=None, description="Language code for recommended app localization") class RecommendedAppInfoResponse(ResponseModel): @@ -52,7 +53,7 @@ class RecommendedAppResponse(ResponseModel): copyright: str | None = None privacy_policy: str | None = None custom_disclaimer: str | None = None - category: str | None = None + categories: list[str] = Field(default_factory=list) position: int | None = None is_listed: bool | None = None can_trial: bool | None = None @@ -74,13 +75,13 @@ register_schema_models( @console_ns.route("/explore/apps") class RecommendedAppListApi(Resource): - @console_ns.expect(console_ns.models[RecommendedAppsQuery.__name__]) + @console_ns.doc(params=query_params_from_model(RecommendedAppsQuery)) @console_ns.response(200, "Success", console_ns.models[RecommendedAppListResponse.__name__]) @login_required @account_initialization_required def get(self): # language args - args = RecommendedAppsQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = RecommendedAppsQuery.model_validate(request.args.to_dict(flat=True)) language = args.language if language and language in languages: language_prefix = language @@ -99,6 +100,5 @@ class RecommendedAppListApi(Resource): class RecommendedAppApi(Resource): @login_required @account_initialization_required - def get(self, app_id): - app_id = str(app_id) - return RecommendedAppService.get_recommend_app_detail(app_id) + def get(self, app_id: UUID): + return RecommendedAppService.get_recommend_app_detail(str(app_id)) diff --git a/api/controllers/console/explore/trial.py b/api/controllers/console/explore/trial.py index 1456301a24..26b48ec599 100644 --- a/api/controllers/console/explore/trial.py +++ 
b/api/controllers/console/explore/trial.py @@ -10,7 +10,7 @@ from werkzeug.exceptions import Forbidden, InternalServerError, NotFound import services from controllers.common.fields import Parameters as ParametersResponse from controllers.common.fields import Site as SiteResponse -from controllers.common.schema import get_or_create_model +from controllers.common.schema import get_or_create_model, register_schema_models from controllers.console import console_ns from controllers.console.app.error import ( AppUnavailableError, @@ -106,7 +106,7 @@ app_detail_fields_with_site_copy["tags"] = fields.List(fields.Nested(tag_model)) app_detail_fields_with_site_copy["site"] = fields.Nested(site_model) app_detail_with_site_model = get_or_create_model("TrialAppDetailWithSite", app_detail_fields_with_site_copy) -simple_account_model = get_or_create_model("SimpleAccount", simple_account_fields) +simple_account_model = get_or_create_model("TrialSimpleAccount", simple_account_fields) conversation_variable_model = get_or_create_model("TrialConversationVariable", conversation_variable_fields) pipeline_variable_model = get_or_create_model("TrialPipelineVariable", pipeline_variable_fields) @@ -120,10 +120,6 @@ workflow_fields_copy["rag_pipeline_variables"] = fields.List(fields.Nested(pipel workflow_model = get_or_create_model("TrialWorkflow", workflow_fields_copy) -# Pydantic models for request validation -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - - class WorkflowRunRequest(BaseModel): inputs: dict files: list | None = None @@ -153,19 +149,7 @@ class CompletionRequest(BaseModel): retriever_from: str = "explore_app" -# Register schemas for Swagger documentation -console_ns.schema_model( - WorkflowRunRequest.__name__, WorkflowRunRequest.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) -console_ns.schema_model( - ChatRequest.__name__, ChatRequest.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) -console_ns.schema_model( - 
TextToSpeechRequest.__name__, TextToSpeechRequest.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) -console_ns.schema_model( - CompletionRequest.__name__, CompletionRequest.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) +register_schema_models(console_ns, WorkflowRunRequest, ChatRequest, TextToSpeechRequest, CompletionRequest) class TrialAppWorkflowRunApi(TrialAppResource): diff --git a/api/controllers/console/extension.py b/api/controllers/console/extension.py index 7a6356d052..9ffc18e4c2 100644 --- a/api/controllers/console/extension.py +++ b/api/controllers/console/extension.py @@ -89,7 +89,7 @@ class CodeBasedExtensionAPI(Resource): @login_required @account_initialization_required def get(self): - query = CodeBasedExtensionQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + query = CodeBasedExtensionQuery.model_validate(request.args.to_dict(flat=True)) return CodeBasedExtensionResponse( module=query.module, diff --git a/api/controllers/console/files.py b/api/controllers/console/files.py index 109a3cd0d3..9fa5b0f5c1 100644 --- a/api/controllers/console/files.py +++ b/api/controllers/console/files.py @@ -82,7 +82,7 @@ class FileApi(Resource): try: upload_file = FileService(db.engine).upload_file( filename=file.filename, - content=file.read(), + content=file.stream.read(), mimetype=file.mimetype, user=current_user, source=source, diff --git a/api/controllers/console/tag/tags.py b/api/controllers/console/tag/tags.py index f73e2da54e..b9e876c906 100644 --- a/api/controllers/console/tag/tags.py +++ b/api/controllers/console/tag/tags.py @@ -32,12 +32,7 @@ class TagBindingPayload(BaseModel): class TagBindingRemovePayload(BaseModel): - tag_id: str = Field(description="Tag ID to remove") - target_id: str = Field(description="Target ID to unbind tag from") - type: TagType = Field(description="Tag type") - - -class TagBindingItemDeletePayload(BaseModel): + tag_ids: list[str] = Field(description="Tag IDs to remove", 
min_length=1) target_id: str = Field(description="Target ID to unbind tag from") type: TagType = Field(description="Tag type") @@ -75,7 +70,6 @@ register_schema_models( TagBasePayload, TagBindingPayload, TagBindingRemovePayload, - TagBindingItemDeletePayload, TagListQueryParam, TagResponse, ) @@ -184,13 +178,13 @@ def _create_tag_bindings() -> tuple[dict[str, str], int]: return {"result": "success"}, 200 -def _remove_tag_binding() -> tuple[dict[str, str], int]: +def _remove_tag_bindings() -> tuple[dict[str, str], int]: _require_tag_binding_edit_permission() payload = TagBindingRemovePayload.model_validate(console_ns.payload or {}) TagService.delete_tag_binding( TagBindingDeletePayload( - tag_id=payload.tag_id, + tag_ids=payload.tag_ids, target_id=payload.target_id, type=payload.type, ) @@ -211,54 +205,15 @@ class TagBindingCollectionApi(Resource): return _create_tag_bindings() -@console_ns.route("/tag-bindings/") -class TagBindingItemApi(Resource): - """Canonical item resource for tag binding deletion.""" - - @console_ns.doc("delete_tag_binding") - @console_ns.doc(params={"id": "Tag ID"}) - @console_ns.expect(console_ns.models[TagBindingItemDeletePayload.__name__]) - @setup_required - @login_required - @account_initialization_required - def delete(self, id): - _require_tag_binding_edit_permission() - payload = TagBindingItemDeletePayload.model_validate(console_ns.payload or {}) - TagService.delete_tag_binding( - TagBindingDeletePayload( - tag_id=str(id), - target_id=payload.target_id, - type=payload.type, - ) - ) - return {"result": "success"}, 200 - - -@console_ns.route("/tag-bindings/create") -class DeprecatedTagBindingCreateApi(Resource): - """Deprecated verb-based alias for tag binding creation.""" - - @console_ns.doc("create_tag_binding_deprecated") - @console_ns.doc(deprecated=True) - @console_ns.doc(description="Deprecated legacy alias. 
Use POST /tag-bindings instead.") - @console_ns.expect(console_ns.models[TagBindingPayload.__name__]) - @setup_required - @login_required - @account_initialization_required - def post(self): - return _create_tag_bindings() - - @console_ns.route("/tag-bindings/remove") -class DeprecatedTagBindingRemoveApi(Resource): - """Deprecated verb-based alias for tag binding deletion.""" +class TagBindingRemoveApi(Resource): + """Batch resource for tag binding deletion.""" - @console_ns.doc("delete_tag_binding_deprecated") - @console_ns.doc(deprecated=True) - @console_ns.doc(description="Deprecated legacy alias. Use DELETE /tag-bindings/{id} instead.") + @console_ns.doc("remove_tag_bindings") + @console_ns.doc(description="Remove one or more tag bindings from a target.") @console_ns.expect(console_ns.models[TagBindingRemovePayload.__name__]) @setup_required @login_required @account_initialization_required def post(self): - return _remove_tag_binding() + return _remove_tag_bindings() diff --git a/api/controllers/console/workspace/account.py b/api/controllers/console/workspace/account.py index c01286cc59..68520e540b 100644 --- a/api/controllers/console/workspace/account.py +++ b/api/controllers/console/workspace/account.py @@ -8,6 +8,7 @@ from flask import request from flask_restx import Resource from pydantic import BaseModel, Field, field_validator, model_validator from sqlalchemy import select +from werkzeug.exceptions import NotFound from configs import dify_config from constants.languages import supported_language @@ -45,12 +46,12 @@ from libs.helper import EmailStr, extract_remote_ip, timezone from libs.login import current_account_with_tenant, login_required from models import AccountIntegrate, InvitationCode from models.account import AccountStatus, InvitationCodeStatus +from models.enums import CreatorUserRole +from models.model import UploadFile from services.account_service import AccountService from services.billing_service import BillingService from 
services.errors.account import CurrentPasswordIncorrectError as ServiceCurrentPasswordIncorrectError -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class AccountInitPayload(BaseModel): interface_language: str @@ -158,27 +159,26 @@ class CheckEmailUniquePayload(BaseModel): email: EmailStr -def reg(cls: type[BaseModel]): - console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) - - -reg(AccountInitPayload) -reg(AccountNamePayload) -reg(AccountAvatarPayload) -reg(AccountAvatarQuery) -reg(AccountInterfaceLanguagePayload) -reg(AccountInterfaceThemePayload) -reg(AccountTimezonePayload) -reg(AccountPasswordPayload) -reg(AccountDeletePayload) -reg(AccountDeletionFeedbackPayload) -reg(EducationActivatePayload) -reg(EducationAutocompleteQuery) -reg(ChangeEmailSendPayload) -reg(ChangeEmailValidityPayload) -reg(ChangeEmailResetPayload) -reg(CheckEmailUniquePayload) -register_schema_models(console_ns, AccountResponse) +register_schema_models( + console_ns, + AccountResponse, + AccountInitPayload, + AccountNamePayload, + AccountAvatarPayload, + AccountAvatarQuery, + AccountInterfaceLanguagePayload, + AccountInterfaceThemePayload, + AccountTimezonePayload, + AccountPasswordPayload, + AccountDeletePayload, + AccountDeletionFeedbackPayload, + EducationActivatePayload, + EducationAutocompleteQuery, + ChangeEmailSendPayload, + ChangeEmailValidityPayload, + ChangeEmailResetPayload, + CheckEmailUniquePayload, +) def _serialize_account(account) -> dict[str, Any]: @@ -322,9 +322,24 @@ class AccountAvatarApi(Resource): @login_required @account_initialization_required def get(self): - args = AccountAvatarQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + current_user, current_tenant_id = current_account_with_tenant() + args = AccountAvatarQuery.model_validate(request.args.to_dict(flat=True)) + avatar = args.avatar - avatar_url = file_helpers.get_signed_file_url(args.avatar) + if 
avatar.startswith(("http://", "https://")): + return {"avatar_url": avatar} + + upload_file = db.session.scalar(select(UploadFile).where(UploadFile.id == avatar).limit(1)) + if upload_file is None: + raise NotFound("Avatar file not found") + + if upload_file.tenant_id != current_tenant_id: + raise NotFound("Avatar file not found") + + if upload_file.created_by_role != CreatorUserRole.ACCOUNT or upload_file.created_by != current_user.id: + raise NotFound("Avatar file not found") + + avatar_url = file_helpers.get_signed_file_url(upload_file_id=upload_file.id) return {"avatar_url": avatar_url} @console_ns.expect(console_ns.models[AccountAvatarPayload.__name__]) diff --git a/api/controllers/console/workspace/endpoint.py b/api/controllers/console/workspace/endpoint.py index d4be07382a..925f3e1197 100644 --- a/api/controllers/console/workspace/endpoint.py +++ b/api/controllers/console/workspace/endpoint.py @@ -20,8 +20,6 @@ from graphon.model_runtime.utils.encoders import jsonable_encoder from libs.login import current_account_with_tenant, login_required from services.plugin.endpoint_service import EndpointService -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class EndpointCreatePayload(BaseModel): plugin_unique_identifier: str @@ -80,10 +78,6 @@ class EndpointDisableResponse(BaseModel): success: bool = Field(description="Operation success") -def reg(cls: type[BaseModel]): - console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) - - register_schema_models( console_ns, EndpointCreatePayload, @@ -215,7 +209,7 @@ class EndpointListApi(Resource): def get(self): user, tenant_id = current_account_with_tenant() - args = EndpointListQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = EndpointListQuery.model_validate(request.args.to_dict(flat=True)) page = args.page page_size = args.page_size @@ -248,7 +242,7 @@ class EndpointListForSinglePluginApi(Resource): def get(self): user, 
tenant_id = current_account_with_tenant() - args = EndpointListForPluginQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = EndpointListForPluginQuery.model_validate(request.args.to_dict(flat=True)) page = args.page page_size = args.page_size diff --git a/api/controllers/console/workspace/members.py b/api/controllers/console/workspace/members.py index f8a3005dd6..efc60299a4 100644 --- a/api/controllers/console/workspace/members.py +++ b/api/controllers/console/workspace/members.py @@ -34,8 +34,6 @@ from services.enterprise import rbac_service as enterprise_rbac_service from services.errors.account import AccountAlreadyInTenantError from services.feature_service import FeatureService -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class MemberInvitePayload(BaseModel): emails: list[str] = Field(default_factory=list) @@ -60,17 +58,17 @@ class OwnerTransferPayload(BaseModel): token: str -def reg(cls: type[BaseModel]): - console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) - - -reg(MemberInvitePayload) -reg(MemberRoleUpdatePayload) -reg(OwnerTransferEmailPayload) -reg(OwnerTransferCheckPayload) -reg(OwnerTransferPayload) register_enum_models(console_ns, TenantAccountRole) -register_schema_models(console_ns, AccountWithRole, AccountWithRoleList) +register_schema_models( + console_ns, + AccountWithRole, + AccountWithRoleList, + MemberInvitePayload, + MemberRoleUpdatePayload, + OwnerTransferEmailPayload, + OwnerTransferCheckPayload, + OwnerTransferPayload, +) def _serialize_member_roles(current_role: str | None, member_roles: list[enterprise_rbac_service.MemberRoleSummary]) -> list[dict[str, str]]: diff --git a/api/controllers/console/workspace/model_providers.py b/api/controllers/console/workspace/model_providers.py index 4b10561fdb..2f75218c0f 100644 --- a/api/controllers/console/workspace/model_providers.py +++ b/api/controllers/console/workspace/model_providers.py @@ -5,6 
+5,7 @@ from flask import request, send_file from flask_restx import Resource from pydantic import BaseModel, Field, field_validator +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.wraps import account_initialization_required, is_admin_or_owner_required, setup_required from graphon.model_runtime.entities.model_entities import ModelType @@ -15,8 +16,6 @@ from libs.login import current_account_with_tenant, login_required from services.billing_service import BillingService from services.model_provider_service import ModelProviderService -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class ParserModelList(BaseModel): model_type: ModelType | None = None @@ -75,18 +74,17 @@ class ParserPreferredProviderType(BaseModel): preferred_provider_type: Literal["system", "custom"] -def reg(cls: type[BaseModel]): - console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) - - -reg(ParserModelList) -reg(ParserCredentialId) -reg(ParserCredentialCreate) -reg(ParserCredentialUpdate) -reg(ParserCredentialDelete) -reg(ParserCredentialSwitch) -reg(ParserCredentialValidate) -reg(ParserPreferredProviderType) +register_schema_models( + console_ns, + ParserModelList, + ParserCredentialId, + ParserCredentialCreate, + ParserCredentialUpdate, + ParserCredentialDelete, + ParserCredentialSwitch, + ParserCredentialValidate, + ParserPreferredProviderType, +) @console_ns.route("/workspaces/current/model-providers") diff --git a/api/controllers/console/workspace/models.py b/api/controllers/console/workspace/models.py index b2d07ff8f9..7f7d6379c3 100644 --- a/api/controllers/console/workspace/models.py +++ b/api/controllers/console/workspace/models.py @@ -17,7 +17,6 @@ from services.model_load_balancing_service import ModelLoadBalancingService from services.model_provider_service import ModelProviderService logger = logging.getLogger(__name__) 
-DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" class ParserGetDefault(BaseModel): @@ -107,6 +106,12 @@ class ParserParameter(BaseModel): model: str +class ParserSwitch(BaseModel): + model: str + model_type: ModelType + credential_id: str + + register_schema_models( console_ns, ParserGetDefault, @@ -119,6 +124,7 @@ register_schema_models( ParserDeleteCredential, ParserParameter, Inner, + ParserSwitch, ) register_enum_models(console_ns, ModelType) @@ -133,7 +139,7 @@ class DefaultModelApi(Resource): def get(self): _, tenant_id = current_account_with_tenant() - args = ParserGetDefault.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ParserGetDefault.model_validate(request.args.to_dict(flat=True)) model_provider_service = ModelProviderService() default_model_entity = model_provider_service.get_default_model_of_model_type( @@ -261,7 +267,7 @@ class ModelProviderModelCredentialApi(Resource): def get(self, provider: str): _, tenant_id = current_account_with_tenant() - args = ParserGetCredentials.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ParserGetCredentials.model_validate(request.args.to_dict(flat=True)) model_provider_service = ModelProviderService() current_credential = model_provider_service.get_model_credential( @@ -387,17 +393,6 @@ class ModelProviderModelCredentialApi(Resource): return {"result": "success"}, 204 -class ParserSwitch(BaseModel): - model: str - model_type: ModelType - credential_id: str - - -console_ns.schema_model( - ParserSwitch.__name__, ParserSwitch.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) - - @console_ns.route("/workspaces/current/model-providers//models/credentials/switch") class ModelProviderModelCredentialSwitchApi(Resource): @console_ns.expect(console_ns.models[ParserSwitch.__name__]) @@ -468,9 +463,7 @@ class ParserValidate(BaseModel): credentials: dict[str, Any] -console_ns.schema_model( - ParserValidate.__name__, 
ParserValidate.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) +register_schema_models(console_ns, ParserSwitch, ParserValidate) @console_ns.route("/workspaces/current/model-providers//models/credentials/validate") @@ -515,7 +508,7 @@ class ModelProviderModelParameterRuleApi(Resource): @login_required @account_initialization_required def get(self, provider: str): - args = ParserParameter.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ParserParameter.model_validate(request.args.to_dict(flat=True)) _, tenant_id = current_account_with_tenant() model_provider_service = ModelProviderService() diff --git a/api/controllers/console/workspace/plugin.py b/api/controllers/console/workspace/plugin.py index b3e344ccea..a6d4a60beb 100644 --- a/api/controllers/console/workspace/plugin.py +++ b/api/controllers/console/workspace/plugin.py @@ -177,7 +177,7 @@ def _read_upload_content(file: FileStorage, max_size: int) -> bytes: FileStorage.content_length is not reliable for multipart test uploads and may be zero even when content exists, so the controllers validate against the loaded bytes instead. 
""" - content = file.read() + content = file.stream.read() if len(content) > max_size: raise ValueError("File size exceeds the maximum allowed size") @@ -211,7 +211,7 @@ class PluginListApi(Resource): @account_initialization_required def get(self): _, tenant_id = current_account_with_tenant() - args = ParserList.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ParserList.model_validate(request.args.to_dict(flat=True)) try: plugins_with_total = PluginService.list_with_total(tenant_id, args.page, args.page_size) except PluginDaemonClientSideError as e: @@ -261,7 +261,7 @@ class PluginIconApi(Resource): @console_ns.expect(console_ns.models[ParserIcon.__name__]) @setup_required def get(self): - args = ParserIcon.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ParserIcon.model_validate(request.args.to_dict(flat=True)) try: icon_bytes, mimetype = PluginService.get_asset(args.tenant_id, args.filename) @@ -279,7 +279,7 @@ class PluginAssetApi(Resource): @login_required @account_initialization_required def get(self): - args = ParserAsset.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ParserAsset.model_validate(request.args.to_dict(flat=True)) _, tenant_id = current_account_with_tenant() try: @@ -421,7 +421,7 @@ class PluginFetchMarketplacePkgApi(Resource): @plugin_permission_required(install_required=True) def get(self): _, tenant_id = current_account_with_tenant() - args = ParserPluginIdentifierQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ParserPluginIdentifierQuery.model_validate(request.args.to_dict(flat=True)) try: return jsonable_encoder( @@ -446,7 +446,7 @@ class PluginFetchManifestApi(Resource): def get(self): _, tenant_id = current_account_with_tenant() - args = ParserPluginIdentifierQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ParserPluginIdentifierQuery.model_validate(request.args.to_dict(flat=True)) try: return 
jsonable_encoder( @@ -466,7 +466,7 @@ class PluginFetchInstallTasksApi(Resource): def get(self): _, tenant_id = current_account_with_tenant() - args = ParserTasks.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ParserTasks.model_validate(request.args.to_dict(flat=True)) try: return jsonable_encoder({"tasks": PluginService.fetch_install_tasks(tenant_id, args.page, args.page_size)}) @@ -660,7 +660,7 @@ class PluginFetchDynamicSelectOptionsApi(Resource): current_user, tenant_id = current_account_with_tenant() user_id = current_user.id - args = ParserDynamicOptions.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ParserDynamicOptions.model_validate(request.args.to_dict(flat=True)) try: options = PluginParameterService.get_dynamic_select_options( @@ -822,7 +822,7 @@ class PluginReadmeApi(Resource): @account_initialization_required def get(self): _, tenant_id = current_account_with_tenant() - args = ParserReadme.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = ParserReadme.model_validate(request.args.to_dict(flat=True)) return jsonable_encoder( {"readme": PluginService.fetch_plugin_readme(tenant_id, args.plugin_unique_identifier, args.language)} ) diff --git a/api/controllers/console/workspace/tool_providers.py b/api/controllers/console/workspace/tool_providers.py index 34c9534de8..e653c9064c 100644 --- a/api/controllers/console/workspace/tool_providers.py +++ b/api/controllers/console/workspace/tool_providers.py @@ -876,10 +876,10 @@ class ToolBuiltinProviderSetDefaultApi(Resource): @login_required @account_initialization_required def post(self, provider): - current_user, current_tenant_id = current_account_with_tenant() + _, current_tenant_id = current_account_with_tenant() payload = BuiltinProviderDefaultCredentialPayload.model_validate(console_ns.payload or {}) return BuiltinToolManageService.set_default_provider( - tenant_id=current_tenant_id, user_id=current_user.id, provider=provider, 
id=payload.id + tenant_id=current_tenant_id, provider=provider, id=payload.id ) diff --git a/api/controllers/console/workspace/workspace.py b/api/controllers/console/workspace/workspace.py index 565099db61..84890f0443 100644 --- a/api/controllers/console/workspace/workspace.py +++ b/api/controllers/console/workspace/workspace.py @@ -16,6 +16,7 @@ from controllers.common.errors import ( TooManyFilesError, UnsupportedFileTypeError, ) +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.admin import admin_required from controllers.console.error import AccountNotLinkTenantError @@ -39,7 +40,6 @@ from services.file_service import FileService from services.workspace_service import WorkspaceService logger = logging.getLogger(__name__) -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" class WorkspaceListQuery(BaseModel): @@ -91,15 +91,14 @@ class TenantInfoResponse(ResponseModel): return value -def reg(cls: type[BaseModel]): - console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) - - -reg(WorkspaceListQuery) -reg(SwitchWorkspacePayload) -reg(WorkspaceCustomConfigPayload) -reg(WorkspaceInfoPayload) -reg(TenantInfoResponse) +register_schema_models( + console_ns, + WorkspaceListQuery, + SwitchWorkspacePayload, + WorkspaceCustomConfigPayload, + WorkspaceInfoPayload, + TenantInfoResponse, +) provider_fields = { "provider_name": fields.String, @@ -322,7 +321,7 @@ class WebappLogoWorkspaceApi(Resource): try: upload_file = FileService(db.engine).upload_file( filename=file.filename, - content=file.read(), + content=file.stream.read(), mimetype=file.mimetype, user=current_user, ) diff --git a/api/controllers/files/image_preview.py b/api/controllers/files/image_preview.py index a91e745f80..be7886e831 100644 --- a/api/controllers/files/image_preview.py +++ b/api/controllers/files/image_preview.py @@ -8,13 +8,12 @@ from werkzeug.exceptions import 
NotFound import services from controllers.common.errors import UnsupportedFileTypeError from controllers.common.file_response import enforce_download_for_html +from controllers.common.schema import register_schema_models from controllers.files import files_ns from extensions.ext_database import db from services.account_service import TenantService from services.file_service import FileService -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class FileSignatureQuery(BaseModel): timestamp: str = Field(..., description="Unix timestamp used in the signature") @@ -26,12 +25,7 @@ class FilePreviewQuery(FileSignatureQuery): as_attachment: bool = Field(default=False, description="Whether to download as attachment") -files_ns.schema_model( - FileSignatureQuery.__name__, FileSignatureQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) -files_ns.schema_model( - FilePreviewQuery.__name__, FilePreviewQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) +register_schema_models(files_ns, FileSignatureQuery, FilePreviewQuery) @files_ns.route("//image-preview") @@ -58,7 +52,7 @@ class ImagePreviewApi(Resource): def get(self, file_id): file_id = str(file_id) - args = FileSignatureQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = FileSignatureQuery.model_validate(request.args.to_dict(flat=True)) timestamp = args.timestamp nonce = args.nonce sign = args.sign @@ -100,7 +94,7 @@ class FilePreviewApi(Resource): def get(self, file_id): file_id = str(file_id) - args = FilePreviewQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = FilePreviewQuery.model_validate(request.args.to_dict(flat=True)) try: generator, upload_file = FileService(db.engine).get_file_generator_by_file_id( diff --git a/api/controllers/files/tool_files.py b/api/controllers/files/tool_files.py index 2f1e2f28bd..8ae16ce7f4 100644 --- a/api/controllers/files/tool_files.py +++ b/api/controllers/files/tool_files.py @@ 
-7,12 +7,11 @@ from werkzeug.exceptions import Forbidden, NotFound from controllers.common.errors import UnsupportedFileTypeError from controllers.common.file_response import enforce_download_for_html +from controllers.common.schema import register_schema_models from controllers.files import files_ns from core.tools.signature import verify_tool_file_signature from core.tools.tool_file_manager import ToolFileManager -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class ToolFileQuery(BaseModel): timestamp: str = Field(..., description="Unix timestamp") @@ -21,9 +20,7 @@ class ToolFileQuery(BaseModel): as_attachment: bool = Field(default=False, description="Download as attachment") -files_ns.schema_model( - ToolFileQuery.__name__, ToolFileQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) +register_schema_models(files_ns, ToolFileQuery) @files_ns.route("/tools/.") diff --git a/api/controllers/files/upload.py b/api/controllers/files/upload.py index ed3278a28b..7d588b95dd 100644 --- a/api/controllers/files/upload.py +++ b/api/controllers/files/upload.py @@ -20,8 +20,6 @@ from ..console.wraps import setup_required from ..files import files_ns from ..inner_api.plugin.wraps import get_user -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class PluginUploadQuery(BaseModel): timestamp: str = Field(..., description="Unix timestamp for signature verification") @@ -31,9 +29,8 @@ class PluginUploadQuery(BaseModel): user_id: str | None = Field(default=None, description="User identifier") -files_ns.schema_model( - PluginUploadQuery.__name__, PluginUploadQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) -) +register_schema_models(files_ns, PluginUploadQuery) + register_schema_models(files_ns, FileResponse) @@ -69,7 +66,7 @@ class PluginUploadFileApi(Resource): FileTooLargeError: File exceeds size limit UnsupportedFileTypeError: File type not supported """ - args = 
PluginUploadQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore + args = PluginUploadQuery.model_validate(request.args.to_dict(flat=True)) file = request.files.get("file") if file is None: @@ -103,7 +100,7 @@ class PluginUploadFileApi(Resource): tool_file = ToolFileManager().create_file_by_raw( user_id=user.id, tenant_id=tenant_id, - file_binary=file.read(), + file_binary=file.stream.read(), mimetype=mimetype, filename=filename, conversation_id=None, diff --git a/api/controllers/service_api/app/file.py b/api/controllers/service_api/app/file.py index 6f6dadf768..687d34076d 100644 --- a/api/controllers/service_api/app/file.py +++ b/api/controllers/service_api/app/file.py @@ -58,7 +58,7 @@ class FileApi(Resource): try: upload_file = FileService(db.engine).upload_file( filename=file.filename, - content=file.read(), + content=file.stream.read(), mimetype=file.mimetype, user=end_user, ) diff --git a/api/controllers/service_api/dataset/dataset.py b/api/controllers/service_api/dataset/dataset.py index 76519cad0a..9af66f1960 100644 --- a/api/controllers/service_api/dataset/dataset.py +++ b/api/controllers/service_api/dataset/dataset.py @@ -2,11 +2,11 @@ from typing import Any, Literal, cast from flask import request from flask_restx import marshal -from pydantic import BaseModel, Field, TypeAdapter, field_validator +from pydantic import BaseModel, Field, TypeAdapter, field_validator, model_validator from werkzeug.exceptions import Forbidden, NotFound import services -from controllers.common.schema import register_schema_models +from controllers.common.schema import register_enum_models, register_schema_models from controllers.console.wraps import edit_permission_required from controllers.service_api import service_api_ns from controllers.service_api.dataset.error import DatasetInUseError, DatasetNameDuplicateError, InvalidActionError @@ -34,13 +34,7 @@ from services.tag_service import ( UpdateTagPayload, ) -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = 
"#/definitions/{model}" - - -service_api_ns.schema_model( - DatasetPermissionEnum.__name__, - TypeAdapter(DatasetPermissionEnum).json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) +register_enum_models(service_api_ns, DatasetPermissionEnum) class DatasetCreatePayload(BaseModel): @@ -100,9 +94,27 @@ class TagBindingPayload(BaseModel): class TagUnbindingPayload(BaseModel): - tag_id: str + """Accept the legacy single-tag Service API payload while exposing a normalized tag_ids list internally.""" + + tag_ids: list[str] = Field(default_factory=list) + tag_id: str | None = None target_id: str + @model_validator(mode="before") + @classmethod + def normalize_legacy_tag_id(cls, data: object) -> object: + if not isinstance(data, dict): + return data + if not data.get("tag_ids") and data.get("tag_id"): + return {**data, "tag_ids": [data["tag_id"]]} + return data + + @model_validator(mode="after") + def validate_tag_ids(self) -> "TagUnbindingPayload": + if not self.tag_ids: + raise ValueError("Tag IDs is required.") + return self + class DatasetListQuery(BaseModel): page: int = Field(default=1, description="Page number") @@ -601,11 +613,11 @@ class DatasetTagBindingApi(DatasetApiResource): @service_api_ns.route("/datasets/tags/unbinding") class DatasetTagUnbindingApi(DatasetApiResource): @service_api_ns.expect(service_api_ns.models[TagUnbindingPayload.__name__]) - @service_api_ns.doc("unbind_dataset_tag") - @service_api_ns.doc(description="Unbind a tag from a dataset") + @service_api_ns.doc("unbind_dataset_tags") + @service_api_ns.doc(description="Unbind tags from a dataset") @service_api_ns.doc( responses={ - 204: "Tag unbound successfully", + 204: "Tags unbound successfully", 401: "Unauthorized - invalid API token", 403: "Forbidden - insufficient permissions", } @@ -618,7 +630,7 @@ class DatasetTagUnbindingApi(DatasetApiResource): payload = TagUnbindingPayload.model_validate(service_api_ns.payload or {}) TagService.delete_tag_binding( - 
TagBindingDeletePayload(tag_id=payload.tag_id, target_id=payload.target_id, type=TagType.KNOWLEDGE) + TagBindingDeletePayload(tag_ids=payload.tag_ids, target_id=payload.target_id, type=TagType.KNOWLEDGE) ) return "", 204 diff --git a/api/controllers/service_api/dataset/document.py b/api/controllers/service_api/dataset/document.py index 0b09facf58..cb48fe6715 100644 --- a/api/controllers/service_api/dataset/document.py +++ b/api/controllers/service_api/dataset/document.py @@ -77,9 +77,6 @@ class DocumentTextCreatePayload(BaseModel): return value -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - - class DocumentTextUpdate(BaseModel): name: str | None = None text: str | None = None @@ -435,7 +432,7 @@ class DocumentAddByFileApi(DatasetApiResource): raise ValueError("current_user is required") upload_file = FileService(db.engine).upload_file( filename=file.filename, - content=file.read(), + content=file.stream.read(), mimetype=file.mimetype, user=current_user, source="datasets", @@ -509,7 +506,7 @@ def _update_document_by_file(tenant_id: str, dataset_id: UUID, document_id: UUID try: upload_file = FileService(db.engine).upload_file( filename=file.filename, - content=file.read(), + content=file.stream.read(), mimetype=file.mimetype, user=current_user, source="datasets", diff --git a/api/controllers/service_api/dataset/rag_pipeline/rag_pipeline_workflow.py b/api/controllers/service_api/dataset/rag_pipeline/rag_pipeline_workflow.py index 2dc98bfbf7..8bc43bccd5 100644 --- a/api/controllers/service_api/dataset/rag_pipeline/rag_pipeline_workflow.py +++ b/api/controllers/service_api/dataset/rag_pipeline/rag_pipeline_workflow.py @@ -241,7 +241,7 @@ class KnowledgebasePipelineFileUploadApi(DatasetApiResource): try: upload_file = FileService(db.engine).upload_file( filename=file.filename, - content=file.read(), + content=file.stream.read(), mimetype=file.mimetype, user=current_user, ) diff --git a/api/controllers/web/audio.py b/api/controllers/web/audio.py index 
3ad595f1f4..8ddbc3abb8 100644 --- a/api/controllers/web/audio.py +++ b/api/controllers/web/audio.py @@ -23,7 +23,7 @@ from controllers.web.wraps import WebApiResource from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError from graphon.model_runtime.errors.invoke import InvokeError from libs.helper import uuid_value -from models.model import App +from models.model import App, EndUser from services.audio_service import AudioService from services.errors.audio import ( AudioTooLargeServiceError, @@ -69,12 +69,12 @@ class AudioApi(WebApiResource): 500: "Internal Server Error", } ) - def post(self, app_model: App, end_user): + def post(self, app_model: App, end_user: EndUser): """Convert audio to text""" file = request.files["file"] try: - response = AudioService.transcript_asr(app_model=app_model, file=file, end_user=end_user) + response = AudioService.transcript_asr(app_model=app_model, file=file, end_user=end_user.external_user_id) return response except services.errors.app_model_config.AppModelConfigBrokenError: @@ -117,7 +117,7 @@ class TextApi(WebApiResource): 500: "Internal Server Error", } ) - def post(self, app_model: App, end_user): + def post(self, app_model: App, end_user: EndUser): """Convert text to audio""" try: payload = TextToAudioPayload.model_validate(web_ns.payload or {}) diff --git a/api/controllers/web/files.py b/api/controllers/web/files.py index 0036c90800..6128490104 100644 --- a/api/controllers/web/files.py +++ b/api/controllers/web/files.py @@ -73,7 +73,7 @@ class FileApi(WebApiResource): try: upload_file = FileService(db.engine).upload_file( filename=file.filename, - content=file.read(), + content=file.stream.read(), mimetype=file.mimetype, user=end_user, source="datasets" if source == "datasets" else None, diff --git a/api/core/agent/base_agent_runner.py b/api/core/agent/base_agent_runner.py index c22102c2ba..cba4659483 100644 --- a/api/core/agent/base_agent_runner.py +++ 
b/api/core/agent/base_agent_runner.py @@ -532,7 +532,6 @@ class BaseAgentRunner(AppRunner): file_objs = file_factory.build_from_message_files( message_files=files, tenant_id=self.tenant_id, - config=file_extra_config, access_controller=_file_access_controller, ) if not file_objs: diff --git a/api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py b/api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py index 4c07445df3..f4bbbe5d8b 100644 --- a/api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py +++ b/api/core/app/app_config/easy_ui_based_app/prompt_template/manager.py @@ -75,7 +75,7 @@ class PromptTemplateConfigManager: if not config.get("prompt_type"): config["prompt_type"] = PromptTemplateEntity.PromptType.SIMPLE - prompt_type_vals = [typ.value for typ in PromptTemplateEntity.PromptType] + prompt_type_vals = list(PromptTemplateEntity.PromptType) if config["prompt_type"] not in prompt_type_vals: raise ValueError(f"prompt_type must be in {prompt_type_vals}") diff --git a/api/core/app/apps/common/workflow_response_converter.py b/api/core/app/apps/common/workflow_response_converter.py index 7bab3f7bff..4a741d3154 100644 --- a/api/core/app/apps/common/workflow_response_converter.py +++ b/api/core/app/apps/common/workflow_response_converter.py @@ -842,24 +842,24 @@ class WorkflowResponseConverter: return [] files: list[Mapping[str, Any]] = [] - if isinstance(value, FileSegment): - files.append(value.value.to_dict()) - elif isinstance(value, ArrayFileSegment): - files.extend([i.to_dict() for i in value.value]) - elif isinstance(value, File): - files.append(value.to_dict()) - elif isinstance(value, list): - for item in value: - file = cls._get_file_var_from_value(item) + match value: + case FileSegment(): + files.append(value.value.to_dict()) + case ArrayFileSegment(): + files.extend([i.to_dict() for i in value.value]) + case File(): + files.append(value.to_dict()) + case list(): + for item in value: + file = 
cls._get_file_var_from_value(item) + if file: + files.append(file) + case dict(): + file = cls._get_file_var_from_value(value) if file: files.append(file) - elif isinstance( - value, - dict, - ): - file = cls._get_file_var_from_value(value) - if file: - files.append(file) + case _: + pass return files diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index e811c2b2e0..43546d57f5 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -32,7 +32,7 @@ from core.app.entities.task_entities import ( ) from core.app.layers.pause_state_persist_layer import PauseStateLayerConfig, PauseStatePersistenceLayer from core.db.session_factory import session_factory -from core.helper.trace_id_helper import extract_external_trace_id_from_args +from core.helper.trace_id_helper import extract_external_trace_id_from_args, extract_parent_trace_context_from_args from core.ops.ops_trace_manager import TraceQueueManager from core.repositories import DifyCoreRepositoryFactory from core.repositories.factory import WorkflowExecutionRepository, WorkflowNodeExecutionRepository @@ -166,6 +166,7 @@ class WorkflowAppGenerator(BaseAppGenerator): extras = { **extract_external_trace_id_from_args(args), + **extract_parent_trace_context_from_args(args), } workflow_run_id = str(workflow_run_id or uuid.uuid4()) # FIXME (Yeuoly): we need to remove the SKIP_PREPARE_USER_INPUTS_KEY from the args diff --git a/api/core/app/llm/__init__.py b/api/core/app/llm/__init__.py index f069bede74..d20a5b2344 100644 --- a/api/core/app/llm/__init__.py +++ b/api/core/app/llm/__init__.py @@ -1,5 +1,15 @@ """LLM-related application services.""" -from .quota import deduct_llm_quota, ensure_llm_quota_available +from .quota import ( + deduct_llm_quota, + deduct_llm_quota_for_model, + ensure_llm_quota_available, + ensure_llm_quota_available_for_model, +) -__all__ = ["deduct_llm_quota", "ensure_llm_quota_available"] +__all__ 
= [ + "deduct_llm_quota", + "deduct_llm_quota_for_model", + "ensure_llm_quota_available", + "ensure_llm_quota_available_for_model", +] diff --git a/api/core/app/llm/quota.py b/api/core/app/llm/quota.py index b6039e1e4e..5bf3334a7b 100644 --- a/api/core/app/llm/quota.py +++ b/api/core/app/llm/quota.py @@ -1,4 +1,14 @@ -from sqlalchemy import update +"""Tenant-scoped helpers for checking and deducting LLM provider quota. + +System-hosted quota accounting is currently defined only for LLM models. Keep +the public helpers LLM-specific so callers do not carry unused model-type +plumbing, and fail loudly if the deprecated ``ModelInstance`` wrappers are used +with a non-LLM model. +""" + +import warnings + +from sqlalchemy import select from sqlalchemy.orm import sessionmaker from configs import dify_config @@ -6,44 +16,47 @@ from core.entities.model_entities import ModelStatus from core.entities.provider_entities import ProviderQuotaType, QuotaUnit from core.errors.error import QuotaExceededError from core.model_manager import ModelInstance +from core.plugin.impl.model_runtime_factory import create_plugin_provider_manager from extensions.ext_database import db from graphon.model_runtime.entities.llm_entities import LLMUsage +from graphon.model_runtime.entities.model_entities import ModelType from libs.datetime_utils import naive_utc_now from models.provider import Provider, ProviderType from models.provider_ids import ModelProviderID -def ensure_llm_quota_available(*, model_instance: ModelInstance) -> None: - provider_model_bundle = model_instance.provider_model_bundle - provider_configuration = provider_model_bundle.configuration +def _get_provider_configuration(*, tenant_id: str, provider: str): + """Resolve the tenant-bound provider configuration for quota decisions.""" + provider_manager = create_plugin_provider_manager(tenant_id=tenant_id) + provider_configuration = provider_manager.get_configurations(tenant_id).get(provider) + if provider_configuration is None: + 
raise ValueError(f"Provider {provider} does not exist.") + return provider_configuration + +def ensure_llm_quota_available_for_model(*, tenant_id: str, provider: str, model: str) -> None: + """Raise when a tenant-bound LLM model is already out of quota.""" + provider_configuration = _get_provider_configuration(tenant_id=tenant_id, provider=provider) if provider_configuration.using_provider_type != ProviderType.SYSTEM: return provider_model = provider_configuration.get_provider_model( - model_type=model_instance.model_type_instance.model_type, - model=model_instance.model_name, + model_type=ModelType.LLM, + model=model, ) if provider_model and provider_model.status == ModelStatus.QUOTA_EXCEEDED: - raise QuotaExceededError(f"Model provider {model_instance.provider} quota exceeded.") + raise QuotaExceededError(f"Model provider {provider} quota exceeded.") -def deduct_llm_quota(*, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None: - provider_model_bundle = model_instance.provider_model_bundle - provider_configuration = provider_model_bundle.configuration - - if provider_configuration.using_provider_type != ProviderType.SYSTEM: - return - - system_configuration = provider_configuration.system_configuration - +def _resolve_llm_used_quota(*, system_configuration, model: str, usage: LLMUsage) -> int | None: + """Compute the quota impact for an LLM invocation under the current quota mode.""" quota_unit = None for quota_configuration in system_configuration.quota_configurations: if quota_configuration.quota_type == system_configuration.current_quota_type: quota_unit = quota_configuration.quota_unit if quota_configuration.quota_limit == -1: - return + return None break @@ -52,42 +65,136 @@ def deduct_llm_quota(*, tenant_id: str, model_instance: ModelInstance, usage: LL if quota_unit == QuotaUnit.TOKENS: used_quota = usage.total_tokens elif quota_unit == QuotaUnit.CREDITS: - used_quota = dify_config.get_model_credits(model_instance.model_name) + 
used_quota = dify_config.get_model_credits(model) else: used_quota = 1 + return used_quota + + +def _deduct_free_llm_quota( + *, + tenant_id: str, + provider: str, + quota_type: ProviderQuotaType, + used_quota: int, +) -> None: + """Deduct FREE provider quota, capping at the limit before reporting exhaustion.""" + quota_exceeded = False + with sessionmaker(bind=db.engine).begin() as session: + provider_record = session.scalar( + select(Provider) + .where( + Provider.tenant_id == tenant_id, + # TODO: Use provider name with prefix after the data migration. + Provider.provider_name == ModelProviderID(provider).provider_name, + Provider.provider_type == ProviderType.SYSTEM.value, + Provider.quota_type == quota_type, + ) + .with_for_update() + ) + if ( + provider_record is None + or provider_record.quota_limit is None + or provider_record.quota_used is None + or provider_record.quota_limit <= provider_record.quota_used + ): + quota_exceeded = True + else: + available_quota = provider_record.quota_limit - provider_record.quota_used + deducted_quota = min(used_quota, available_quota) + provider_record.quota_used += deducted_quota + provider_record.last_used = naive_utc_now() + quota_exceeded = deducted_quota < used_quota + + if quota_exceeded: + raise QuotaExceededError(f"Model provider {provider} quota exceeded.") + + +def _deduct_used_llm_quota(*, tenant_id: str, provider: str, provider_configuration, used_quota: int | None) -> None: + """Apply a resolved LLM quota charge against the current provider quota bucket.""" + if provider_configuration.using_provider_type != ProviderType.SYSTEM: + return + + system_configuration = provider_configuration.system_configuration if used_quota is not None and system_configuration.current_quota_type is not None: match system_configuration.current_quota_type: case ProviderQuotaType.TRIAL: from services.credit_pool_service import CreditPoolService - CreditPoolService.check_and_deduct_credits( + CreditPoolService.deduct_credits_capped( 
tenant_id=tenant_id, credits_required=used_quota, ) case ProviderQuotaType.PAID: from services.credit_pool_service import CreditPoolService - CreditPoolService.check_and_deduct_credits( + CreditPoolService.deduct_credits_capped( tenant_id=tenant_id, credits_required=used_quota, pool_type="paid", ) case ProviderQuotaType.FREE: - with sessionmaker(bind=db.engine).begin() as session: - stmt = ( - update(Provider) - .where( - Provider.tenant_id == tenant_id, - # TODO: Use provider name with prefix after the data migration. - Provider.provider_name == ModelProviderID(model_instance.provider).provider_name, - Provider.provider_type == ProviderType.SYSTEM.value, - Provider.quota_type == system_configuration.current_quota_type, - Provider.quota_limit > Provider.quota_used, - ) - .values( - quota_used=Provider.quota_used + used_quota, - last_used=naive_utc_now(), - ) - ) - session.execute(stmt) + _deduct_free_llm_quota( + tenant_id=tenant_id, + provider=provider, + quota_type=system_configuration.current_quota_type, + used_quota=used_quota, + ) + case _: + return + + +def deduct_llm_quota_for_model(*, tenant_id: str, provider: str, model: str, usage: LLMUsage) -> None: + """Deduct tenant-bound quota for the resolved LLM model identity.""" + provider_configuration = _get_provider_configuration(tenant_id=tenant_id, provider=provider) + used_quota = _resolve_llm_used_quota( + system_configuration=provider_configuration.system_configuration, + model=model, + usage=usage, + ) + _deduct_used_llm_quota( + tenant_id=tenant_id, + provider=provider, + provider_configuration=provider_configuration, + used_quota=used_quota, + ) + + +def _require_llm_model_instance(model_instance: ModelInstance) -> None: + """Reject deprecated wrapper calls that pass a non-LLM model instance.""" + if model_instance.model_type_instance.model_type != ModelType.LLM: + raise ValueError("LLM quota helpers only support LLM model instances.") + + +def ensure_llm_quota_available(*, model_instance: 
ModelInstance) -> None: + """Deprecated compatibility wrapper for callers that still pass ModelInstance.""" + warnings.warn( + "ensure_llm_quota_available(model_instance=...) is deprecated; " + "use ensure_llm_quota_available_for_model(...) instead.", + DeprecationWarning, + stacklevel=2, + ) + _require_llm_model_instance(model_instance) + ensure_llm_quota_available_for_model( + tenant_id=model_instance.provider_model_bundle.configuration.tenant_id, + provider=model_instance.provider, + model=model_instance.model_name, + ) + + +def deduct_llm_quota(*, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None: + """Deprecated compatibility wrapper for callers that still pass ModelInstance.""" + warnings.warn( + "deduct_llm_quota(tenant_id=..., model_instance=..., usage=...) is deprecated; " + "use deduct_llm_quota_for_model(...) instead.", + DeprecationWarning, + stacklevel=2, + ) + _require_llm_model_instance(model_instance) + deduct_llm_quota_for_model( + tenant_id=tenant_id, + provider=model_instance.provider, + model=model_instance.model_name, + usage=usage, + ) diff --git a/api/core/app/workflow/layers/llm_quota.py b/api/core/app/workflow/layers/llm_quota.py index 4a7918032e..2422eed5a7 100644 --- a/api/core/app/workflow/layers/llm_quota.py +++ b/api/core/app/workflow/layers/llm_quota.py @@ -1,36 +1,48 @@ """ LLM quota deduction layer for GraphEngine. -This layer centralizes model-quota deduction outside node implementations. +This layer centralizes model-quota handling outside node implementations. + +Graphon LLM-backed nodes expose provider/model identity through public node +configuration and, after execution, through ``node_run_result.inputs``. Resolve +quota billing from that public identity instead of depending on +``ModelInstance`` reconstruction inside the workflow layer. Missing identity on +quota-tracked nodes is treated as a workflow bug and aborts execution so quota +handling is never silently skipped. 
""" import logging -from typing import TYPE_CHECKING, cast, final, override +from typing import final, override -from core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY, DifyRunContext -from core.app.llm import deduct_llm_quota, ensure_llm_quota_available +from core.app.llm import deduct_llm_quota_for_model, ensure_llm_quota_available_for_model from core.errors.error import QuotaExceededError -from core.model_manager import ModelInstance -from graphon.enums import BuiltinNodeTypes +from graphon.enums import BuiltinNodeTypes, WorkflowNodeExecutionStatus from graphon.graph_engine.entities.commands import AbortCommand, CommandType from graphon.graph_engine.layers import GraphEngineLayer from graphon.graph_events import GraphEngineEvent, GraphNodeEventBase, NodeRunSucceededEvent +from graphon.node_events import NodeRunResult from graphon.nodes.base.node import Node -if TYPE_CHECKING: - from graphon.nodes.llm.node import LLMNode - from graphon.nodes.parameter_extractor.parameter_extractor_node import ParameterExtractorNode - from graphon.nodes.question_classifier.question_classifier_node import QuestionClassifierNode - logger = logging.getLogger(__name__) +_QUOTA_NODE_TYPES = frozenset( + [ + BuiltinNodeTypes.LLM, + BuiltinNodeTypes.PARAMETER_EXTRACTOR, + BuiltinNodeTypes.QUESTION_CLASSIFIER, + ] +) @final class LLMQuotaLayer(GraphEngineLayer): - """Graph layer that applies LLM quota deduction after node execution.""" + """Graph layer that applies tenant-scoped quota checks to LLM-backed nodes.""" - def __init__(self) -> None: + tenant_id: str + _abort_sent: bool + + def __init__(self, tenant_id: str) -> None: super().__init__() + self.tenant_id = tenant_id self._abort_sent = False @override @@ -50,33 +62,49 @@ class LLMQuotaLayer(GraphEngineLayer): if self._abort_sent: return - model_instance = self._extract_model_instance(node) - if model_instance is None: + if not self._supports_quota(node): return + model_identity = 
self._extract_model_identity_from_node(node) + if model_identity is None: + reason = "LLM quota check requires public node model identity before execution." + self._abort_before_node_run(node=node, reason=reason, error_type="LLMQuotaIdentityError") + logger.error("LLM quota handling aborted, node_id=%s, reason=%s", node.id, reason) + return + + provider, model_name = model_identity try: - ensure_llm_quota_available(model_instance=model_instance) + ensure_llm_quota_available_for_model( + tenant_id=self.tenant_id, + provider=provider, + model=model_name, + ) except QuotaExceededError as exc: - self._set_stop_event(node) - self._send_abort_command(reason=str(exc)) + self._abort_before_node_run(node=node, reason=str(exc), error_type=QuotaExceededError.__name__) logger.warning("LLM quota check failed, node_id=%s, error=%s", node.id, exc) @override def on_node_run_end( self, node: Node, error: Exception | None, result_event: GraphNodeEventBase | None = None ) -> None: - if error is not None or not isinstance(result_event, NodeRunSucceededEvent): + if error is not None or not isinstance(result_event, NodeRunSucceededEvent) or not self._supports_quota(node): return - model_instance = self._extract_model_instance(node) - if model_instance is None: + model_identity = self._extract_model_identity_from_result_event(result_event) + if model_identity is None: + self._abort_for_missing_model_identity( + node=node, + reason="LLM quota deduction requires model identity in the node result event.", + ) return + provider, model_name = model_identity + try: - dify_ctx = DifyRunContext.model_validate(node.require_run_context_value(DIFY_RUN_CONTEXT_KEY)) - deduct_llm_quota( - tenant_id=dify_ctx.tenant_id, - model_instance=model_instance, + deduct_llm_quota_for_model( + tenant_id=self.tenant_id, + provider=provider, + model=model_name, usage=result_event.node_run_result.llm_usage, ) except QuotaExceededError as exc: @@ -92,6 +120,27 @@ class LLMQuotaLayer(GraphEngineLayer): if stop_event 
is not None: stop_event.set() + def _abort_before_node_run(self, *, node: Node, reason: str, error_type: str) -> None: + self._set_stop_event(node) + node.node_data.error_strategy = None + node.node_data.retry_config.retry_enabled = False + + def quota_aborted_run() -> NodeRunResult: + return NodeRunResult( + status=WorkflowNodeExecutionStatus.FAILED, + error=reason, + error_type=error_type, + ) + + # TODO: Push Graphon to expose a public pre-run failure/skip hook, then replace this private _run override. + node._run = quota_aborted_run # type: ignore[method-assign] + self._send_abort_command(reason=reason) + + def _abort_for_missing_model_identity(self, *, node: Node, reason: str) -> None: + self._set_stop_event(node) + self._send_abort_command(reason=reason) + logger.error("LLM quota handling aborted, node_id=%s, reason=%s", node.id, reason) + def _send_abort_command(self, *, reason: str) -> None: if not self.command_channel or self._abort_sent: return @@ -108,29 +157,38 @@ class LLMQuotaLayer(GraphEngineLayer): logger.exception("Failed to send quota abort command") @staticmethod - def _extract_model_instance(node: Node) -> ModelInstance | None: - try: - match node.node_type: - case BuiltinNodeTypes.LLM: - model_instance = cast("LLMNode", node).model_instance - case BuiltinNodeTypes.PARAMETER_EXTRACTOR: - model_instance = cast("ParameterExtractorNode", node).model_instance - case BuiltinNodeTypes.QUESTION_CLASSIFIER: - model_instance = cast("QuestionClassifierNode", node).model_instance - case _: - return None - except AttributeError: + def _supports_quota(node: Node) -> bool: + return node.node_type in _QUOTA_NODE_TYPES + + @staticmethod + def _extract_model_identity_from_result_event(result_event: NodeRunSucceededEvent) -> tuple[str, str] | None: + provider = result_event.node_run_result.inputs.get("model_provider") + model_name = result_event.node_run_result.inputs.get("model_name") + if isinstance(provider, str) and provider and isinstance(model_name, str) 
and model_name: + return provider, model_name + return None + + @staticmethod + def _extract_model_identity_from_node(node: Node) -> tuple[str, str] | None: + node_data = getattr(node, "node_data", None) + if node_data is None: + node_data = getattr(node, "data", None) + + model_config = getattr(node_data, "model", None) + if model_config is None: logger.warning( - "LLMQuotaLayer skipped quota deduction because node does not expose a model instance, node_id=%s", + "LLMQuotaLayer skipped quota handling because node model config is missing, node_id=%s", node.id, ) return None - if isinstance(model_instance, ModelInstance): - return model_instance - - raw_model_instance = getattr(model_instance, "_model_instance", None) - if isinstance(raw_model_instance, ModelInstance): - return raw_model_instance + provider = getattr(model_config, "provider", None) + model_name = getattr(model_config, "name", None) + if isinstance(provider, str) and provider and isinstance(model_name, str) and model_name: + return provider, model_name + logger.warning( + "LLMQuotaLayer skipped quota handling because node model identity is invalid, node_id=%s", + node.id, + ) return None diff --git a/api/core/app/workflow/layers/persistence.py b/api/core/app/workflow/layers/persistence.py index d521304615..19152cebae 100644 --- a/api/core/app/workflow/layers/persistence.py +++ b/api/core/app/workflow/layers/persistence.py @@ -15,6 +15,7 @@ from datetime import datetime from typing import Any, Union from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, WorkflowAppGenerateEntity +from core.helper.trace_id_helper import ParentTraceContext from core.ops.entities.trace_entity import TraceTaskName from core.ops.ops_trace_manager import TraceQueueManager, TraceTask from core.repositories.factory import WorkflowExecutionRepository, WorkflowNodeExecutionRepository @@ -403,8 +404,13 @@ class WorkflowPersistenceLayer(GraphEngineLayer): conversation_id = 
self._system_variables().get(SystemVariableKey.CONVERSATION_ID.value) external_trace_id = None + parent_trace_context = None if isinstance(self._application_generate_entity, (WorkflowAppGenerateEntity, AdvancedChatAppGenerateEntity)): - external_trace_id = self._application_generate_entity.extras.get("external_trace_id") + extras = self._application_generate_entity.extras + external_trace_id = extras.get("external_trace_id") + parent_trace_context = extras.get("parent_trace_context") + if isinstance(parent_trace_context, ParentTraceContext): + parent_trace_context = parent_trace_context.model_dump(exclude_none=True) trace_task = TraceTask( TraceTaskName.WORKFLOW_TRACE, @@ -412,6 +418,7 @@ class WorkflowPersistenceLayer(GraphEngineLayer): conversation_id=conversation_id, user_id=self._trace_manager.user_id, external_trace_id=external_trace_id, + parent_trace_context=parent_trace_context, ) self._trace_manager.add_trace_task(trace_task) diff --git a/api/core/entities/provider_configuration.py b/api/core/entities/provider_configuration.py index 38b87e2cd1..495fd1d898 100644 --- a/api/core/entities/provider_configuration.py +++ b/api/core/entities/provider_configuration.py @@ -23,7 +23,7 @@ from core.entities.provider_entities import ( ) from core.helper import encrypter from core.helper.model_provider_cache import ProviderCredentialsCache, ProviderCredentialsCacheType -from core.plugin.impl.model_runtime_factory import create_plugin_model_provider_factory +from core.plugin.impl.model_runtime_factory import create_model_type_instance, create_plugin_model_assembly from graphon.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType from graphon.model_runtime.entities.provider_entities import ( ConfigurateMethod, @@ -33,7 +33,7 @@ from graphon.model_runtime.entities.provider_entities import ( ) from graphon.model_runtime.model_providers.base.ai_model import AIModel from graphon.model_runtime.model_providers.model_provider_factory import 
ModelProviderFactory -from graphon.model_runtime.runtime import ModelRuntime +from graphon.model_runtime.protocols.runtime import ModelRuntime from libs.datetime_utils import naive_utc_now from models.engine import db from models.enums import CredentialSourceType @@ -106,11 +106,18 @@ class ProviderConfiguration(BaseModel): """Attach the already-composed runtime for request-bound call chains.""" self._bound_model_runtime = model_runtime + def _get_runtime_and_provider_factory(self) -> tuple[ModelRuntime, ModelProviderFactory]: + """Resolve a provider factory that stays aligned with the runtime used by the caller.""" + if self._bound_model_runtime is not None: + return self._bound_model_runtime, ModelProviderFactory(runtime=self._bound_model_runtime) + + model_assembly = create_plugin_model_assembly(tenant_id=self.tenant_id) + return model_assembly.model_runtime, model_assembly.model_provider_factory + def get_model_provider_factory(self) -> ModelProviderFactory: """Return a provider factory that preserves any request-bound runtime.""" - if self._bound_model_runtime is not None: - return ModelProviderFactory(model_runtime=self._bound_model_runtime) - return create_plugin_model_provider_factory(tenant_id=self.tenant_id) + _, model_provider_factory = self._get_runtime_and_provider_factory() + return model_provider_factory def get_current_credentials(self, model_type: ModelType, model: str) -> dict[str, Any] | None: """ @@ -1392,10 +1399,13 @@ class ProviderConfiguration(BaseModel): :param model_type: model type :return: """ - model_provider_factory = self.get_model_provider_factory() - - # Get model instance of LLM - return model_provider_factory.get_model_type_instance(provider=self.provider.provider, model_type=model_type) + model_runtime, model_provider_factory = self._get_runtime_and_provider_factory() + provider_schema = model_provider_factory.get_provider_schema(provider=self.provider.provider) + return create_model_type_instance( + runtime=model_runtime, + 
provider_schema=provider_schema, + model_type=model_type, + ) def get_model_schema( self, model_type: ModelType, model: str, credentials: dict[str, Any] | None diff --git a/api/core/helper/moderation.py b/api/core/helper/moderation.py index f169f247cf..18b9b72e9d 100644 --- a/api/core/helper/moderation.py +++ b/api/core/helper/moderation.py @@ -4,7 +4,7 @@ from typing import cast from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity from core.entities import DEFAULT_PLUGIN_ID -from core.plugin.impl.model_runtime_factory import create_plugin_model_provider_factory +from core.plugin.impl.model_runtime_factory import create_plugin_model_assembly from extensions.ext_hosting_provider import hosting_configuration from graphon.model_runtime.entities.model_entities import ModelType from graphon.model_runtime.errors.invoke import InvokeBadRequestError @@ -41,10 +41,8 @@ def check_moderation(tenant_id: str, model_config: ModelConfigWithCredentialsEnt text_chunk = secrets.choice(text_chunks) try: - model_provider_factory = create_plugin_model_provider_factory(tenant_id=tenant_id) - - # Get model instance of LLM - model_type_instance = model_provider_factory.get_model_type_instance( + model_assembly = create_plugin_model_assembly(tenant_id=tenant_id) + model_type_instance = model_assembly.create_model_type_instance( provider=openai_provider_name, model_type=ModelType.MODERATION ) model_type_instance = cast(ModerationModel, model_type_instance) diff --git a/api/core/helper/trace_id_helper.py b/api/core/helper/trace_id_helper.py index e827859109..e4890c8d4d 100644 --- a/api/core/helper/trace_id_helper.py +++ b/api/core/helper/trace_id_helper.py @@ -3,6 +3,17 @@ import re from collections.abc import Mapping from typing import Any +from pydantic import BaseModel, ConfigDict, StrictStr, ValidationError + + +class ParentTraceContext(BaseModel): + """Typed parent trace context propagated from an outer workflow tool node.""" + + parent_workflow_run_id: 
StrictStr + parent_node_execution_id: StrictStr | None = None + + model_config = ConfigDict(extra="forbid") + def is_valid_trace_id(trace_id: str) -> bool: """ @@ -61,6 +72,30 @@ def extract_external_trace_id_from_args(args: Mapping[str, Any]): return {} +def extract_parent_trace_context_from_args(args: Mapping[str, Any]) -> dict[str, ParentTraceContext]: + """ + Extract 'parent_trace_context' from args. + + Returns a dict suitable for use in extras when both parent identifiers exist. + Returns an empty dict if the context is missing or incomplete. + """ + parent_trace_context = args.get("parent_trace_context") + if isinstance(parent_trace_context, ParentTraceContext): + context = parent_trace_context + elif isinstance(parent_trace_context, Mapping): + try: + context = ParentTraceContext.model_validate(parent_trace_context) + except ValidationError: + return {} + else: + return {} + + if context.parent_node_execution_id is None: + return {} + + return {"parent_trace_context": context} + + def get_trace_id_from_otel_context() -> str | None: """ Retrieve the current trace ID from the active OpenTelemetry trace context. 
diff --git a/api/core/indexing_runner.py b/api/core/indexing_runner.py index b6e33396d1..537b14388e 100644 --- a/api/core/indexing_runner.py +++ b/api/core/indexing_runner.py @@ -324,9 +324,10 @@ class IndexingRunner: # one extract_setting is one source document for extract_setting in extract_settings: # extract - processing_rule = DatasetProcessRule( - mode=tmp_processing_rule["mode"], rules=json.dumps(tmp_processing_rule["rules"]) - ) + processing_rule = { + "mode": tmp_processing_rule["mode"], + "rules": tmp_processing_rule.get("rules"), + } # Extract document content text_docs = index_processor.extract(extract_setting, process_rule_mode=tmp_processing_rule["mode"]) # Cleaning and segmentation @@ -334,7 +335,7 @@ class IndexingRunner: text_docs, current_user=None, embedding_model_instance=embedding_model_instance, - process_rule=processing_rule.to_dict(), + process_rule=processing_rule, tenant_id=tenant_id, doc_language=doc_language, preview=True, diff --git a/api/core/memory/token_buffer_memory.py b/api/core/memory/token_buffer_memory.py index d840ee213c..c41c175cca 100644 --- a/api/core/memory/token_buffer_memory.py +++ b/api/core/memory/token_buffer_memory.py @@ -86,12 +86,10 @@ class TokenBufferMemory: detail = ImagePromptMessageContent.DETAIL.HIGH if file_extra_config and app_record: - # Build files directly without filtering by belongs_to file_objs = [ file_factory.build_from_message_file( message_file=message_file, tenant_id=app_record.tenant_id, - config=file_extra_config, access_controller=_file_access_controller, ) for message_file in message_files diff --git a/api/core/ops/entities/trace_entity.py b/api/core/ops/entities/trace_entity.py index 45b2f635ba..98e87a0ceb 100644 --- a/api/core/ops/entities/trace_entity.py +++ b/api/core/ops/entities/trace_entity.py @@ -5,6 +5,8 @@ from typing import Any, Union from pydantic import BaseModel, ConfigDict, field_serializer, field_validator +from core.helper.trace_id_helper import ParentTraceContext + class 
BaseTraceInfo(BaseModel): message_id: str | None = None @@ -51,8 +53,8 @@ class BaseTraceInfo(BaseModel): def resolved_parent_context(self) -> tuple[str | None, str | None]: """Resolve cross-workflow parent linking from metadata. - Extracts typed parent IDs from the untyped ``parent_trace_context`` - metadata dict (set by tool_node when invoking nested workflows). + Extracts typed parent IDs from the ``parent_trace_context`` metadata + payload (set by tool_node when invoking nested workflows). Returns: (trace_correlation_override, parent_span_id_source) where @@ -60,13 +62,18 @@ class BaseTraceInfo(BaseModel): parent_span_id_source is the outer node_execution_id. """ parent_ctx = self.metadata.get("parent_trace_context") - if not isinstance(parent_ctx, dict): + if isinstance(parent_ctx, ParentTraceContext): + context = parent_ctx + elif isinstance(parent_ctx, Mapping): + try: + context = ParentTraceContext.model_validate(parent_ctx) + except ValueError: + return None, None + else: return None, None - trace_override = parent_ctx.get("parent_workflow_run_id") - parent_span = parent_ctx.get("parent_node_execution_id") return ( - trace_override if isinstance(trace_override, str) else None, - parent_span if isinstance(parent_span, str) else None, + context.parent_workflow_run_id, + context.parent_node_execution_id, ) @field_serializer("start_time", "end_time") diff --git a/api/core/ops/exceptions.py b/api/core/ops/exceptions.py new file mode 100644 index 0000000000..4551704687 --- /dev/null +++ b/api/core/ops/exceptions.py @@ -0,0 +1,22 @@ +"""Core exceptions shared by ops trace dispatchers and trace providers. + +Provider packages may raise these types to request generic task behavior, but +generic Celery tasks should not import provider-specific exception classes. 
+""" + + +class RetryableTraceDispatchError(RuntimeError): + """Base class for transient trace dispatch failures that Celery may retry.""" + + +class PendingTraceParentContextError(RetryableTraceDispatchError): + """Raised when a nested trace arrives before its parent span context is available.""" + + parent_node_execution_id: str + + def __init__(self, parent_node_execution_id: str) -> None: + self.parent_node_execution_id = parent_node_execution_id + super().__init__( + "Pending trace parent context for parent_node_execution_id=" + f"{parent_node_execution_id}. Retry after the parent span context is published." + ) diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index e7ba6e502b..61fd0e5c1f 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -16,6 +16,7 @@ from sqlalchemy import select from sqlalchemy.orm import Session, sessionmaker from core.helper.encrypter import batch_decrypt_token, encrypt_token, obfuscated_token +from core.helper.trace_id_helper import ParentTraceContext from core.ops.entities.config_entity import ( OPS_FILE_PATH, BaseTracingConfig, @@ -52,6 +53,17 @@ if TYPE_CHECKING: logger = logging.getLogger(__name__) +def _dump_parent_trace_context(parent_trace_context: Any) -> dict[str, str] | None: + if isinstance(parent_trace_context, ParentTraceContext): + return parent_trace_context.model_dump(exclude_none=True) + if isinstance(parent_trace_context, dict): + try: + return ParentTraceContext.model_validate(parent_trace_context).model_dump(exclude_none=True) + except ValueError: + return None + return None + + class _AppTracingConfig(TypedDict, total=False): enabled: bool tracing_provider: str | None @@ -569,13 +581,13 @@ class OpsTraceManager: db.session.commit() @classmethod - def get_app_tracing_config(cls, app_id: str): + def get_app_tracing_config(cls, app_id: str, session: Session): """ Get app tracing config :param app_id: app id :return: """ - app: App | None = 
db.session.get(App, app_id) + app: App | None = session.get(App, app_id) if not app: raise ValueError("App not found") if not app.tracing: @@ -857,8 +869,9 @@ class TraceTask: } parent_trace_context = self.kwargs.get("parent_trace_context") - if parent_trace_context: - metadata["parent_trace_context"] = parent_trace_context + dumped_parent_trace_context = _dump_parent_trace_context(parent_trace_context) + if dumped_parent_trace_context: + metadata["parent_trace_context"] = dumped_parent_trace_context workflow_trace_info = WorkflowTraceInfo( trace_id=self.trace_id, @@ -1371,13 +1384,14 @@ class TraceTask: } parent_trace_context = node_data.get("parent_trace_context") - if parent_trace_context: - metadata["parent_trace_context"] = parent_trace_context + dumped_parent_trace_context = _dump_parent_trace_context(parent_trace_context) + if dumped_parent_trace_context: + metadata["parent_trace_context"] = dumped_parent_trace_context message_id: str | None = None conversation_id = node_data.get("conversation_id") workflow_execution_id = node_data.get("workflow_execution_id") - if conversation_id and workflow_execution_id and not parent_trace_context: + if conversation_id and workflow_execution_id and not dumped_parent_trace_context: with Session(db.engine) as session: msg_id = session.scalar( select(Message.id).where( diff --git a/api/core/plugin/impl/model_runtime.py b/api/core/plugin/impl/model_runtime.py index 4e66d58b5e..62573ba2f5 100644 --- a/api/core/plugin/impl/model_runtime.py +++ b/api/core/plugin/impl/model_runtime.py @@ -4,23 +4,32 @@ import hashlib import logging from collections.abc import Generator, Iterable, Sequence from threading import Lock -from typing import IO, Any, Union +from typing import IO, Any, Literal, cast, overload from pydantic import ValidationError from redis import RedisError from configs import dify_config +from core.llm_generator.output_parser.structured_output import ( + invoke_llm_with_structured_output as 
invoke_llm_with_structured_output_helper, +) from core.plugin.entities.plugin_daemon import PluginModelProviderEntity from core.plugin.impl.asset import PluginAssetManager from core.plugin.impl.model import PluginModelClient from extensions.ext_redis import redis_client -from graphon.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk +from graphon.model_runtime.entities.llm_entities import ( + LLMResult, + LLMResultChunk, + LLMResultChunkWithStructuredOutput, + LLMResultWithStructuredOutput, +) from graphon.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool from graphon.model_runtime.entities.model_entities import AIModelEntity, ModelType from graphon.model_runtime.entities.provider_entities import ProviderEntity from graphon.model_runtime.entities.rerank_entities import MultimodalRerankInput, RerankResult from graphon.model_runtime.entities.text_embedding_entities import EmbeddingInputType, EmbeddingResult -from graphon.model_runtime.runtime import ModelRuntime +from graphon.model_runtime.model_providers.base.large_language_model import normalize_non_stream_runtime_result +from graphon.model_runtime.protocols.runtime import ModelRuntime from models.provider_ids import ModelProviderID logger = logging.getLogger(__name__) @@ -29,6 +38,68 @@ logger = logging.getLogger(__name__) TENANT_SCOPE_SCHEMA_CACHE_USER_ID = "__DIFY_TS__" +# TODO(-LAN-): Move native structured-output invocation into Graphon's LLM node. +# TODO(-LAN-): Remove this Dify-side adapter once Graphon owns structured output end-to-end. +class _PluginStructuredOutputModelInstance: + """Bind plugin model identity to the shared structured-output helper. + + The structured-output parser is shared with legacy ``ModelInstance`` flows + and only needs an object exposing ``invoke_llm(...)``. ``PluginModelRuntime`` + intentionally exposes a lower-level API where provider, model, and + credentials are passed per call. 
This adapter supplies the small bound + ``invoke_llm`` surface the helper needs without constructing a full + ``ModelInstance`` or reintroducing model-manager dependencies into the + plugin runtime path. + """ + + def __init__( + self, + *, + runtime: PluginModelRuntime, + provider: str, + model: str, + credentials: dict[str, Any], + ) -> None: + self._runtime = runtime + self._provider = provider + self._model = model + self._credentials = credentials + + def invoke_llm( + self, + *, + prompt_messages: Sequence[PromptMessage], + model_parameters: dict[str, Any] | None = None, + tools: Sequence[PromptMessageTool] | None = None, + stop: Sequence[str] | None = None, + stream: bool = True, + callbacks: object | None = None, + ) -> LLMResult | Generator[LLMResultChunk, None, None]: + del callbacks + if stream: + return self._runtime.invoke_llm( + provider=self._provider, + model=self._model, + credentials=self._credentials, + model_parameters=model_parameters or {}, + prompt_messages=prompt_messages, + tools=list(tools) if tools else None, + stop=stop, + stream=True, + ) + + return self._runtime.invoke_llm( + provider=self._provider, + model=self._model, + credentials=self._credentials, + model_parameters=model_parameters or {}, + prompt_messages=prompt_messages, + tools=list(tools) if tools else None, + stop=stop, + stream=False, + ) + + class PluginModelRuntime(ModelRuntime): """Plugin-backed runtime adapter bound to tenant context and optional caller scope.""" @@ -195,6 +266,34 @@ class PluginModelRuntime(ModelRuntime): return schema + @overload + def invoke_llm( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + tools: list[PromptMessageTool] | None, + stop: Sequence[str] | None, + stream: Literal[False], + ) -> LLMResult: ... 
+ + @overload + def invoke_llm( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + tools: list[PromptMessageTool] | None, + stop: Sequence[str] | None, + stream: Literal[True], + ) -> Generator[LLMResultChunk, None, None]: ... + def invoke_llm( self, *, @@ -206,9 +305,9 @@ class PluginModelRuntime(ModelRuntime): tools: list[PromptMessageTool] | None, stop: Sequence[str] | None, stream: bool, - ) -> Union[LLMResult, Generator[LLMResultChunk, None, None]]: + ) -> LLMResult | Generator[LLMResultChunk, None, None]: plugin_id, provider_name = self._split_provider(provider) - return self.client.invoke_llm( + result = self.client.invoke_llm( tenant_id=self.tenant_id, user_id=self.user_id, plugin_id=plugin_id, @@ -221,6 +320,81 @@ class PluginModelRuntime(ModelRuntime): stop=list(stop) if stop else None, stream=stream, ) + if stream: + return result + + return normalize_non_stream_runtime_result( + model=model, + prompt_messages=prompt_messages, + result=result, + ) + + @overload + def invoke_llm_with_structured_output( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + json_schema: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + stop: Sequence[str] | None, + stream: Literal[False], + ) -> LLMResultWithStructuredOutput: ... + + @overload + def invoke_llm_with_structured_output( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + json_schema: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + stop: Sequence[str] | None, + stream: Literal[True], + ) -> Generator[LLMResultChunkWithStructuredOutput, None, None]: ... 
+ + def invoke_llm_with_structured_output( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + json_schema: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + stop: Sequence[str] | None, + stream: bool, + ) -> LLMResultWithStructuredOutput | Generator[LLMResultChunkWithStructuredOutput, None, None]: + model_schema = self.get_model_schema( + provider=provider, + model_type=ModelType.LLM, + model=model, + credentials=credentials, + ) + if model_schema is None: + raise ValueError(f"Model schema not found for {model}") + + adapter = _PluginStructuredOutputModelInstance( + runtime=self, + provider=provider, + model=model, + credentials=credentials, + ) + return invoke_llm_with_structured_output_helper( + provider=provider, + model_schema=model_schema, + model_instance=cast(Any, adapter), + prompt_messages=prompt_messages, + json_schema=json_schema, + model_parameters=model_parameters, + tools=None, + stop=list(stop) if stop else None, + stream=stream, + ) def get_llm_num_tokens( self, diff --git a/api/core/plugin/impl/model_runtime_factory.py b/api/core/plugin/impl/model_runtime_factory.py index 35abd2ae8c..fbe307ea60 100644 --- a/api/core/plugin/impl/model_runtime_factory.py +++ b/api/core/plugin/impl/model_runtime_factory.py @@ -3,13 +3,46 @@ from __future__ import annotations from typing import TYPE_CHECKING from core.plugin.impl.model import PluginModelClient +from graphon.model_runtime.entities.model_entities import ModelType +from graphon.model_runtime.entities.provider_entities import ProviderEntity +from graphon.model_runtime.model_providers.base.ai_model import AIModel +from graphon.model_runtime.model_providers.base.large_language_model import LargeLanguageModel +from graphon.model_runtime.model_providers.base.moderation_model import ModerationModel +from graphon.model_runtime.model_providers.base.rerank_model import RerankModel +from 
graphon.model_runtime.model_providers.base.speech2text_model import Speech2TextModel +from graphon.model_runtime.model_providers.base.text_embedding_model import TextEmbeddingModel +from graphon.model_runtime.model_providers.base.tts_model import TTSModel from graphon.model_runtime.model_providers.model_provider_factory import ModelProviderFactory +from graphon.model_runtime.protocols.runtime import ModelRuntime if TYPE_CHECKING: from core.model_manager import ModelManager from core.plugin.impl.model_runtime import PluginModelRuntime from core.provider_manager import ProviderManager +_MODEL_CLASS_BY_TYPE: dict[ModelType, type[AIModel]] = { + ModelType.LLM: LargeLanguageModel, + ModelType.TEXT_EMBEDDING: TextEmbeddingModel, + ModelType.RERANK: RerankModel, + ModelType.SPEECH2TEXT: Speech2TextModel, + ModelType.MODERATION: ModerationModel, + ModelType.TTS: TTSModel, +} + + +def create_model_type_instance( + *, + runtime: ModelRuntime, + provider_schema: ProviderEntity, + model_type: ModelType, +) -> AIModel: + """Build the graphon model wrapper explicitly against the request runtime.""" + model_class = _MODEL_CLASS_BY_TYPE.get(model_type) + if model_class is None: + raise ValueError(f"Unsupported model type: {model_type}") + + return model_class(provider_schema=provider_schema, model_runtime=runtime) + class PluginModelAssembly: """Compose request-scoped model views on top of a single plugin runtime.""" @@ -38,9 +71,22 @@ class PluginModelAssembly: @property def model_provider_factory(self) -> ModelProviderFactory: if self._model_provider_factory is None: - self._model_provider_factory = ModelProviderFactory(model_runtime=self.model_runtime) + self._model_provider_factory = ModelProviderFactory(runtime=self.model_runtime) return self._model_provider_factory + def create_model_type_instance( + self, + *, + provider: str, + model_type: ModelType, + ) -> AIModel: + provider_schema = self.model_provider_factory.get_provider_schema(provider=provider) + return 
create_model_type_instance( + runtime=self.model_runtime, + provider_schema=provider_schema, + model_type=model_type, + ) + @property def provider_manager(self) -> ProviderManager: if self._provider_manager is None: diff --git a/api/core/plugin/utils/http_parser.py b/api/core/plugin/utils/http_parser.py index ce943929be..af0ff10bfb 100644 --- a/api/core/plugin/utils/http_parser.py +++ b/api/core/plugin/utils/http_parser.py @@ -151,6 +151,12 @@ def deserialize_response(raw_data: bytes) -> Response: response = Response(response=body, status=status_code) + # Replace Flask's default headers (e.g. Content-Type, Content-Length) with the + # parsed ones so we faithfully reproduce the original response. Use Headers.add + # rather than dict-style assignment so that repeated headers such as Set-Cookie + # (and any other multi-valued header per RFC 9110) are preserved instead of + # being overwritten. + response.headers.clear() for line in lines[1:]: if not line: continue @@ -158,6 +164,6 @@ def deserialize_response(raw_data: bytes) -> Response: if ":" not in line_str: continue name, value = line_str.split(":", 1) - response.headers[name] = value.strip() + response.headers.add(name, value.strip()) return response diff --git a/api/core/prompt/simple_prompt_transform.py b/api/core/prompt/simple_prompt_transform.py index 1665bdeb52..e836554ca0 100644 --- a/api/core/prompt/simple_prompt_transform.py +++ b/api/core/prompt/simple_prompt_transform.py @@ -123,12 +123,15 @@ class SimplePromptTransform(PromptTransform): for v in special_variable_keys: # support #context#, #query# and #histories# - if v == "#context#": - variables["#context#"] = context or "" - elif v == "#query#": - variables["#query#"] = query or "" - elif v == "#histories#": - variables["#histories#"] = histories or "" + match v: + case "#context#": + variables["#context#"] = context or "" + case "#query#": + variables["#query#"] = query or "" + case "#histories#": + variables["#histories#"] = histories or "" + case 
_: + pass prompt_template = prompt_template_config["prompt_template"] if not isinstance(prompt_template, PromptTemplateParser): diff --git a/api/core/prompt/utils/prompt_message_util.py b/api/core/prompt/utils/prompt_message_util.py index ba76eb0c4e..11414832e3 100644 --- a/api/core/prompt/utils/prompt_message_util.py +++ b/api/core/prompt/utils/prompt_message_util.py @@ -53,24 +53,27 @@ class PromptMessageUtil: files = [] if isinstance(prompt_message.content, list): for content in prompt_message.content: - if isinstance(content, TextPromptMessageContent): - text += content.data - elif isinstance(content, ImagePromptMessageContent): - files.append( - { - "type": "image", - "data": content.data[:10] + "...[TRUNCATED]..." + content.data[-10:], - "detail": content.detail.value, - } - ) - elif isinstance(content, AudioPromptMessageContent): - files.append( - { - "type": "audio", - "data": content.data[:10] + "...[TRUNCATED]..." + content.data[-10:], - "format": content.format, - } - ) + match content: + case TextPromptMessageContent(): + text += content.data + case ImagePromptMessageContent(): + files.append( + { + "type": "image", + "data": content.data[:10] + "...[TRUNCATED]..." + content.data[-10:], + "detail": content.detail.value, + } + ) + case AudioPromptMessageContent(): + files.append( + { + "type": "audio", + "data": content.data[:10] + "...[TRUNCATED]..." 
+ content.data[-10:], + "format": content.format, + } + ) + case _: + continue else: text = cast(str, prompt_message.content) diff --git a/api/core/provider_manager.py b/api/core/provider_manager.py index 8969825be4..9faa70a0b8 100644 --- a/api/core/provider_manager.py +++ b/api/core/provider_manager.py @@ -9,9 +9,9 @@ from typing import TYPE_CHECKING, Any from pydantic import TypeAdapter from sqlalchemy import select from sqlalchemy.exc import IntegrityError -from sqlalchemy.orm import Session from configs import dify_config +from core.db.session_factory import session_factory from core.entities.model_entities import DefaultModelEntity, DefaultModelProviderEntity from core.entities.provider_configuration import ProviderConfiguration, ProviderConfigurations, ProviderModelBundle from core.entities.provider_entities import ( @@ -56,7 +56,7 @@ from models.provider_ids import ModelProviderID from services.feature_service import FeatureService if TYPE_CHECKING: - from graphon.model_runtime.runtime import ModelRuntime + from graphon.model_runtime.protocols.runtime import ModelRuntime _credentials_adapter: TypeAdapter[dict[str, Any]] = TypeAdapter(dict[str, Any]) @@ -165,7 +165,7 @@ class ProviderManager: ) # Get all provider entities - model_provider_factory = ModelProviderFactory(model_runtime=self._model_runtime) + model_provider_factory = ModelProviderFactory(runtime=self._model_runtime) provider_entities = model_provider_factory.get_providers() # Get All preferred provider types of the workspace @@ -362,7 +362,7 @@ class ProviderManager: if not default_model: return None - model_provider_factory = ModelProviderFactory(model_runtime=self._model_runtime) + model_provider_factory = ModelProviderFactory(runtime=self._model_runtime) provider_schema = model_provider_factory.get_provider_schema(provider=default_model.provider_name) return DefaultModelEntity( @@ -445,7 +445,7 @@ class ProviderManager: @staticmethod def _get_all_providers(tenant_id: str) -> dict[str, 
list[Provider]]: provider_name_to_provider_records_dict = defaultdict(list) - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = select(Provider).where(Provider.tenant_id == tenant_id, Provider.is_valid == True) providers = session.scalars(stmt) for provider in providers: @@ -462,7 +462,7 @@ class ProviderManager: :return: """ provider_name_to_provider_model_records_dict = defaultdict(list) - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = select(ProviderModel).where(ProviderModel.tenant_id == tenant_id, ProviderModel.is_valid == True) provider_models = session.scalars(stmt) for provider_model in provider_models: @@ -478,7 +478,7 @@ class ProviderManager: :return: """ provider_name_to_preferred_provider_type_records_dict = {} - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = select(TenantPreferredModelProvider).where(TenantPreferredModelProvider.tenant_id == tenant_id) preferred_provider_types = session.scalars(stmt) provider_name_to_preferred_provider_type_records_dict = { @@ -496,7 +496,7 @@ class ProviderManager: :return: """ provider_name_to_provider_model_settings_dict = defaultdict(list) - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = select(ProviderModelSetting).where(ProviderModelSetting.tenant_id == tenant_id) provider_model_settings = session.scalars(stmt) for provider_model_setting in provider_model_settings: @@ -514,7 +514,7 @@ class ProviderManager: :return: """ provider_name_to_provider_model_credentials_dict = defaultdict(list) - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = select(ProviderModelCredential).where(ProviderModelCredential.tenant_id == tenant_id) provider_model_credentials = 
session.scalars(stmt) for provider_model_credential in provider_model_credentials: @@ -544,7 +544,7 @@ class ProviderManager: return {} provider_name_to_provider_load_balancing_model_configs_dict = defaultdict(list) - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = select(LoadBalancingModelConfig).where(LoadBalancingModelConfig.tenant_id == tenant_id) provider_load_balancing_configs = session.scalars(stmt) for provider_load_balancing_config in provider_load_balancing_configs: @@ -578,7 +578,7 @@ class ProviderManager: :param provider_name: provider name :return: """ - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = ( select(ProviderCredential) .where( @@ -608,7 +608,7 @@ class ProviderManager: :param model_type: model type :return: """ - with Session(db.engine, expire_on_commit=False) as session: + with session_factory.create_session() as session: stmt = ( select(ProviderModelCredential) .where( diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py index c60d19045a..7769878e70 100644 --- a/api/core/rag/datasource/retrieval_service.py +++ b/api/core/rag/datasource/retrieval_service.py @@ -21,7 +21,7 @@ from core.rag.index_processor.constant.query_type import QueryType from core.rag.models.document import Document from core.rag.rerank.rerank_type import RerankMode from core.rag.retrieval.retrieval_methods import RetrievalMethod -from core.tools.signature import sign_upload_file +from core.tools.signature import sign_upload_file_preview_url from extensions.ext_database import db from graphon.model_runtime.entities.model_entities import ModelType from models.dataset import ( @@ -217,10 +217,11 @@ class RetrievalService: """Deduplicate documents in O(n) while preserving first-seen order. 
Rules: - - For provider == "dify" and metadata["doc_id"] exists: keep the doc with the highest - metadata["score"] among duplicates; if a later duplicate has no score, ignore it. - - For non-dify documents (or dify without doc_id): deduplicate by content key - (provider, page_content), keeping the first occurrence. + - If metadata["doc_id"] exists (any provider): deduplicate by (provider, doc_id) key; + keep the doc with the highest metadata["score"] among duplicates. If a later duplicate + has no score, ignore it. + - If metadata["doc_id"] is absent: deduplicate by content key (provider, page_content), + keeping the first occurrence. """ if not documents: return documents @@ -231,11 +232,10 @@ class RetrievalService: order: list[tuple] = [] for doc in documents: - is_dify = doc.provider == "dify" - doc_id = (doc.metadata or {}).get("doc_id") if is_dify else None + doc_id = (doc.metadata or {}).get("doc_id") - if is_dify and doc_id: - key = ("dify", doc_id) + if doc_id: + key = (doc.provider or "dify", doc_id) if key not in chosen: chosen[key] = doc order.append(key) @@ -893,7 +893,7 @@ class RetrievalService: "name": upload_file.name, "extension": "." + upload_file.extension, "mime_type": upload_file.mime_type, - "source_url": sign_upload_file(upload_file.id, upload_file.extension), + "source_url": sign_upload_file_preview_url(upload_file.id, upload_file.extension), "size": upload_file.size, } return {"attachment_info": attachment_info, "segment_id": attachment_binding.segment_id} @@ -920,7 +920,7 @@ class RetrievalService: "name": upload_file.name, "extension": "." 
+ upload_file.extension, "mime_type": upload_file.mime_type, - "source_url": sign_upload_file(upload_file.id, upload_file.extension), + "source_url": sign_upload_file_preview_url(upload_file.id, upload_file.extension), "size": upload_file.size, } if attachment_binding: diff --git a/api/core/rag/datasource/vdb/vector_factory.py b/api/core/rag/datasource/vdb/vector_factory.py index 9575377174..1f82f7a081 100644 --- a/api/core/rag/datasource/vdb/vector_factory.py +++ b/api/core/rag/datasource/vdb/vector_factory.py @@ -144,8 +144,20 @@ class Vector: def get_vector_factory(vector_type: str) -> type[AbstractVectorFactory]: return get_vector_factory_class(vector_type) + @staticmethod + def _filter_empty_text_documents(documents: list[Document]) -> list[Document]: + filtered_documents = [document for document in documents if document.page_content.strip()] + skipped_count = len(documents) - len(filtered_documents) + if skipped_count: + logger.warning("skip %d empty documents before vector embedding", skipped_count) + return filtered_documents + def create(self, texts: list | None = None, **kwargs): if texts: + texts = self._filter_empty_text_documents(texts) + if not texts: + return + start = time.time() logger.info("start embedding %s texts %s", len(texts), start) batch_size = 1000 @@ -203,8 +215,14 @@ class Vector: logger.info("Embedding %s files took %s s", len(file_documents), time.time() - start) def add_texts(self, documents: list[Document], **kwargs): + documents = self._filter_empty_text_documents(documents) + if not documents: + return + if kwargs.get("duplicate_check", False): documents = self._filter_duplicate_texts(documents) + if not documents: + return embeddings = self._embeddings.embed_documents([document.page_content for document in documents]) self._vector_processor.create(texts=documents, embeddings=embeddings, **kwargs) diff --git a/api/core/rag/extractor/pdf_extractor.py b/api/core/rag/extractor/pdf_extractor.py index 02f0efc908..25f6fe3e2a 100644 --- 
a/api/core/rag/extractor/pdf_extractor.py +++ b/api/core/rag/extractor/pdf_extractor.py @@ -115,7 +115,7 @@ class PdfExtractor(BaseExtractor): """ image_content = [] upload_files = [] - base_url = dify_config.INTERNAL_FILES_URL or dify_config.FILES_URL + base_url = dify_config.FILES_URL try: image_objects = page.get_objects(filter=(pdfium_c.FPDF_PAGEOBJ_IMAGE,)) diff --git a/api/core/rag/extractor/word_extractor.py b/api/core/rag/extractor/word_extractor.py index 0330a43b28..60f8906181 100644 --- a/api/core/rag/extractor/word_extractor.py +++ b/api/core/rag/extractor/word_extractor.py @@ -110,7 +110,7 @@ class WordExtractor(BaseExtractor): def _extract_images_from_docx(self, doc): image_count = 0 image_map = {} - base_url = dify_config.INTERNAL_FILES_URL or dify_config.FILES_URL + base_url = dify_config.FILES_URL for r_id, rel in doc.part.rels.items(): if "image" in rel.target_ref: diff --git a/api/core/rag/index_processor/processor/parent_child_index_processor.py b/api/core/rag/index_processor/processor/parent_child_index_processor.py index ba277d5018..a26a900512 100644 --- a/api/core/rag/index_processor/processor/parent_child_index_processor.py +++ b/api/core/rag/index_processor/processor/parent_child_index_processor.py @@ -29,6 +29,7 @@ from libs import helper from models import Account from models.dataset import ChildChunk, Dataset, DatasetProcessRule, DocumentSegment from models.dataset import Document as DatasetDocument +from models.enums import ProcessRuleMode from services.account_service import AccountService from services.summary_index_service import SummaryIndexService @@ -325,7 +326,7 @@ class ParentChildIndexProcessor(BaseIndexProcessor): # update document parent mode dataset_process_rule = DatasetProcessRule( dataset_id=dataset.id, - mode="hierarchical", + mode=ProcessRuleMode.HIERARCHICAL, rules=json.dumps( { "parent_mode": parent_childs.parent_mode, diff --git a/api/core/rag/retrieval/dataset_retrieval.py 
b/api/core/rag/retrieval/dataset_retrieval.py index 5631b3a921..010566d203 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -52,7 +52,7 @@ from core.rag.retrieval.template_prompts import ( METADATA_FILTER_USER_PROMPT_2, METADATA_FILTER_USER_PROMPT_3, ) -from core.tools.signature import sign_upload_file +from core.tools.signature import sign_upload_file_preview_url from core.tools.utils.dataset_retriever.dataset_retriever_base_tool import DatasetRetrieverBaseTool from core.workflow.file_reference import build_file_reference from core.workflow.nodes.knowledge_retrieval import exc @@ -529,7 +529,7 @@ class DatasetRetrieval: ), size=upload_file.size, storage_key=upload_file.key, - url=sign_upload_file(upload_file.id, upload_file.extension), + url=sign_upload_file_preview_url(upload_file.id, upload_file.extension), ) context_files.append(attachment_info) if show_retrieve_source: diff --git a/api/core/tools/signature.py b/api/core/tools/signature.py index 1807226924..3c7b523ff1 100644 --- a/api/core/tools/signature.py +++ b/api/core/tools/signature.py @@ -26,12 +26,14 @@ def sign_tool_file(tool_file_id: str, extension: str, for_external: bool = True) return f"{file_preview_url}?timestamp={timestamp}&nonce={nonce}&sign={encoded_sign}" -def sign_upload_file(upload_file_id: str, extension: str) -> str: +def sign_upload_file_preview_url(upload_file_id: str, extension: str) -> str: """ - sign file to get a temporary url for plugin access + Sign an upload file to get a temporary image preview URL. + + The URL generated by this function is only for external preview and download, + not for internal communication. 
""" - # Use internal URL for plugin/tool file access in Docker environments - base_url = dify_config.INTERNAL_FILES_URL or dify_config.FILES_URL + base_url = dify_config.FILES_URL file_preview_url = f"{base_url}/files/{upload_file_id}/image-preview" timestamp = str(int(time.time())) diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py index 87cf6d7085..0a7811bb53 100644 --- a/api/core/tools/tool_manager.py +++ b/api/core/tools/tool_manager.py @@ -1078,6 +1078,13 @@ class ToolManager: if parameter.form == ToolParameter.ToolParameterForm.FORM: if variable_pool: config = tool_configurations.get(parameter.name, {}) + + selector_value = cls._extract_runtime_selector_value(parameter, config) + if selector_value is not None: + # Selector parameters carry structured dictionaries, not scalar ToolInput values. + runtime_parameters[parameter.name] = selector_value + continue + if not (config and isinstance(config, dict) and config.get("value") is not None): continue tool_input = ToolNodeData.ToolInput.model_validate(tool_configurations.get(parameter.name, {})) @@ -1105,5 +1112,39 @@ class ToolManager: runtime_parameters[parameter.name] = value return runtime_parameters + @classmethod + def _extract_runtime_selector_value(cls, parameter: ToolParameter, config: Any) -> dict[str, Any] | None: + if parameter.type not in { + ToolParameter.ToolParameterType.MODEL_SELECTOR, + ToolParameter.ToolParameterType.APP_SELECTOR, + }: + return None + if not isinstance(config, dict): + return None + + input_value = config.get("value") + if isinstance(input_value, dict) and cls._is_selector_value(parameter, input_value): + return cast("dict[str, Any]", parameter.init_frontend_parameter(input_value)) + + if cls._is_selector_value(parameter, config): + selector_value = dict(config) + selector_value.pop("type", None) + selector_value.pop("value", None) + return cast("dict[str, Any]", parameter.init_frontend_parameter(selector_value)) + + return None + + @classmethod + def 
_is_selector_value(cls, parameter: ToolParameter, value: Mapping[str, Any]) -> bool: + if parameter.type == ToolParameter.ToolParameterType.MODEL_SELECTOR: + return ( + isinstance(value.get("provider"), str) + and isinstance(value.get("model"), str) + and isinstance(value.get("model_type"), str) + ) + if parameter.type == ToolParameter.ToolParameterType.APP_SELECTOR: + return isinstance(value.get("app_id"), str) + return False + ToolManager.load_hardcoded_providers_cache() diff --git a/api/core/tools/utils/message_transformer.py b/api/core/tools/utils/message_transformer.py index 5679466cbc..4c6e647335 100644 --- a/api/core/tools/utils/message_transformer.py +++ b/api/core/tools/utils/message_transformer.py @@ -23,36 +23,37 @@ _TOOL_FILE_URL_PATTERN = re.compile(r"(?:^|/+)files/tools/(?P[^/?# def safe_json_value(v): - if isinstance(v, datetime): - tz_name = "UTC" - if isinstance(current_user, Account) and current_user.timezone is not None: - tz_name = current_user.timezone - return v.astimezone(pytz.timezone(tz_name)).isoformat() - elif isinstance(v, date): - return v.isoformat() - elif isinstance(v, UUID): - return str(v) - elif isinstance(v, Decimal): - return float(v) - elif isinstance(v, bytes): - try: - return v.decode("utf-8") - except UnicodeDecodeError: - return v.hex() - elif isinstance(v, memoryview): - return v.tobytes().hex() - elif isinstance(v, np.integer): - return int(v) - elif isinstance(v, np.floating): - return float(v) - elif isinstance(v, np.ndarray): - return v.tolist() - elif isinstance(v, dict): - return safe_json_dict(v) - elif isinstance(v, list | tuple | set): - return [safe_json_value(i) for i in v] - else: - return v + match v: + case datetime(): + tz_name = "UTC" + if isinstance(current_user, Account) and current_user.timezone is not None: + tz_name = current_user.timezone + return v.astimezone(pytz.timezone(tz_name)).isoformat() + case date(): + return v.isoformat() + case UUID(): + return str(v) + case Decimal(): + return float(v) + 
case bytes(): + try: + return v.decode("utf-8") + except UnicodeDecodeError: + return v.hex() + case memoryview(): + return v.tobytes().hex() + case np.integer(): + return int(v) + case np.floating(): + return float(v) + case np.ndarray(): + return v.tolist() + case dict(): + return safe_json_dict(v) + case list() | tuple() | set(): + return [safe_json_value(i) for i in v] + case _: + return v def safe_json_dict(d: dict[str, Any]): diff --git a/api/core/tools/workflow_as_tool/tool.py b/api/core/tools/workflow_as_tool/tool.py index cd8c6352b5..3fbd456fe5 100644 --- a/api/core/tools/workflow_as_tool/tool.py +++ b/api/core/tools/workflow_as_tool/tool.py @@ -9,6 +9,7 @@ from sqlalchemy import select from core.app.file_access import DatabaseFileAccessController from core.db.session_factory import session_factory +from core.helper.trace_id_helper import ParentTraceContext, extract_parent_trace_context_from_args from core.tools.__base.tool import Tool from core.tools.__base.tool_runtime import ToolRuntime from core.tools.entities.tool_entities import ( @@ -36,6 +37,8 @@ class WorkflowTool(Tool): Workflow tool. 
""" + _parent_trace_context: ParentTraceContext | None + def __init__( self, workflow_app_id: str, @@ -54,6 +57,7 @@ class WorkflowTool(Tool): self.workflow_call_depth = workflow_call_depth self.label = label self._latest_usage = LLMUsage.empty_usage() + self._parent_trace_context = None super().__init__(entity=entity, runtime=runtime) @@ -94,11 +98,17 @@ class WorkflowTool(Tool): self._latest_usage = LLMUsage.empty_usage() + generator_args: dict[str, Any] = {"inputs": tool_parameters, "files": files} + if self._parent_trace_context: + generator_args.update( + extract_parent_trace_context_from_args({"parent_trace_context": self._parent_trace_context}) + ) + result = generator.generate( app_model=app, workflow=workflow, user=user, - args={"inputs": tool_parameters, "files": files}, + args=generator_args, invoke_from=self.runtime.invoke_from, streaming=False, call_depth=self.workflow_call_depth + 1, @@ -194,7 +204,7 @@ class WorkflowTool(Tool): :return: the new tool """ - return self.__class__( + forked = self.__class__( entity=self.entity.model_copy(), runtime=runtime, workflow_app_id=self.workflow_app_id, @@ -204,6 +214,24 @@ class WorkflowTool(Tool): version=self.version, label=self.label, ) + forked._parent_trace_context = self._parent_trace_context.model_copy() if self._parent_trace_context else None + return forked + + def set_parent_trace_context( + self, + *, + parent_workflow_run_id: str, + parent_node_execution_id: str, + ) -> None: + """Attach outer workflow trace context without exposing it as tool input.""" + self._parent_trace_context = ParentTraceContext( + parent_workflow_run_id=parent_workflow_run_id, + parent_node_execution_id=parent_node_execution_id, + ) + + def clear_parent_trace_context(self) -> None: + """Remove parent trace context before invoking this tool outside a nested workflow.""" + self._parent_trace_context = None def _resolve_user(self, user_id: str) -> Account | EndUser | None: """ diff --git 
a/api/core/workflow/human_input_adapter.py b/api/core/workflow/human_input_adapter.py index 4b765e6aea..731ae2b858 100644 --- a/api/core/workflow/human_input_adapter.py +++ b/api/core/workflow/human_input_adapter.py @@ -272,6 +272,14 @@ def _adapt_tool_node_data_for_graph(node_data: Mapping[str, Any]) -> dict[str, A normalized_tool_configurations[name] = value continue + selector_value = _extract_selector_configuration(value) + if selector_value is not None: + # Model/app selectors are dictionaries even when they come through the legacy tool configuration path. + # Move them to tool_parameters so graph validation does not flatten them as primitive constants. + found_legacy_tool_inputs = True + normalized_tool_parameters.setdefault(name, {"type": "constant", "value": selector_value}) + continue + input_type = value.get("type") input_value = value.get("value") if input_type not in {"mixed", "variable", "constant"}: @@ -310,6 +318,28 @@ def _flatten_legacy_tool_configuration_value(*, input_type: Any, input_value: An return None +def _extract_selector_configuration(value: Mapping[str, Any]) -> dict[str, Any] | None: + input_value = value.get("value") + if isinstance(input_value, Mapping) and _is_selector_configuration(input_value): + return dict(input_value) + + if _is_selector_configuration(value): + selector_value = dict(value) + selector_value.pop("type", None) + selector_value.pop("value", None) + return selector_value + + return None + + +def _is_selector_configuration(value: Mapping[str, Any]) -> bool: + return ( + isinstance(value.get("provider"), str) + and isinstance(value.get("model"), str) + and isinstance(value.get("model_type"), str) + ) or isinstance(value.get("app_id"), str) + + def _normalize_email_recipients(recipients: Mapping[str, Any]) -> dict[str, Any]: normalized = dict(recipients) diff --git a/api/core/workflow/node_factory.py b/api/core/workflow/node_factory.py index de4eae1b22..a306b1c9ac 100644 --- a/api/core/workflow/node_factory.py +++ 
b/api/core/workflow/node_factory.py @@ -365,7 +365,8 @@ class DifyNodeFactory(NodeFactory): (including pydantic ValidationError, which subclasses ValueError), if node type is unknown, or if no implementation exists for the resolved version """ - typed_node_config = NodeConfigDictAdapter.validate_python(adapt_node_config_for_graph(node_config)) + adapted_node_config = adapt_node_config_for_graph(node_config) + typed_node_config = NodeConfigDictAdapter.validate_python(adapted_node_config) node_id = typed_node_config["id"] node_data = typed_node_config["data"] node_class = self._resolve_node_class(node_type=node_data.type, node_version=str(node_data.version)) @@ -440,9 +441,10 @@ class DifyNodeFactory(NodeFactory): }, } node_init_kwargs = node_init_kwargs_factories.get(node_type, lambda: {})() + constructor_node_data = resolved_node_data.model_dump(mode="python", by_alias=True) return node_class( node_id=node_id, - config=resolved_node_data, + data=constructor_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, **node_init_kwargs, @@ -474,10 +476,7 @@ class DifyNodeFactory(NodeFactory): include_retriever_attachment_loader: bool, include_jinja2_template_renderer: bool, ) -> dict[str, object]: - validated_node_data = cast( - LLMCompatibleNodeData, - self._validate_resolved_node_data(node_class=node_class, node_data=node_data), - ) + validated_node_data = cast(LLMCompatibleNodeData, node_data) model_instance = self._build_model_instance_for_llm_node(validated_node_data) node_init_kwargs: dict[str, object] = { "credentials_provider": self._llm_credentials_provider, diff --git a/api/core/workflow/node_runtime.py b/api/core/workflow/node_runtime.py index b8725853c4..db7d78bf45 100644 --- a/api/core/workflow/node_runtime.py +++ b/api/core/workflow/node_runtime.py @@ -10,6 +10,7 @@ from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY, DifyRunContext from core.app.file_access 
import DatabaseFileAccessController from core.callback_handler.workflow_tool_callback_handler import DifyWorkflowCallbackHandler +from core.helper.trace_id_helper import ParentTraceContext from core.llm_generator.output_parser.errors import OutputParserError from core.llm_generator.output_parser.structured_output import invoke_llm_with_structured_output from core.model_manager import ModelInstance @@ -358,6 +359,7 @@ class _WorkflowToolRuntimeBinding: tool: Tool conversation_id: str | None = None + parent_trace_context: ParentTraceContext | None = None class DifyToolNodeRuntime(ToolNodeRuntimeProtocol): @@ -378,6 +380,7 @@ class DifyToolNodeRuntime(ToolNodeRuntimeProtocol): node_id: str, node_data: ToolNodeData, variable_pool, + node_execution_id: str | None = None, ) -> ToolRuntimeHandle: try: tool_runtime = ToolManager.get_workflow_tool_runtime( @@ -397,7 +400,25 @@ class DifyToolNodeRuntime(ToolNodeRuntimeProtocol): conversation_id = ( None if variable_pool is None else get_system_text(variable_pool, SystemVariableKey.CONVERSATION_ID) ) - return ToolRuntimeHandle(raw=_WorkflowToolRuntimeBinding(tool=tool_runtime, conversation_id=conversation_id)) + parent_trace_context: ParentTraceContext | None = None + if self._is_workflow_tool_provider(node_data): + outer_workflow_run_id = ( + None + if variable_pool is None + else get_system_text(variable_pool, SystemVariableKey.WORKFLOW_EXECUTION_ID) + ) + if isinstance(outer_workflow_run_id, str) and isinstance(node_execution_id, str): + parent_trace_context = ParentTraceContext( + parent_workflow_run_id=outer_workflow_run_id, + parent_node_execution_id=node_execution_id, + ) + return ToolRuntimeHandle( + raw=_WorkflowToolRuntimeBinding( + tool=tool_runtime, + conversation_id=conversation_id, + parent_trace_context=parent_trace_context, + ) + ) def get_runtime_parameters( self, @@ -421,6 +442,13 @@ class DifyToolNodeRuntime(ToolNodeRuntimeProtocol): runtime_binding = self._binding_from_handle(tool_runtime) tool = 
runtime_binding.tool callback = DifyWorkflowCallbackHandler() + if runtime_binding.parent_trace_context and hasattr(tool, "set_parent_trace_context"): + tool.set_parent_trace_context( + parent_workflow_run_id=runtime_binding.parent_trace_context.parent_workflow_run_id, + parent_node_execution_id=runtime_binding.parent_trace_context.parent_node_execution_id, + ) + elif hasattr(tool, "clear_parent_trace_context"): + tool.clear_parent_trace_context() try: messages = ToolEngine.generic_invoke( @@ -501,14 +529,22 @@ class DifyToolNodeRuntime(ToolNodeRuntimeProtocol): @staticmethod def _build_tool_runtime_spec(node_data: ToolNodeData) -> _WorkflowToolRuntimeSpec: + tool_configurations = dict(node_data.tool_configurations) + tool_configurations.update( + {name: tool_input.model_dump(mode="python") for name, tool_input in node_data.tool_parameters.items()} + ) return _WorkflowToolRuntimeSpec( provider_type=CoreToolProviderType(node_data.provider_type.value), provider_id=node_data.provider_id, tool_name=node_data.tool_name, - tool_configurations=dict(node_data.tool_configurations), + tool_configurations=tool_configurations, credential_id=node_data.credential_id, ) + @staticmethod + def _is_workflow_tool_provider(node_data: ToolNodeData) -> bool: + return node_data.provider_type.value == CoreToolProviderType.WORKFLOW.value + def _adapt_messages( self, messages: Generator[CoreToolInvokeMessage, None, None], diff --git a/api/core/workflow/nodes/agent/agent_node.py b/api/core/workflow/nodes/agent/agent_node.py index 68a24e86b1..17d71668cb 100644 --- a/api/core/workflow/nodes/agent/agent_node.py +++ b/api/core/workflow/nodes/agent/agent_node.py @@ -35,7 +35,7 @@ class AgentNode(Node[AgentNodeData]): def __init__( self, node_id: str, - config: AgentNodeData, + data: AgentNodeData, *, graph_init_params: GraphInitParams, graph_runtime_state: GraphRuntimeState, @@ -46,7 +46,7 @@ class AgentNode(Node[AgentNodeData]): ) -> None: super().__init__( node_id=node_id, - config=config, + 
data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) diff --git a/api/core/workflow/nodes/datasource/datasource_node.py b/api/core/workflow/nodes/datasource/datasource_node.py index f3006c4242..a4ef3d1ea7 100644 --- a/api/core/workflow/nodes/datasource/datasource_node.py +++ b/api/core/workflow/nodes/datasource/datasource_node.py @@ -36,14 +36,14 @@ class DatasourceNode(Node[DatasourceNodeData]): def __init__( self, node_id: str, - config: DatasourceNodeData, + data: DatasourceNodeData, *, graph_init_params: "GraphInitParams", graph_runtime_state: "GraphRuntimeState", ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) diff --git a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py index 9c1b7ab2c4..1d60f530a1 100644 --- a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py +++ b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py @@ -32,14 +32,14 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): def __init__( self, node_id: str, - config: KnowledgeIndexNodeData, + data: KnowledgeIndexNodeData, *, graph_init_params: "GraphInitParams", graph_runtime_state: "GraphRuntimeState", ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index 25f73e446d..1aba2737b0 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -71,14 +71,14 @@ class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node[KnowledgeRetrievalNodeD def __init__( self, node_id: str, - config: 
KnowledgeRetrievalNodeData, + data: KnowledgeRetrievalNodeData, *, graph_init_params: "GraphInitParams", graph_runtime_state: "GraphRuntimeState", ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) diff --git a/api/core/workflow/system_variables.py b/api/core/workflow/system_variables.py index 9d15a3fcea..77ef3826e9 100644 --- a/api/core/workflow/system_variables.py +++ b/api/core/workflow/system_variables.py @@ -3,7 +3,7 @@ from __future__ import annotations from collections import defaultdict from collections.abc import Mapping, Sequence from enum import StrEnum -from typing import Any, Protocol, cast +from typing import Any, Protocol from uuid import uuid4 from graphon.enums import BuiltinNodeTypes @@ -82,13 +82,10 @@ def build_system_variables(values: Mapping[str, Any] | None = None, /, **kwargs: normalized = _normalize_system_variable_values(values, **kwargs) return [ - cast( - Variable, - segment_to_variable( - segment=build_segment(value), - selector=system_variable_selector(key), - name=key, - ), + segment_to_variable( + segment=build_segment(value), + selector=system_variable_selector(key), + name=key, ) for key, value in normalized.items() ] @@ -130,13 +127,10 @@ def build_bootstrap_variables( for node_id, value in rag_pipeline_variables_map.items(): variables.append( - cast( - Variable, - segment_to_variable( - segment=build_segment(value), - selector=(RAG_PIPELINE_VARIABLE_NODE_ID, node_id), - name=node_id, - ), + segment_to_variable( + segment=build_segment(value), + selector=(RAG_PIPELINE_VARIABLE_NODE_ID, node_id), + name=node_id, ) ) diff --git a/api/core/workflow/workflow_entry.py b/api/core/workflow/workflow_entry.py index 4e2f603e5b..3019704dac 100644 --- a/api/core/workflow/workflow_entry.py +++ b/api/core/workflow/workflow_entry.py @@ -46,6 +46,11 @@ _file_access_controller = DatabaseFileAccessController() class _WorkflowChildEngineBuilder: 
+ tenant_id: str + + def __init__(self, *, tenant_id: str) -> None: + self.tenant_id = tenant_id + @staticmethod def _has_node_id(graph_config: Mapping[str, Any], node_id: str) -> bool | None: """ @@ -107,7 +112,7 @@ class _WorkflowChildEngineBuilder: config=config, child_engine_builder=self, ) - child_engine.layer(LLMQuotaLayer()) + child_engine.layer(LLMQuotaLayer(tenant_id=self.tenant_id)) return child_engine @@ -176,7 +181,7 @@ class WorkflowEntry: self.command_channel = command_channel execution_context = capture_current_context() graph_runtime_state.execution_context = execution_context - self._child_engine_builder = _WorkflowChildEngineBuilder() + self._child_engine_builder = _WorkflowChildEngineBuilder(tenant_id=tenant_id) self.graph_engine = GraphEngine( workflow_id=workflow_id, graph=graph, @@ -208,7 +213,7 @@ class WorkflowEntry: max_steps=dify_config.WORKFLOW_MAX_EXECUTION_STEPS, max_time=dify_config.WORKFLOW_MAX_EXECUTION_TIME ) self.graph_engine.layer(limits_layer) - self.graph_engine.layer(LLMQuotaLayer()) + self.graph_engine.layer(LLMQuotaLayer(tenant_id=tenant_id)) # Add observability layer when OTel is enabled if dify_config.ENABLE_OTEL or is_instrument_flag_enabled(): diff --git a/api/dev/generate_fastopenapi_specs.py b/api/dev/generate_fastopenapi_specs.py new file mode 100644 index 0000000000..5a94d32b93 --- /dev/null +++ b/api/dev/generate_fastopenapi_specs.py @@ -0,0 +1,95 @@ +"""Generate FastOpenAPI OpenAPI 3.0 specs without booting the full backend.""" + +from __future__ import annotations + +import argparse +import json +import logging +import sys +from dataclasses import dataclass +from pathlib import Path + +API_ROOT = Path(__file__).resolve().parents[1] +if str(API_ROOT) not in sys.path: + sys.path.insert(0, str(API_ROOT)) + +from dev.generate_swagger_specs import apply_runtime_defaults, drop_null_values, sort_openapi_arrays + +logger = logging.getLogger(__name__) + + +@dataclass(frozen=True) +class FastOpenApiSpecTarget: + route: str + 
filename: str + + +FASTOPENAPI_SPEC_TARGETS: tuple[FastOpenApiSpecTarget, ...] = ( + FastOpenApiSpecTarget(route="/fastopenapi/openapi.json", filename="fastopenapi-console-openapi.json"), +) + + +def create_fastopenapi_spec_app(): + """Build a minimal Flask app that only mounts FastOpenAPI docs routes.""" + + apply_runtime_defaults() + + from app_factory import create_flask_app_with_configs + from extensions import ext_fastopenapi + + app = create_flask_app_with_configs() + ext_fastopenapi.init_app(app) + return app + + +def generate_fastopenapi_specs(output_dir: Path) -> list[Path]: + """Write FastOpenAPI specs to `output_dir` and return the written paths.""" + + output_dir.mkdir(parents=True, exist_ok=True) + + app = create_fastopenapi_spec_app() + client = app.test_client() + + written_paths: list[Path] = [] + for target in FASTOPENAPI_SPEC_TARGETS: + response = client.get(target.route) + if response.status_code != 200: + raise RuntimeError(f"failed to fetch {target.route}: {response.status_code}") + + payload = response.get_json() + if not isinstance(payload, dict): + raise RuntimeError(f"unexpected response payload for {target.route}") + payload = drop_null_values(payload) + payload = sort_openapi_arrays(payload) + + output_path = output_dir / target.filename + output_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8") + written_paths.append(output_path) + + return written_paths + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "-o", + "--output-dir", + type=Path, + default=Path("openapi"), + help="Directory where the OpenAPI JSON files will be written.", + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + written_paths = generate_fastopenapi_specs(args.output_dir) + + for path in written_paths: + logger.debug(path) + + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git 
a/api/dev/generate_swagger_markdown_docs.py b/api/dev/generate_swagger_markdown_docs.py new file mode 100644 index 0000000000..e0028c63f6 --- /dev/null +++ b/api/dev/generate_swagger_markdown_docs.py @@ -0,0 +1,182 @@ +"""Generate OpenAPI JSON specs and split Markdown API docs. + +The Markdown step uses `swagger-markdown`, the same converter family as the +Swagger Markdown UI, so CI and local regeneration catch converter-incompatible +OpenAPI output early. +""" + +from __future__ import annotations + +import argparse +import logging +import subprocess +import sys +import tempfile +from pathlib import Path + +API_ROOT = Path(__file__).resolve().parents[1] +if str(API_ROOT) not in sys.path: + sys.path.insert(0, str(API_ROOT)) + +from dev.generate_fastopenapi_specs import FASTOPENAPI_SPEC_TARGETS, generate_fastopenapi_specs +from dev.generate_swagger_specs import SPEC_TARGETS, generate_specs + +logger = logging.getLogger(__name__) + +SWAGGER_MARKDOWN_PACKAGE = "swagger-markdown@3.0.0" +CONSOLE_SWAGGER_FILENAME = "console-swagger.json" +STALE_COMBINED_MARKDOWN_FILENAME = "api-reference.md" + + +def _convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + markdown_path.parent.mkdir(parents=True, exist_ok=True) + with tempfile.TemporaryDirectory(prefix=f"{markdown_path.stem}-", dir=markdown_path.parent) as temp_dir: + temp_markdown_path = Path(temp_dir) / markdown_path.name + result = subprocess.run( + [ + "npx", + "--yes", + SWAGGER_MARKDOWN_PACKAGE, + "-i", + str(spec_path), + "-o", + str(temp_markdown_path), + ], + check=False, + capture_output=True, + text=True, + ) + if result.returncode != 0: + raise subprocess.CalledProcessError( + result.returncode, + result.args, + output=result.stdout, + stderr=result.stderr, + ) + if not temp_markdown_path.exists(): + converter_output = "\n".join(item for item in (result.stdout, result.stderr) if item).strip() + raise RuntimeError(f"swagger-markdown did not write {markdown_path}: {converter_output}") + + 
converted_markdown = temp_markdown_path.read_text(encoding="utf-8") + if not converted_markdown.strip(): + raise RuntimeError(f"swagger-markdown wrote an empty document for {markdown_path}") + + markdown_path.write_text(converted_markdown, encoding="utf-8") + + +def _demote_markdown_headings(markdown: str, *, levels: int = 1) -> str: + """Nest generated Markdown under another Markdown section.""" + + heading_prefix = "#" * levels + lines = [] + for line in markdown.splitlines(): + if line.startswith("#"): + lines.append(f"{heading_prefix}{line}") + else: + lines.append(line) + return "\n".join(lines).strip() + + +def _append_fastopenapi_markdown(console_markdown_path: Path, fastopenapi_markdown_path: Path) -> None: + """Append FastOpenAPI console docs to the existing console API Markdown.""" + + console_markdown = console_markdown_path.read_text(encoding="utf-8").rstrip() + fastopenapi_markdown = _demote_markdown_headings( + fastopenapi_markdown_path.read_text(encoding="utf-8"), + levels=2, + ) + console_markdown_path.write_text( + "\n\n".join( + [ + console_markdown, + "## FastOpenAPI Preview (OpenAPI 3.0)", + fastopenapi_markdown, + ] + ) + + "\n", + encoding="utf-8", + ) + + +def generate_markdown_docs( + swagger_dir: Path, + markdown_dir: Path, + *, + keep_swagger_json: bool = False, +) -> list[Path]: + """Generate intermediate specs, convert them to split Markdown API docs, and return Markdown paths.""" + + swagger_paths = generate_specs(swagger_dir) + fastopenapi_paths = generate_fastopenapi_specs(swagger_dir) + spec_paths = [*swagger_paths, *fastopenapi_paths] + swagger_paths_by_name = {path.name: path for path in swagger_paths} + fastopenapi_paths_by_name = {path.name: path for path in fastopenapi_paths} + + markdown_dir.mkdir(parents=True, exist_ok=True) + + written_paths: list[Path] = [] + try: + with tempfile.TemporaryDirectory(prefix="dify-api-docs-") as temp_dir: + temp_markdown_dir = Path(temp_dir) + + for target in SPEC_TARGETS: + swagger_path = 
swagger_paths_by_name[target.filename] + markdown_path = markdown_dir / f"{swagger_path.stem}.md" + _convert_spec_to_markdown(swagger_path, markdown_path) + written_paths.append(markdown_path) + + for target in FASTOPENAPI_SPEC_TARGETS: # type: ignore + fastopenapi_path = fastopenapi_paths_by_name[target.filename] + markdown_path = temp_markdown_dir / f"{fastopenapi_path.stem}.md" + _convert_spec_to_markdown(fastopenapi_path, markdown_path) + + console_markdown_path = markdown_dir / f"{Path(CONSOLE_SWAGGER_FILENAME).stem}.md" + _append_fastopenapi_markdown(console_markdown_path, markdown_path) + + (markdown_dir / STALE_COMBINED_MARKDOWN_FILENAME).unlink(missing_ok=True) + finally: + if not keep_swagger_json: + for path in spec_paths: + path.unlink(missing_ok=True) + + return written_paths + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--swagger-dir", + type=Path, + default=Path("openapi"), + help="Directory where intermediate JSON spec files will be written.", + ) + parser.add_argument( + "--markdown-dir", + type=Path, + default=Path("openapi/markdown"), + help="Directory where split Markdown API docs will be written.", + ) + parser.add_argument( + "--keep-swagger-json", + action="store_true", + help="Keep intermediate JSON spec files after Markdown generation.", + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + written_paths = generate_markdown_docs( + args.swagger_dir, + args.markdown_dir, + keep_swagger_json=args.keep_swagger_json, + ) + + for path in written_paths: + logger.debug(path) + + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/api/dev/generate_swagger_specs.py b/api/dev/generate_swagger_specs.py index 7e9688bfb4..254310cd2a 100644 --- a/api/dev/generate_swagger_specs.py +++ b/api/dev/generate_swagger_specs.py @@ -9,15 +9,17 @@ which is unnecessary when the goal is only to serialize the Flask-RESTX from 
__future__ import annotations import argparse +import hashlib import json import logging import os import sys +from collections.abc import MutableMapping from dataclasses import dataclass from pathlib import Path +from typing import Protocol, TypeGuard from flask import Flask -from flask_restx.swagger import Swagger logger = logging.getLogger(__name__) @@ -30,19 +32,107 @@ if str(API_ROOT) not in sys.path: class SpecTarget: route: str filename: str + namespace: str + + +class RestxApi(Protocol): + models: MutableMapping[str, object] + + def model(self, name: str, model: dict[object, object]) -> object: ... SPEC_TARGETS: tuple[SpecTarget, ...] = ( - SpecTarget(route="/console/api/swagger.json", filename="console-swagger.json"), - SpecTarget(route="/api/swagger.json", filename="web-swagger.json"), - SpecTarget(route="/v1/swagger.json", filename="service-swagger.json"), + SpecTarget(route="/console/api/swagger.json", filename="console-swagger.json", namespace="console"), + SpecTarget(route="/api/swagger.json", filename="web-swagger.json", namespace="web"), + SpecTarget(route="/v1/swagger.json", filename="service-swagger.json", namespace="service"), ) -_ORIGINAL_REGISTER_MODEL = Swagger.register_model -_ORIGINAL_REGISTER_FIELD = Swagger.register_field + +def _is_inline_field_map(value: object) -> TypeGuard[dict[object, object]]: + """Return whether a nested field map is an anonymous inline mapping.""" + + from flask_restx.model import Model, OrderedModel + + return isinstance(value, dict) and not isinstance(value, (Model, OrderedModel)) -def _apply_runtime_defaults() -> None: +def _jsonable_schema_value(value: object) -> object: + """Return a deterministic JSON-serializable representation for schema fingerprints.""" + + if value is None or isinstance(value, str | int | float | bool): + return value + if isinstance(value, list | tuple): + return [_jsonable_schema_value(item) for item in value] + if isinstance(value, dict): + return {str(key): 
_jsonable_schema_value(item) for key, item in value.items()} + value_type = type(value) + return f"<{value_type.__module__}.{value_type.__qualname__}>" + + +def _field_signature(field: object) -> object: + """Build a stable signature for a Flask-RESTX field object.""" + + from flask_restx import fields + from flask_restx.model import instance + + field_instance = instance(field) + signature: dict[str, object] = { + "class": f"{field_instance.__class__.__module__}.{field_instance.__class__.__qualname__}" + } + + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested): + signature["nested"] = _inline_model_signature(nested) + else: + signature["nested"] = getattr( + nested, + "name", + f"<{type(nested).__module__}.{type(nested).__qualname__}>", + ) + elif hasattr(field_instance, "container"): + signature["container"] = _field_signature(field_instance.container) + else: + schema = getattr(field_instance, "__schema__", None) + if isinstance(schema, dict): + signature["schema"] = _jsonable_schema_value(schema) + + for attr_name in ( + "attribute", + "default", + "description", + "example", + "max", + "min", + "nullable", + "readonly", + "required", + "title", + ): + if hasattr(field_instance, attr_name): + signature[attr_name] = _jsonable_schema_value(getattr(field_instance, attr_name)) + + return signature + + +def _inline_model_signature(nested_fields: dict[object, object]) -> object: + """Build a stable signature for an anonymous inline model.""" + + return [ + (str(field_name), _field_signature(field)) + for field_name, field in sorted(nested_fields.items(), key=lambda item: str(item[0])) + ] + + +def _inline_model_name(nested_fields: dict[object, object]) -> str: + """Return a stable Swagger model name for an anonymous inline field map.""" + + signature = json.dumps(_inline_model_signature(nested_fields), sort_keys=True, separators=(",", ":")) + digest = 
hashlib.sha1(signature.encode("utf-8")).hexdigest()[:12] + return f"_AnonymousInlineModel_{digest}" + + +def apply_runtime_defaults() -> None: """Force the small config surface required for Swagger generation.""" os.environ.setdefault("SECRET_KEY", "spec-export") @@ -58,69 +148,175 @@ def _apply_runtime_defaults() -> None: dify_config.SWAGGER_UI_ENABLED = os.environ["SWAGGER_UI_ENABLED"].lower() == "true" -def _patch_swagger_for_inline_nested_dicts() -> None: - """Teach Flask-RESTX Swagger generation to tolerate inline nested field maps. - - Some existing controllers use `fields.Nested({...})` with a raw field mapping - instead of a named `api.model(...)`. Flask-RESTX crashes on those anonymous - dicts during schema registration, so this helper upgrades them into temporary - named models at export time. - """ - - if getattr(Swagger, "_dify_inline_nested_dict_patch", False): - return - - def get_or_create_inline_model(self: Swagger, nested_fields: dict[object, object]) -> object: - anonymous_models = getattr(self, "_anonymous_inline_models", None) - if anonymous_models is None: - anonymous_models = {} - self._anonymous_inline_models = anonymous_models - - anonymous_name = anonymous_models.get(id(nested_fields)) - if anonymous_name is None: - anonymous_name = f"_AnonymousInlineModel{len(anonymous_models) + 1}" - anonymous_models[id(nested_fields)] = anonymous_name - self.api.model(anonymous_name, nested_fields) - - return self.api.models[anonymous_name] - - def register_model_with_inline_dict_support(self: Swagger, model: object) -> dict[str, str]: - if isinstance(model, dict): - model = get_or_create_inline_model(self, model) - - return _ORIGINAL_REGISTER_MODEL(self, model) - - def register_field_with_inline_dict_support(self: Swagger, field: object) -> None: - nested = getattr(field, "nested", None) - if isinstance(nested, dict): - field.model = get_or_create_inline_model(self, nested) # type: ignore - - _ORIGINAL_REGISTER_FIELD(self, field) - - 
Swagger.register_model = register_model_with_inline_dict_support - Swagger.register_field = register_field_with_inline_dict_support - Swagger._dify_inline_nested_dict_patch = True - - def create_spec_app() -> Flask: """Build a minimal Flask app that only mounts the Swagger-producing blueprints.""" - _apply_runtime_defaults() - _patch_swagger_for_inline_nested_dicts() + apply_runtime_defaults() + + from libs.flask_restx_compat import patch_swagger_for_inline_nested_dicts + + patch_swagger_for_inline_nested_dicts() app = Flask(__name__) from controllers.console import bp as console_bp + from controllers.console import console_ns from controllers.service_api import bp as service_api_bp + from controllers.service_api import service_api_ns from controllers.web import bp as web_bp + from controllers.web import web_ns app.register_blueprint(console_bp) app.register_blueprint(web_bp) app.register_blueprint(service_api_bp) + for namespace in (console_ns, web_ns, service_api_ns): + for api in namespace.apis: + _materialize_inline_model_definitions(api) + return app +def _registered_models(namespace: str) -> dict[str, object]: + """Return the Flask-RESTX models registered for a Swagger namespace.""" + + if namespace == "console": + from controllers.console import console_ns + + models = dict(console_ns.models) + for api in console_ns.apis: + models.update(api.models) + return models + if namespace == "web": + from controllers.web import web_ns + + models = dict(web_ns.models) + for api in web_ns.apis: + models.update(api.models) + return models + if namespace == "service": + from controllers.service_api import service_api_ns + + models = dict(service_api_ns.models) + for api in service_api_ns.apis: + models.update(api.models) + return models + + raise ValueError(f"unknown Swagger namespace: {namespace}") + + +def _materialize_inline_model_definitions(api: RestxApi) -> None: + """Convert inline `fields.Nested({...})` maps into named API models.""" + + from flask_restx import 
fields + from flask_restx.model import Model, OrderedModel, instance + + inline_models: dict[int, dict[object, object]] = {} + inline_model_names: dict[int, str] = {} + + def collect_field(field: object) -> None: + field_instance = instance(field) + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested) and id(nested) not in inline_models: + inline_models[id(nested)] = nested + for nested_field in nested.values(): + collect_field(nested_field) + + container = getattr(field_instance, "container", None) + if container is not None: + collect_field(container) + + for model in list(api.models.values()): + if isinstance(model, (Model, OrderedModel)): + for field in model.values(): + collect_field(field) + + for nested_fields in sorted(inline_models.values(), key=_inline_model_name): + anonymous_name = _inline_model_name(nested_fields) + inline_model_names[id(nested_fields)] = anonymous_name + if anonymous_name not in api.models: + api.model(anonymous_name, nested_fields) + + def model_name_for(nested_fields: dict[object, object]) -> str: + anonymous_name = inline_model_names.get(id(nested_fields)) + if anonymous_name is None: + anonymous_name = _inline_model_name(nested_fields) + inline_model_names[id(nested_fields)] = anonymous_name + if anonymous_name not in api.models: + api.model(anonymous_name, nested_fields) + return anonymous_name + + def materialize_field(field: object) -> None: + field_instance = instance(field) + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested): + field_instance.model = api.models[model_name_for(nested)] # type: ignore[attr-defined] + + container = getattr(field_instance, "container", None) + if container is not None: + materialize_field(container) + + index = 0 + while index < len(api.models): + model = list(api.models.values())[index] + index += 1 + if isinstance(model, (Model, 
OrderedModel)): + for field in model.values(): + materialize_field(field) + + +def drop_null_values(value: object) -> object: + """Remove JSON null values that make the Markdown converter crash.""" + + if isinstance(value, dict): + return {key: drop_null_values(item) for key, item in value.items() if item is not None} + if isinstance(value, list): + return [drop_null_values(item) for item in value] + return value + + +def sort_openapi_arrays(value: object, *, parent_key: str | None = None) -> object: + """Sort order-insensitive Swagger arrays so generated Markdown is stable.""" + + if isinstance(value, dict): + return {key: sort_openapi_arrays(item, parent_key=key) for key, item in value.items()} + if not isinstance(value, list): + return value + + sorted_items = [sort_openapi_arrays(item, parent_key=parent_key) for item in value] + if parent_key == "parameters": + return sorted( + sorted_items, + key=lambda item: ( + item.get("in", "") if isinstance(item, dict) else "", + item.get("name", "") if isinstance(item, dict) else "", + json.dumps(item, sort_keys=True, default=str), + ), + ) + if parent_key in {"enum", "required", "schemes", "tags"}: + string_items = [item for item in sorted_items if isinstance(item, str)] + if len(string_items) == len(sorted_items): + return sorted(string_items) + return sorted_items + + +def _merge_registered_definitions(payload: dict[str, object], namespace: str) -> dict[str, object]: + """Include registered but route-indirect models in the exported Swagger definitions.""" + + definitions = payload.setdefault("definitions", {}) + if not isinstance(definitions, dict): + raise RuntimeError("unexpected Swagger definitions payload") + + for name, model in _registered_models(namespace).items(): + schema = getattr(model, "__schema__", None) + if isinstance(schema, dict): + definitions.setdefault(name, schema) + + return payload + + def generate_specs(output_dir: Path) -> list[Path]: """Write all Swagger specs to `output_dir` and return the 
written paths.""" @@ -138,6 +334,9 @@ def generate_specs(output_dir: Path) -> list[Path]: payload = response.get_json() if not isinstance(payload, dict): raise RuntimeError(f"unexpected response payload for {target.route}") + payload = _merge_registered_definitions(payload, target.namespace) + payload = drop_null_values(payload) + payload = sort_openapi_arrays(payload) output_path = output_dir / target.filename output_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8") diff --git a/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py b/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py index ba9758175f..f1196445ed 100644 --- a/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py +++ b/api/events/event_handlers/delete_tool_parameters_cache_when_sync_draft_workflow.py @@ -3,6 +3,7 @@ import logging from core.tools.entities.tool_entities import ToolProviderType from core.tools.tool_manager import ToolManager from core.tools.utils.configuration import ToolParameterConfigurationManager +from core.workflow.human_input_adapter import adapt_node_config_for_graph from events.app_event import app_draft_workflow_was_synced from graphon.nodes import BuiltinNodeTypes from graphon.nodes.tool.entities import ToolEntity @@ -19,7 +20,8 @@ def handle(sender, **kwargs): for node_data in synced_draft_workflow.graph_dict.get("nodes", []): if node_data.get("data", {}).get("type") == BuiltinNodeTypes.TOOL: try: - tool_entity = ToolEntity.model_validate(node_data["data"]) + adapted_node_data = adapt_node_config_for_graph(node_data) + tool_entity = ToolEntity.model_validate(adapted_node_data["data"]) provider_type = ToolProviderType(tool_entity.provider_type.value) tool_runtime = ToolManager.get_tool_runtime( provider_type=provider_type, diff --git a/api/events/event_handlers/update_provider_when_message_created.py 
b/api/events/event_handlers/update_provider_when_message_created.py index 1d615f0f87..8dec5876a9 100644 --- a/api/events/event_handlers/update_provider_when_message_created.py +++ b/api/events/event_handlers/update_provider_when_message_created.py @@ -137,17 +137,13 @@ def handle(sender: Message, **kwargs): if used_quota is not None: match provider_configuration.system_configuration.current_quota_type: case ProviderQuotaType.TRIAL: - from services.credit_pool_service import CreditPoolService - - CreditPoolService.check_and_deduct_credits( + _deduct_credit_pool_quota_capped( tenant_id=tenant_id, credits_required=used_quota, pool_type="trial", ) case ProviderQuotaType.PAID: - from services.credit_pool_service import CreditPoolService - - CreditPoolService.check_and_deduct_credits( + _deduct_credit_pool_quota_capped( tenant_id=tenant_id, credits_required=used_quota, pool_type="paid", @@ -200,6 +196,26 @@ def handle(sender: Message, **kwargs): raise +def _deduct_credit_pool_quota_capped(*, tenant_id: str, credits_required: int, pool_type: str) -> None: + """Apply post-generation credit accounting without failing message persistence on quota exhaustion.""" + from services.credit_pool_service import CreditPoolService + + deducted_credits = CreditPoolService.deduct_credits_capped( + tenant_id=tenant_id, + credits_required=credits_required, + pool_type=pool_type, + ) + if deducted_credits < credits_required: + logger.warning( + "Credit pool exhausted during message-created accounting, " + "tenant_id=%s, pool_type=%s, credits_required=%s, credits_deducted=%s", + tenant_id, + pool_type, + credits_required, + deducted_credits, + ) + + def _calculate_quota_usage( *, message: Message, system_configuration: SystemConfiguration, model_name: str ) -> int | None: diff --git a/api/extensions/ext_session_factory.py b/api/extensions/ext_session_factory.py index 0eb43d66f4..e19ccd11e5 100644 --- a/api/extensions/ext_session_factory.py +++ b/api/extensions/ext_session_factory.py @@ -1,7 
+1,9 @@ +from flask import Flask + from core.db.session_factory import configure_session_factory from extensions.ext_database import db -def init_app(app): +def init_app(app: Flask): with app.app_context(): configure_session_factory(db.engine) diff --git a/api/factories/file_factory/builders.py b/api/factories/file_factory/builders.py index 1d2ad4d445..4fb976f0e7 100644 --- a/api/factories/file_factory/builders.py +++ b/api/factories/file_factory/builders.py @@ -298,7 +298,7 @@ def _build_from_datasource_file( raise ValueError(f"DatasourceFile {mapping.get('datasource_file_id')} not found") extension = "." + datasource_file.key.split(".")[-1] if "." in datasource_file.key else ".bin" - detected_file_type = standardize_file_type(extension="." + extension, mime_type=datasource_file.mime_type) + detected_file_type = standardize_file_type(extension=extension, mime_type=datasource_file.mime_type) file_type = _resolve_file_type( detected_file_type=detected_file_type, specified_type=mapping.get("type"), diff --git a/api/factories/file_factory/message_files.py b/api/factories/file_factory/message_files.py index 4b3d514238..27441bdcc1 100644 --- a/api/factories/file_factory/message_files.py +++ b/api/factories/file_factory/message_files.py @@ -1,11 +1,18 @@ -"""Adapters from persisted message files to graph-layer file values.""" +"""Adapters from persisted message files to graph-layer file values. + +Replay paths only: files in conversation history were validated at upload time, +so these helpers deliberately do not accept (or forward) a ``FileUploadConfig`` — +re-validation here would break replays whenever workflow ``file_upload`` config +drifts between rounds. Mirrors ``build_file_from_stored_mapping`` in +``models/utils/file_input_compat.py``. 
+""" from __future__ import annotations from collections.abc import Sequence from core.app.file_access import FileAccessControllerProtocol -from graphon.file import File, FileBelongsTo, FileTransferMethod, FileUploadConfig +from graphon.file import File, FileBelongsTo, FileTransferMethod from models import MessageFile from .builders import build_from_mapping @@ -15,14 +22,12 @@ def build_from_message_files( *, message_files: Sequence[MessageFile], tenant_id: str, - config: FileUploadConfig | None = None, access_controller: FileAccessControllerProtocol, ) -> Sequence[File]: return [ build_from_message_file( message_file=message_file, tenant_id=tenant_id, - config=config, access_controller=access_controller, ) for message_file in message_files @@ -34,7 +39,6 @@ def build_from_message_file( *, message_file: MessageFile, tenant_id: str, - config: FileUploadConfig | None, access_controller: FileAccessControllerProtocol, ) -> File: mapping = { @@ -54,6 +58,5 @@ def build_from_message_file( return build_from_mapping( mapping=mapping, tenant_id=tenant_id, - config=config, access_controller=access_controller, ) diff --git a/api/factories/file_factory/remote.py b/api/factories/file_factory/remote.py index e5a7186007..9b8f94b1f3 100644 --- a/api/factories/file_factory/remote.py +++ b/api/factories/file_factory/remote.py @@ -19,8 +19,13 @@ from werkzeug.http import parse_options_header from core.helper import ssrf_proxy -def extract_filename(url_path: str, content_disposition: str | None) -> str | None: - """Extract a safe filename from Content-Disposition or the request URL path.""" +def extract_filename(url_or_path: str, content_disposition: str | None) -> str | None: + """Extract a safe filename from Content-Disposition or the request URL path. + + Handles full URLs, paths with query strings, hash fragments, and percent-encoded segments. + Query strings and hash fragments are stripped from the URL before extracting the basename. 
+ Percent-encoded characters in the path are decoded safely. + """ filename: str | None = None if content_disposition: filename_star_match = re.search(r"filename\*=([^;]+)", content_disposition) @@ -47,8 +52,13 @@ def extract_filename(url_path: str, content_disposition: str | None) -> str | No filename = urllib.parse.unquote(raw) if not filename: - candidate = os.path.basename(url_path) - filename = urllib.parse.unquote(candidate) if candidate else None + # Parse the URL to extract just the path, stripping query strings and fragments + # This handles both full URLs and bare paths + parsed = urllib.parse.urlparse(url_or_path) + path = parsed.path + candidate = os.path.basename(path) + # Decode percent-encoded characters, with safe fallback for malformed input + filename = urllib.parse.unquote(candidate, errors="replace") if candidate else None if filename: filename = os.path.basename(filename) diff --git a/api/factories/file_factory/validation.py b/api/factories/file_factory/validation.py index 4c4f6150e4..8c4e7ef1d4 100644 --- a/api/factories/file_factory/validation.py +++ b/api/factories/file_factory/validation.py @@ -2,9 +2,25 @@ from __future__ import annotations +from collections.abc import Iterable + from graphon.file import FileTransferMethod, FileType, FileUploadConfig +def _normalize_extension(extension: str) -> str: + s = extension.strip().lower() + if not s: + return "" + return s if s.startswith(".") else "." 
+ s + + +def _extension_matches(extension: str, whitelist: Iterable[str]) -> bool: + normalized = _normalize_extension(extension) + if not normalized: + return False + return normalized in {_normalize_extension(e) for e in whitelist} + + def is_file_valid_with_config( *, input_file_type: str, @@ -12,22 +28,31 @@ def is_file_valid_with_config( file_transfer_method: FileTransferMethod, config: FileUploadConfig, ) -> bool: - # FIXME(QIN2DIM): Always allow tool files (files generated by the assistant/model) - # These are internally generated and should bypass user upload restrictions + """Return whether the file is allowed by the upload config. + + ``allowed_file_types`` lists the buckets a file may fall into; ``CUSTOM`` is + a fallback bucket gated by ``allowed_file_extensions`` (case- and + dot-insensitive). Tool-generated files bypass user-facing config. + """ if file_transfer_method == FileTransferMethod.TOOL_FILE: return True - if ( - config.allowed_file_types - and input_file_type not in config.allowed_file_types - and input_file_type != FileType.CUSTOM - ): + allowed_types = config.allowed_file_types or [] + custom_allowed = FileType.CUSTOM in allowed_types + type_allowed = not allowed_types or input_file_type in allowed_types + + if not type_allowed and not custom_allowed: return False + # When the file is in the CUSTOM bucket, the extension whitelist is authoritative. + # An explicitly set whitelist (including the empty list) is enforced; empty == deny — + # the UI never submits an empty list, so this guards against DSL/API paths that + # bypass the UI from accidentally widening the allowlist. 
+ in_custom_bucket = input_file_type == FileType.CUSTOM or not type_allowed if ( - input_file_type == FileType.CUSTOM + in_custom_bucket and config.allowed_file_extensions is not None - and file_extension not in config.allowed_file_extensions + and not _extension_matches(file_extension, config.allowed_file_extensions) ): return False diff --git a/api/fields/workflow_run_fields.py b/api/fields/workflow_run_fields.py index 8c659086ed..a852f21bb2 100644 --- a/api/fields/workflow_run_fields.py +++ b/api/fields/workflow_run_fields.py @@ -1,14 +1,21 @@ +"""Workflow run response schemas for console APIs. + +Most workflow-run endpoints should document and serialize responses with the +Pydantic models in this module. The remaining Flask-RESTX field dictionaries are +kept only for workflow app-log endpoints that still build legacy log models. +""" + from __future__ import annotations from datetime import datetime from typing import Any from flask_restx import Namespace, fields -from pydantic import Field, field_validator +from pydantic import AliasChoices, Field, field_validator from fields.base import ResponseModel -from fields.end_user_fields import SimpleEndUser, simple_end_user_fields -from fields.member_fields import SimpleAccount, simple_account_fields +from fields.end_user_fields import SimpleEndUser +from fields.member_fields import SimpleAccount from libs.helper import TimestampField workflow_run_for_log_fields = { @@ -43,119 +50,6 @@ def build_workflow_run_for_archived_log_model(api_or_ns: Namespace): return api_or_ns.model("WorkflowRunForArchivedLog", workflow_run_for_archived_log_fields) -workflow_run_for_list_fields = { - "id": fields.String, - "version": fields.String, - "status": fields.String, - "elapsed_time": fields.Float, - "total_tokens": fields.Integer, - "total_steps": fields.Integer, - "created_by_account": fields.Nested(simple_account_fields, attribute="created_by_account", allow_null=True), - "created_at": TimestampField, - "finished_at": 
TimestampField, - "exceptions_count": fields.Integer, - "retry_index": fields.Integer, -} - -advanced_chat_workflow_run_for_list_fields = { - "id": fields.String, - "conversation_id": fields.String, - "message_id": fields.String, - "version": fields.String, - "status": fields.String, - "elapsed_time": fields.Float, - "total_tokens": fields.Integer, - "total_steps": fields.Integer, - "created_by_account": fields.Nested(simple_account_fields, attribute="created_by_account", allow_null=True), - "created_at": TimestampField, - "finished_at": TimestampField, - "exceptions_count": fields.Integer, - "retry_index": fields.Integer, -} - -advanced_chat_workflow_run_pagination_fields = { - "limit": fields.Integer(attribute="limit"), - "has_more": fields.Boolean(attribute="has_more"), - "data": fields.List(fields.Nested(advanced_chat_workflow_run_for_list_fields), attribute="data"), -} - -workflow_run_pagination_fields = { - "limit": fields.Integer(attribute="limit"), - "has_more": fields.Boolean(attribute="has_more"), - "data": fields.List(fields.Nested(workflow_run_for_list_fields), attribute="data"), -} - -workflow_run_count_fields = { - "total": fields.Integer, - "running": fields.Integer, - "succeeded": fields.Integer, - "failed": fields.Integer, - "stopped": fields.Integer, - "partial_succeeded": fields.Integer(attribute="partial-succeeded"), -} - -workflow_run_detail_fields = { - "id": fields.String, - "version": fields.String, - "graph": fields.Raw(attribute="graph_dict"), - "inputs": fields.Raw(attribute="inputs_dict"), - "status": fields.String, - "outputs": fields.Raw(attribute="outputs_dict"), - "error": fields.String, - "elapsed_time": fields.Float, - "total_tokens": fields.Integer, - "total_steps": fields.Integer, - "created_by_role": fields.String, - "created_by_account": fields.Nested(simple_account_fields, attribute="created_by_account", allow_null=True), - "created_by_end_user": fields.Nested(simple_end_user_fields, attribute="created_by_end_user", 
allow_null=True), - "created_at": TimestampField, - "finished_at": TimestampField, - "exceptions_count": fields.Integer, -} - -retry_event_field = { - "elapsed_time": fields.Float, - "status": fields.String, - "inputs": fields.Raw(attribute="inputs"), - "process_data": fields.Raw(attribute="process_data"), - "outputs": fields.Raw(attribute="outputs"), - "metadata": fields.Raw(attribute="metadata"), - "llm_usage": fields.Raw(attribute="llm_usage"), - "error": fields.String, - "retry_index": fields.Integer, -} - - -workflow_run_node_execution_fields = { - "id": fields.String, - "index": fields.Integer, - "predecessor_node_id": fields.String, - "node_id": fields.String, - "node_type": fields.String, - "title": fields.String, - "inputs": fields.Raw(attribute="inputs_dict"), - "process_data": fields.Raw(attribute="process_data_dict"), - "outputs": fields.Raw(attribute="outputs_dict"), - "status": fields.String, - "error": fields.String, - "elapsed_time": fields.Float, - "execution_metadata": fields.Raw(attribute="execution_metadata_dict"), - "extras": fields.Raw, - "created_at": TimestampField, - "created_by_role": fields.String, - "created_by_account": fields.Nested(simple_account_fields, attribute="created_by_account", allow_null=True), - "created_by_end_user": fields.Nested(simple_end_user_fields, attribute="created_by_end_user", allow_null=True), - "finished_at": TimestampField, - "inputs_truncated": fields.Boolean, - "outputs_truncated": fields.Boolean, - "process_data_truncated": fields.Boolean, -} - -workflow_run_node_execution_list_fields = { - "data": fields.List(fields.Nested(workflow_run_node_execution_fields)), -} - - def _to_timestamp(value: datetime | int | None) -> int | None: if isinstance(value, datetime): return int(value.timestamp()) @@ -252,7 +146,10 @@ class WorkflowRunCountResponse(ResponseModel): succeeded: int failed: int stopped: int - partial_succeeded: int = Field(validation_alias="partial-succeeded") + partial_succeeded: int = Field( + 
alias="partial_succeeded", + validation_alias=AliasChoices("partial_succeeded", "partial-succeeded"), + ) class WorkflowRunDetailResponse(ResponseModel): diff --git a/api/libs/external_api.py b/api/libs/external_api.py index f907d17750..64eb99a42b 100644 --- a/api/libs/external_api.py +++ b/api/libs/external_api.py @@ -9,6 +9,7 @@ from werkzeug.http import HTTP_STATUS_CODES from configs import dify_config from core.errors.error import AppInvokeQuotaExceededError +from libs.flask_restx_compat import patch_swagger_for_inline_nested_dicts from libs.token import build_force_logout_cookie_headers @@ -120,6 +121,7 @@ class ExternalApi(Api): } def __init__(self, app: Blueprint | Flask, *args, **kwargs): + patch_swagger_for_inline_nested_dicts() kwargs.setdefault("authorizations", self._authorizations) kwargs.setdefault("security", "Bearer") kwargs["add_specs"] = dify_config.SWAGGER_UI_ENABLED diff --git a/api/libs/flask_restx_compat.py b/api/libs/flask_restx_compat.py new file mode 100644 index 0000000000..34e0d586a0 --- /dev/null +++ b/api/libs/flask_restx_compat.py @@ -0,0 +1,149 @@ +"""Compatibility helpers for Dify's Flask-RESTX Swagger integration. + +These helpers are temporary bridges for legacy Flask-RESTX field contracts +while controllers migrate their request and response documentation to Pydantic +models. Keep the behavior centralized so live Swagger endpoints and offline +spec export fail or succeed in the same way. 
+""" + +import hashlib +import json +from typing import TypeGuard + +from flask import current_app +from flask_restx import fields +from flask_restx.model import Model, OrderedModel, instance +from flask_restx.swagger import Swagger + + +def _is_inline_field_map(value: object) -> TypeGuard[dict[object, object]]: + """Return whether a nested field map is an anonymous inline mapping.""" + + return isinstance(value, dict) and not isinstance(value, (Model, OrderedModel)) + + +def _jsonable_schema_value(value: object) -> object: + """Return a deterministic JSON-serializable representation for schema fingerprints.""" + + if value is None or isinstance(value, str | int | float | bool): + return value + if isinstance(value, list | tuple): + return [_jsonable_schema_value(item) for item in value] + if isinstance(value, dict): + return {str(key): _jsonable_schema_value(item) for key, item in value.items()} + value_type = type(value) + return f"<{value_type.__module__}.{value_type.__qualname__}>" + + +def _field_signature(field: object) -> object: + """Build a stable signature for a Flask-RESTX field object.""" + + field_instance = instance(field) + signature: dict[str, object] = { + "class": f"{field_instance.__class__.__module__}.{field_instance.__class__.__qualname__}" + } + + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested): + signature["nested"] = _inline_model_signature(nested) + else: + signature["nested"] = getattr( + nested, + "name", + f"<{type(nested).__module__}.{type(nested).__qualname__}>", + ) + elif hasattr(field_instance, "container"): + signature["container"] = _field_signature(field_instance.container) + else: + schema = getattr(field_instance, "__schema__", None) + if isinstance(schema, dict): + signature["schema"] = _jsonable_schema_value(schema) + + for attr_name in ( + "attribute", + "default", + "description", + "example", + "max", + "max_items", + "min", + "min_items", + 
"nullable", + "readonly", + "required", + "title", + "unique", + ): + if hasattr(field_instance, attr_name): + signature[attr_name] = _jsonable_schema_value(getattr(field_instance, attr_name)) + + return signature + + +def _inline_model_signature(nested_fields: dict[object, object]) -> object: + """Build a stable signature for an anonymous inline model.""" + + return [ + (str(field_name), _field_signature(field)) + for field_name, field in sorted(nested_fields.items(), key=lambda item: str(item[0])) + ] + + +def _inline_model_name(nested_fields: dict[object, object]) -> str: + """Return a stable Swagger model name for an anonymous inline field map.""" + + signature = json.dumps(_inline_model_signature(nested_fields), sort_keys=True, separators=(",", ":")) + digest = hashlib.sha1(signature.encode("utf-8")).hexdigest()[:12] + return f"_AnonymousInlineModel_{digest}" + + +def patch_swagger_for_inline_nested_dicts() -> None: + """Allow Swagger generation to handle legacy inline Flask-RESTX field dicts. + + Some existing controllers use raw field mappings in `fields.Nested({...})` + or directly in `@namespace.response(...)`. Runtime marshalling accepts that, + but Flask-RESTX Swagger registration expects a named model. Convert those + anonymous mappings into temporary named models during docs generation. 
+ """ + + if getattr(Swagger, "_dify_inline_nested_dict_patch", False): + return + + original_register_model = Swagger.register_model + original_register_field = Swagger.register_field + original_as_dict = Swagger.as_dict + + def get_or_create_inline_model(self: Swagger, nested_fields: dict[object, object]) -> object: + anonymous_name = _inline_model_name(nested_fields) + if anonymous_name not in self.api.models: + self.api.model(anonymous_name, nested_fields) + + return self.api.models[anonymous_name] + + def register_model_with_inline_dict_support(self: Swagger, model: object) -> dict[str, str]: + if _is_inline_field_map(model): + model = get_or_create_inline_model(self, model) + + return original_register_model(self, model) + + def register_field_with_inline_dict_support(self: Swagger, field: object) -> None: + nested = getattr(field, "nested", None) + if _is_inline_field_map(nested): + field.model = get_or_create_inline_model(self, nested) # type: ignore[attr-defined] + + original_register_field(self, field) + + def as_dict_with_inline_dict_support(self: Swagger): + # Temporary set RESTX_INCLUDE_ALL_MODELS = false to prevent "length changed while iterating" error + include_all_models = current_app.config.get("RESTX_INCLUDE_ALL_MODELS", False) + current_app.config["RESTX_INCLUDE_ALL_MODELS"] = False + try: + return original_as_dict(self) + finally: + current_app.config["RESTX_INCLUDE_ALL_MODELS"] = include_all_models + + Swagger.register_model = register_model_with_inline_dict_support + Swagger.register_field = register_field_with_inline_dict_support + Swagger.as_dict = as_dict_with_inline_dict_support + Swagger._dify_inline_nested_dict_patch = True diff --git a/api/libs/typing.py b/api/libs/typing.py deleted file mode 100644 index f84e9911e0..0000000000 --- a/api/libs/typing.py +++ /dev/null @@ -1,9 +0,0 @@ -from typing import TypeGuard - - -def is_str_dict(v: object) -> TypeGuard[dict[str, object]]: - return isinstance(v, dict) - - -def is_str(v: object) -> 
TypeGuard[str]: - return isinstance(v, str) diff --git a/api/migrations/versions/2026_04_29_1200-a4f2d8c9b731_add_recommended_app_categories.py b/api/migrations/versions/2026_04_29_1200-a4f2d8c9b731_add_recommended_app_categories.py new file mode 100644 index 0000000000..eee58b6310 --- /dev/null +++ b/api/migrations/versions/2026_04_29_1200-a4f2d8c9b731_add_recommended_app_categories.py @@ -0,0 +1,26 @@ +"""add recommended app categories + +Revision ID: a4f2d8c9b731 +Revises: 227822d22895 +Create Date: 2026-04-29 12:00:00.000000 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "a4f2d8c9b731" +down_revision = "227822d22895" +branch_labels = None +depends_on = None + + +def upgrade(): + with op.batch_alter_table("recommended_apps", schema=None) as batch_op: + batch_op.add_column(sa.Column("categories", sa.JSON(), nullable=True)) + + +def downgrade(): + with op.batch_alter_table("recommended_apps", schema=None) as batch_op: + batch_op.drop_column("categories") diff --git a/api/models/comment.py b/api/models/comment.py index 5d4a08e783..6d151fe13d 100644 --- a/api/models/comment.py +++ b/api/models/comment.py @@ -1,19 +1,22 @@ """Workflow comment models.""" +from __future__ import annotations + from datetime import datetime -from typing import Optional import sqlalchemy as sa from sqlalchemy import Index, func from sqlalchemy.orm import Mapped, mapped_column, relationship +from models.base import TypeBase + from .account import Account -from .base import Base, gen_uuidv7_string +from .base import gen_uuidv7_string from .engine import db from .types import StringUUID -class WorkflowComment(Base): +class WorkflowComment(TypeBase): """Workflow comment model for canvas commenting functionality. 
Comments are associated with apps rather than specific workflow versions, @@ -42,27 +45,33 @@ class WorkflowComment(Base): Index("workflow_comments_created_at_idx", "created_at"), ) - id: Mapped[str] = mapped_column(StringUUID, default=gen_uuidv7_string) + id: Mapped[str] = mapped_column(StringUUID, default_factory=gen_uuidv7_string, init=False) tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) app_id: Mapped[str] = mapped_column(StringUUID, nullable=False) position_x: Mapped[float] = mapped_column(sa.Float) position_y: Mapped[float] = mapped_column(sa.Float) content: Mapped[str] = mapped_column(sa.Text, nullable=False) created_by: Mapped[str] = mapped_column(StringUUID, nullable=False) - created_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at: Mapped[datetime] = mapped_column( + sa.DateTime, nullable=False, server_default=func.current_timestamp(), init=False + ) updated_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp() + sa.DateTime, + nullable=False, + server_default=func.current_timestamp(), + onupdate=func.current_timestamp(), + init=False, ) - resolved: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false")) - resolved_at: Mapped[datetime | None] = mapped_column(sa.DateTime) - resolved_by: Mapped[str | None] = mapped_column(StringUUID) + resolved_at: Mapped[datetime | None] = mapped_column(sa.DateTime, default=None) + resolved_by: Mapped[str | None] = mapped_column(StringUUID, default=None) + resolved: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, server_default=sa.text("false"), default=False) # Relationships - replies: Mapped[list["WorkflowCommentReply"]] = relationship( - "WorkflowCommentReply", back_populates="comment", cascade="all, delete-orphan" + replies: Mapped[list[WorkflowCommentReply]] = relationship( + lambda: 
WorkflowCommentReply, back_populates="comment", cascade="all, delete-orphan", init=False ) - mentions: Mapped[list["WorkflowCommentMention"]] = relationship( - "WorkflowCommentMention", back_populates="comment", cascade="all, delete-orphan" + mentions: Mapped[list[WorkflowCommentMention]] = relationship( + lambda: WorkflowCommentMention, back_populates="comment", cascade="all, delete-orphan", init=False ) @property @@ -131,7 +140,7 @@ class WorkflowComment(Base): return participants -class WorkflowCommentReply(Base): +class WorkflowCommentReply(TypeBase): """Workflow comment reply model. Attributes: @@ -149,18 +158,24 @@ class WorkflowCommentReply(Base): Index("comment_replies_created_at_idx", "created_at"), ) - id: Mapped[str] = mapped_column(StringUUID, default=gen_uuidv7_string) + id: Mapped[str] = mapped_column(StringUUID, default_factory=gen_uuidv7_string, init=False) comment_id: Mapped[str] = mapped_column( StringUUID, sa.ForeignKey("workflow_comments.id", ondelete="CASCADE"), nullable=False ) content: Mapped[str] = mapped_column(sa.Text, nullable=False) created_by: Mapped[str] = mapped_column(StringUUID, nullable=False) - created_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + created_at: Mapped[datetime] = mapped_column( + sa.DateTime, nullable=False, server_default=func.current_timestamp(), init=False + ) updated_at: Mapped[datetime] = mapped_column( - sa.DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp() + sa.DateTime, + nullable=False, + server_default=func.current_timestamp(), + onupdate=func.current_timestamp(), + init=False, ) # Relationships - comment: Mapped["WorkflowComment"] = relationship("WorkflowComment", back_populates="replies") + comment: Mapped[WorkflowComment] = relationship(lambda: WorkflowComment, back_populates="replies", init=False) @property def created_by_account(self): @@ -174,7 +189,7 @@ class WorkflowCommentReply(Base): 
self._created_by_account_cache = account -class WorkflowCommentMention(Base): +class WorkflowCommentMention(TypeBase): """Workflow comment mention model. Mentions are only for internal accounts since end users @@ -194,18 +209,18 @@ class WorkflowCommentMention(Base): Index("comment_mentions_user_idx", "mentioned_user_id"), ) - id: Mapped[str] = mapped_column(StringUUID, default=gen_uuidv7_string) + id: Mapped[str] = mapped_column(StringUUID, default_factory=gen_uuidv7_string, init=False) comment_id: Mapped[str] = mapped_column( StringUUID, sa.ForeignKey("workflow_comments.id", ondelete="CASCADE"), nullable=False ) - reply_id: Mapped[str | None] = mapped_column( - StringUUID, sa.ForeignKey("workflow_comment_replies.id", ondelete="CASCADE"), nullable=True - ) mentioned_user_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + reply_id: Mapped[str | None] = mapped_column( + StringUUID, sa.ForeignKey("workflow_comment_replies.id", ondelete="CASCADE"), nullable=True, default=None + ) # Relationships - comment: Mapped["WorkflowComment"] = relationship("WorkflowComment", back_populates="mentions") - reply: Mapped[Optional["WorkflowCommentReply"]] = relationship("WorkflowCommentReply") + comment: Mapped[WorkflowComment] = relationship(lambda: WorkflowComment, back_populates="mentions", init=False) + reply: Mapped[WorkflowCommentReply | None] = relationship(lambda: WorkflowCommentReply, init=False) @property def mentioned_user_account(self): diff --git a/api/models/dataset.py b/api/models/dataset.py index a00e9f7640..f823e0aa10 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -11,7 +11,7 @@ import time from collections.abc import Sequence from datetime import datetime from json import JSONDecodeError -from typing import Any, TypedDict, cast +from typing import Any, ClassVar, TypedDict, cast from uuid import uuid4 import sqlalchemy as sa @@ -24,7 +24,7 @@ from core.rag.index_processor.constant.built_in_field import BuiltInField, Metad from 
core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType from core.rag.index_processor.constant.query_type import QueryType from core.rag.retrieval.retrieval_methods import RetrievalMethod -from core.tools.signature import sign_upload_file +from core.tools.signature import sign_upload_file_preview_url from extensions.ext_storage import storage from libs.uuid_utils import uuidv7 @@ -441,23 +441,27 @@ class Dataset(Base): return f"{dify_config.VECTOR_INDEX_NAME_PREFIX}_{normalized_dataset_id}_Node" -class DatasetProcessRule(Base): # bug +class DatasetProcessRule(TypeBase): __tablename__ = "dataset_process_rules" __table_args__ = ( sa.PrimaryKeyConstraint("id", name="dataset_process_rule_pkey"), sa.Index("dataset_process_rule_dataset_id_idx", "dataset_id"), ) - id = mapped_column(StringUUID, nullable=False, default=lambda: str(uuid4())) - dataset_id = mapped_column(StringUUID, nullable=False) - mode = mapped_column(EnumText(ProcessRuleMode, length=255), nullable=False, server_default=sa.text("'automatic'")) - rules = mapped_column(LongText, nullable=True) - created_by = mapped_column(StringUUID, nullable=False) - created_at: Mapped[datetime] = mapped_column(DateTime, nullable=False, server_default=func.current_timestamp()) + id: Mapped[str] = mapped_column(StringUUID, nullable=False, default_factory=lambda: str(uuid4()), init=False) + dataset_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + mode: Mapped[ProcessRuleMode] = mapped_column( + EnumText(ProcessRuleMode, length=255), nullable=False, server_default=sa.text("'automatic'") + ) + rules: Mapped[str | None] = mapped_column(LongText, nullable=True) + created_by: Mapped[str] = mapped_column(StringUUID, nullable=False) + created_at: Mapped[datetime] = mapped_column( + DateTime, nullable=False, server_default=func.current_timestamp(), init=False + ) MODES = ["automatic", "custom", "hierarchical"] PRE_PROCESSING_RULES = ["remove_stopwords", "remove_extra_spaces", 
"remove_urls_emails"] - AUTOMATIC_RULES: AutomaticRulesConfig = { + AUTOMATIC_RULES: ClassVar[AutomaticRulesConfig] = { "pre_processing_rules": [ {"id": "remove_extra_spaces", "enabled": True}, {"id": "remove_urls_emails", "enabled": False}, @@ -1020,7 +1024,7 @@ class DocumentSegment(Base): encoded_sign = base64.urlsafe_b64encode(sign).decode() params = f"timestamp={timestamp}&nonce={nonce}&sign={encoded_sign}" - reference_url = dify_config.CONSOLE_API_URL or "" + reference_url = dify_config.FILES_URL or dify_config.CONSOLE_API_URL or "" base_url = f"{reference_url}/files/{upload_file_id}/image-preview" source_url = f"{base_url}?{params}" attachment_list.append( @@ -1162,7 +1166,7 @@ class DatasetQuery(TypeBase): "size": file_info.size, "extension": file_info.extension, "mime_type": file_info.mime_type, - "source_url": sign_upload_file(file_info.id, file_info.extension), + "source_url": sign_upload_file_preview_url(file_info.id, file_info.extension), } else: query["file_info"] = None diff --git a/api/models/model.py b/api/models/model.py index 25c330b062..f7f90465cf 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -878,6 +878,7 @@ class RecommendedApp(TypeBase): copyright: Mapped[str] = mapped_column(String(255), nullable=False) privacy_policy: Mapped[str] = mapped_column(String(255), nullable=False) category: Mapped[str] = mapped_column(String(255), nullable=False) + categories: Mapped[list[str] | None] = mapped_column(sa.JSON, nullable=True, default=None) custom_disclaimer: Mapped[str] = mapped_column(LongText, default="") position: Mapped[int] = mapped_column(sa.Integer, nullable=False, default=0) is_listed: Mapped[bool] = mapped_column(sa.Boolean, nullable=False, default=True) diff --git a/api/models/provider.py b/api/models/provider.py index 2bb67d605b..8dc3ce4ff6 100644 --- a/api/models/provider.py +++ b/api/models/provider.py @@ -9,11 +9,11 @@ import sqlalchemy as sa from sqlalchemy import DateTime, String, func, select, text from 
sqlalchemy.orm import Mapped, mapped_column +from core.db.session_factory import session_factory from graphon.model_runtime.entities.model_entities import ModelType from libs.uuid_utils import uuidv7 from .base import TypeBase -from .engine import db from .enums import CredentialSourceType, PaymentStatus, ProviderQuotaType from .types import EnumText, LongText, StringUUID @@ -82,7 +82,8 @@ class Provider(TypeBase): @cached_property def credential(self): if self.credential_id: - return db.session.scalar(select(ProviderCredential).where(ProviderCredential.id == self.credential_id)) + with session_factory.create_session() as session: + return session.scalar(select(ProviderCredential).where(ProviderCredential.id == self.credential_id)) @property def credential_name(self): @@ -145,9 +146,10 @@ class ProviderModel(TypeBase): @cached_property def credential(self): if self.credential_id: - return db.session.scalar( - select(ProviderModelCredential).where(ProviderModelCredential.id == self.credential_id) - ) + with session_factory.create_session() as session: + return session.scalar( + select(ProviderModelCredential).where(ProviderModelCredential.id == self.credential_id) + ) @property def credential_name(self): diff --git a/api/models/workflow.py b/api/models/workflow.py index cb1723440b..7936c06a5a 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -1568,12 +1568,14 @@ class WorkflowDraftVariable(Base): ), ) - # Relationship to WorkflowDraftVariableFile + # WorkflowDraftVariableFile uses TypeBase while WorkflowDraftVariable uses Base, so the relationship + # must resolve the class object lazily instead of relying on string lookup across registries. 
variable_file: Mapped[Optional["WorkflowDraftVariableFile"]] = orm.relationship( + lambda: WorkflowDraftVariableFile, foreign_keys=[file_id], lazy="raise", uselist=False, - primaryjoin="WorkflowDraftVariableFile.id == WorkflowDraftVariable.file_id", + primaryjoin=lambda: orm.foreign(WorkflowDraftVariable.file_id) == WorkflowDraftVariableFile.id, ) # Cache for deserialized value @@ -1892,7 +1894,7 @@ class WorkflowDraftVariable(Base): return self.last_edited_at is not None -class WorkflowDraftVariableFile(Base): +class WorkflowDraftVariableFile(TypeBase): """Stores metadata about files associated with large workflow draft variables. This model acts as an intermediary between WorkflowDraftVariable and UploadFile, @@ -1906,18 +1908,7 @@ class WorkflowDraftVariableFile(Base): __tablename__ = "workflow_draft_variable_files" # Primary key - id: Mapped[str] = mapped_column( - StringUUID, - primary_key=True, - default=lambda: str(uuidv7()), - ) - - created_at: Mapped[datetime] = mapped_column( - DateTime, - nullable=False, - default=naive_utc_now, - server_default=func.current_timestamp(), - ) + id: Mapped[str] = mapped_column(StringUUID, primary_key=True, default_factory=lambda: str(uuidv7()), init=False) tenant_id: Mapped[str] = mapped_column( StringUUID, @@ -1969,15 +1960,23 @@ class WorkflowDraftVariableFile(Base): nullable=False, ) - # Relationship to UploadFile + # Rows are created with `upload_file_id`; callers should load this relationship explicitly when needed. 
upload_file: Mapped["UploadFile"] = orm.relationship( UploadFile, foreign_keys=[upload_file_id], lazy="raise", + init=False, uselist=False, primaryjoin=lambda: orm.foreign(WorkflowDraftVariableFile.upload_file_id) == UploadFile.id, ) + created_at: Mapped[datetime] = mapped_column( + DateTime, + nullable=False, + default_factory=naive_utc_now, + server_default=func.current_timestamp(), + ) + def is_system_variable_editable(name: str) -> bool: return name in _EDITABLE_SYSTEM_VARIABLE diff --git a/api/openapi/markdown/console-swagger.md b/api/openapi/markdown/console-swagger.md new file mode 100644 index 0000000000..e56d5f6fe5 --- /dev/null +++ b/api/openapi/markdown/console-swagger.md @@ -0,0 +1,14795 @@ +# Console API +Console management APIs for app configuration, monitoring, and administration + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## console +Console management API operations + +### /account/avatar + +#### GET +##### Description + +Get account avatar url + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountAvatarQuery](#accountavatarquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountAvatarPayload](#accountavatarpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/change-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[ChangeEmailSendPayload](#changeemailsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/change-email/check-email-unique + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CheckEmailUniquePayload](#checkemailuniquepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/change-email/reset + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailResetPayload](#changeemailresetpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/change-email/validity + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailValidityPayload](#changeemailvaliditypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountDeletePayload](#accountdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete/feedback + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountDeletionFeedbackPayload](#accountdeletionfeedbackpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete/verify + +#### GET 
+##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/education + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationStatusResponse](#educationstatusresponse) | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EducationActivatePayload](#educationactivatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/education/autocomplete + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EducationAutocompleteQuery](#educationautocompletequery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationAutocompleteResponse](#educationautocompleteresponse) | + +### /account/education/verify + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationVerifyResponse](#educationverifyresponse) | + +### /account/init + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountInitPayload](#accountinitpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/integrates + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountIntegrateListResponse](#accountintegratelistresponse) | + +### /account/interface-language + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[AccountInterfaceLanguagePayload](#accountinterfacelanguagepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/interface-theme + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountInterfaceThemePayload](#accountinterfacethemepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountNamePayload](#accountnamepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/password + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountPasswordPayload](#accountpasswordpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/profile + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/timezone + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountTimezonePayload](#accounttimezonepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /activate + +#### POST +##### Description + +Activate account with invitation token + +##### Parameters + 
+| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ActivatePayload](#activatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Account activated successfully | [ActivationResponse](#activationresponse) | +| 400 | Already activated or invalid token | | + +### /activate/check + +#### GET +##### Description + +Check if activation token is valid + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ActivateCheckQuery](#activatecheckquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ActivationCheckResponse](#activationcheckresponse) | + +### /admin/batch_add_notification_accounts + +#### POST +##### Description + +Register target accounts for a notification by email address. JSON body: {"notification_id": "...", "user_email": ["a@example.com", ...]}. File upload: multipart/form-data with a 'file' field (CSV or TXT, one email per line) plus a 'notification_id' field. Emails that do not match any account are silently skipped. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Accounts added successfully | + +### /admin/delete-explore-banner/{banner_id} + +#### DELETE +##### Description + +Delete an explore banner + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| banner_id | path | Banner ID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Banner deleted successfully | + +### /admin/insert-explore-apps + +#### POST +##### Description + +Insert or update an app in the explore list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InsertExploreAppPayload](#insertexploreapppayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | App updated successfully | +| 201 | App inserted successfully | +| 404 | App not found | + +### /admin/insert-explore-apps/{app_id} + +#### DELETE +##### Description + +Remove an app from the explore list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID to remove | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | App removed successfully | + +### /admin/insert-explore-banner + +#### POST +##### Description + +Insert an explore banner + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InsertExploreBannerPayload](#insertexplorebannerpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Banner inserted successfully | + +### /admin/upsert_notification + +#### POST +##### Description + +Create or update an in-product notification. 
Supply notification_id to update an existing one; omit it to create a new one. Pass at least one language variant in contents (zh / en / jp). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpsertNotificationPayload](#upsertnotificationpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Notification upserted successfully | + +### /all-workspaces + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkspaceListQuery](#workspacelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-based-extension + +#### GET +##### Description + +Get all API-based extensions for current tenant + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [APIBasedExtensionListResponse](#apibasedextensionlistresponse) | + +#### POST +##### Description + +Create a new API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [APIBasedExtensionPayload](#apibasedextensionpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Extension created successfully | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +### /api-based-extension/{id} + +#### DELETE +##### Description + +Delete API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Extension deleted successfully | + +#### GET +##### Description + +Get 
API-based extension by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +#### POST +##### Description + +Update API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [APIBasedExtensionPayload](#apibasedextensionpayload) | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Extension updated successfully | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +### /api-key-auth/data-source + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-key-auth/data-source/binding + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiKeyAuthBindingPayload](#apikeyauthbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-key-auth/data-source/{binding_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /app/prompt-templates + +#### GET +##### Description + +Get advanced prompt templates based on app mode and model configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[AdvancedPromptTemplateQuery](#advancedprompttemplatequery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Prompt templates retrieved successfully | [ object ] | +| 400 | Invalid request parameters | | + +### /apps + +#### GET +##### Summary + +Get app list + +##### Description + +Get list of applications with pagination and filtering + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppListQuery](#applistquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AppPagination](#apppagination) | + +#### POST +##### Summary + +Create app + +##### Description + +Create a new application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CreateAppPayload](#createapppayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | App created successfully | [AppDetail](#appdetail) | +| 400 | Invalid request parameters | | +| 403 | Insufficient permissions | | + +### /apps/imports + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppImportPayload](#appimportpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Import completed | [Import](#import) | +| 202 | Import pending confirmation | [Import](#import) | +| 400 | Import failed | [Import](#import) | + +### /apps/imports/{app_id}/check-dependencies + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | 
Schema | +| ---- | ----------- | ------ | +| 200 | Dependencies checked | [CheckDependenciesResult](#checkdependenciesresult) | + +### /apps/imports/{import_id}/confirm + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| import_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Import confirmed | [Import](#import) | +| 400 | Import failed | [Import](#import) | + +### /apps/workflows/online-users + +#### POST +##### Description + +Get workflow online users + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowOnlineUsersPayload](#workflowonlineuserspayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id} + +#### DELETE +##### Summary + +Delete app + +##### Description + +Delete application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | App deleted successfully | +| 403 | Insufficient permissions | + +#### GET +##### Summary + +Get app detail + +##### Description + +Get application details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AppDetailWithSite](#appdetailwithsite) | + +#### PUT +##### Summary + +Update app + +##### Description + +Update application details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpdateAppPayload](#updateapppayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | App updated successfully | [AppDetailWithSite](#appdetailwithsite) | +| 400 | Invalid request parameters | | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/advanced-chat/workflow-runs + +#### GET +##### Summary + +Get advanced chat app workflow run list + +##### Description + +Get advanced chat workflow run list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| last_id | query | Last run ID for pagination | No | string | +| limit | query | Number of items per page (1-100) | No | integer | +| status | query | Workflow run status filter | No | string | +| triggered_from | query | Filter by trigger source: debugging or app-run. Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs retrieved successfully | [AdvancedChatWorkflowRunPaginationResponse](#advancedchatworkflowrunpaginationresponse) | + +### /apps/{app_id}/advanced-chat/workflow-runs/count + +#### GET +##### Summary + +Get advanced chat workflow runs count statistics + +##### Description + +Get advanced chat workflow runs count statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| status | query | Workflow run status filter | No | string | +| time_range | query | Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), 30m (30 minutes), 30s (30 seconds). Filters by created_at field. 
| No | string | +| triggered_from | query | Filter by trigger source: debugging or app-run. Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs count retrieved successfully | [WorkflowRunCountResponse](#workflowruncountresponse) | + +### /apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/preview + +#### POST +##### Summary + +Preview human input form content and placeholders + +##### Description + +Get human input form preview for advanced chat workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormPreviewPayload](#humaninputformpreviewpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/run + +#### POST +##### Summary + +Submit human input form preview + +##### Description + +Submit human input form preview for advanced chat workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/advanced-chat/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Description + +Run draft workflow iteration node for advanced chat + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
payload | body | | Yes | [IterationNodeRunPayload](#iterationnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Iteration node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/advanced-chat/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Description + +Run draft workflow loop node for advanced chat + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoopNodeRunPayload](#loopnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Loop node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/advanced-chat/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Description + +Run draft workflow for advanced chat application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AdvancedChatWorkflowRunPayload](#advancedchatworkflowrunpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow run started successfully | +| 400 | Invalid request parameters | +| 403 | Permission denied | + +### /apps/{app_id}/agent/logs + +#### GET +##### Summary + +Get agent logs + +##### Description + +Get agent execution logs for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[AgentLogQuery](#agentlogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Agent logs retrieved successfully | [ object ] | +| 400 | Invalid request parameters | | + +### /apps/{app_id}/annotation-reply/{action} + +#### POST +##### Description + +Enable or disable annotation reply for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationReplyPayload](#annotationreplypayload) | +| action | path | Action to perform (enable/disable) | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-reply/{action}/status/{job_id} + +#### GET +##### Description + +Get status of annotation reply action job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action type | Yes | string | +| app_id | path | Application ID | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-setting + +#### GET +##### Description + +Get annotation settings for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Annotation settings retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-settings/{annotation_setting_id} + +#### POST +##### 
Description + +Update annotation settings for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationSettingUpdatePayload](#annotationsettingupdatepayload) | +| annotation_setting_id | path | Annotation setting ID | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Settings updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotations + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get annotations for an app with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationListQuery](#annotationlistquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Annotations retrieved successfully | +| 403 | Insufficient permissions | + +#### POST +##### Description + +Create a new annotation for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CreateAnnotationPayload](#createannotationpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Annotation created successfully | [Annotation](#annotation) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/batch-import + +#### POST +##### Description + +Batch import annotations from 
CSV file with rate limiting and security checks + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Batch import started successfully | +| 400 | No file uploaded or too many files | +| 403 | Insufficient permissions | +| 413 | File too large | +| 429 | Too many requests or concurrent imports | + +### /apps/{app_id}/annotations/batch-import-status/{job_id} + +#### GET +##### Description + +Get status of batch import job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotations/count + +#### GET +##### Description + +Get count of message annotations for the app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation count retrieved successfully | [AnnotationCountResponse](#annotationcountresponse) | + +### /apps/{app_id}/annotations/export + +#### GET +##### Description + +Export all annotations for an app with CSV injection protection + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotations exported successfully | 
[AnnotationExportList](#annotationexportlist) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/{annotation_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | | Yes | string | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Description + +Update or delete an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpdateAnnotationPayload](#updateannotationpayload) | +| annotation_id | path | Annotation ID | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation updated successfully | [Annotation](#annotation) | +| 204 | Annotation deleted successfully | | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/{annotation_id}/hit-histories + +#### GET +##### Description + +Get hit histories for an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | Annotation ID | Yes | string | +| app_id | path | Application ID | Yes | string | +| limit | query | Page size | No | integer | +| page | query | Page number | No | integer | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Hit histories retrieved successfully | [AnnotationHitHistoryList](#annotationhithistorylist) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/api-enable + +#### POST +##### Description + +Enable or disable app API + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | 
-------- | ------ | +| payload | body | | Yes | [AppApiStatusPayload](#appapistatuspayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API status updated successfully | [AppDetail](#appdetail) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/audio-to-text + +#### POST +##### Description + +Transcribe audio to text for chat messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Audio transcription successful | [AudioTranscriptResponse](#audiotranscriptresponse) | +| 400 | Bad request - No audio uploaded or unsupported type | | +| 413 | Audio file too large | | + +### /apps/{app_id}/chat-conversations + +#### GET +##### Description + +Get chat conversations with pagination, filtering and summary + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatConversationQuery](#chatconversationquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationWithSummaryPagination](#conversationwithsummarypagination) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/chat-conversations/{conversation_id} + +#### DELETE +##### Description + +Delete a chat conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation 
deleted successfully | +| 403 | Insufficient permissions | +| 404 | Conversation not found | + +#### GET +##### Description + +Get chat conversation details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationDetail](#conversationdetail) | +| 403 | Insufficient permissions | | +| 404 | Conversation not found | | + +### /apps/{app_id}/chat-messages + +#### GET +##### Description + +Get chat messages for a conversation with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatMessagesQuery](#chatmessagesquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [MessageInfiniteScrollPaginationResponse](#messageinfinitescrollpaginationresponse) | +| 404 | Conversation not found | | + +### /apps/{app_id}/chat-messages/{message_id}/suggested-questions + +#### GET +##### Description + +Get suggested questions for a message + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Suggested questions retrieved successfully | [SuggestedQuestionsResponse](#suggestedquestionsresponse) | +| 404 | Message or conversation not found | | + +### /apps/{app_id}/chat-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running chat message generation + +##### 
Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | + +### /apps/{app_id}/completion-conversations + +#### GET +##### Description + +Get completion conversations with pagination and filtering + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionConversationQuery](#completionconversationquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationPagination](#conversationpagination) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/completion-conversations/{conversation_id} + +#### DELETE +##### Description + +Delete a completion conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 403 | Insufficient permissions | +| 404 | Conversation not found | + +#### GET +##### Description + +Get completion conversation details with messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | 
[ConversationMessageDetail](#conversationmessagedetail) | +| 403 | Insufficient permissions | | +| 404 | Conversation not found | | + +### /apps/{app_id}/completion-messages + +#### POST +##### Description + +Generate completion message for debugging + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionMessagePayload](#completionmessagepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Completion generated successfully | +| 400 | Invalid request parameters | +| 404 | App not found | + +### /apps/{app_id}/completion-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running completion message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | + +### /apps/{app_id}/conversation-variables + +#### GET +##### Description + +Get conversation variables for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariablesQuery](#conversationvariablesquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Conversation variables retrieved successfully | [PaginatedConversationVariableResponse](#paginatedconversationvariableresponse) | + +### /apps/{app_id}/convert-to-workflow + +#### POST +##### Summary + +Convert basic mode of chatbot app to workflow mode + +##### Description + +Convert application to workflow mode 
+Convert expert mode of chatbot app to workflow mode +Convert Completion App to Workflow App + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConvertToWorkflowPayload](#converttoworkflowpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Application converted to workflow successfully | +| 400 | Application cannot be converted | +| 403 | Permission denied | + +### /apps/{app_id}/copy + +#### POST +##### Summary + +Copy app + +##### Description + +Create a copy of an existing application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CopyAppPayload](#copyapppayload) | +| app_id | path | Application ID to copy | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | App copied successfully | [AppDetailWithSite](#appdetailwithsite) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/export + +#### GET +##### Summary + +Export app + +##### Description + +Export application configuration as DSL + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppExportQuery](#appexportquery) | +| app_id | path | Application ID to export | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | App exported successfully | [AppExportResponse](#appexportresponse) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/feedbacks + +#### POST +##### Description + +Create or update message feedback (like/dislike) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | 
------ | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback updated successfully | +| 403 | Insufficient permissions | +| 404 | Message not found | + +### /apps/{app_id}/feedbacks/export + +#### GET +##### Description + +Export user feedback data for Google Sheets + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FeedbackExportQuery](#feedbackexportquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback data exported successfully | +| 400 | Invalid parameters | +| 500 | Internal server error | + +### /apps/{app_id}/icon + +#### POST +##### Description + +Update application icon + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppIconPayload](#appiconpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Icon updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/messages/{message_id} + +#### GET +##### Description + +Get message details by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Message retrieved successfully | [MessageDetailResponse](#messagedetailresponse) | +| 404 | Message not found | | + +### /apps/{app_id}/model-config + +#### POST +##### Summary + +Modify app model config + 
+##### Description + +Update application model configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ModelConfigRequest](#modelconfigrequest) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Model configuration updated successfully | +| 400 | Invalid configuration | +| 404 | App not found | + +### /apps/{app_id}/name + +#### POST +##### Description + +Check if app name is available + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppNamePayload](#appnamepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Name availability checked | [AppDetail](#appdetail) | + +### /apps/{app_id}/publish-to-creators-platform + +#### POST +##### Summary + +Publish app to Creators Platform + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/server + +#### GET +##### Description + +Get MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server configuration retrieved successfully | [AppMCPServerResponse](#appmcpserverresponse) | + +#### POST +##### Description + +Create MCP server configuration for an application + +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPServerCreatePayload](#mcpservercreatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | MCP server configuration created successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | + +#### PUT +##### Description + +Update MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPServerUpdatePayload](#mcpserverupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server configuration updated successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | +| 404 | Server not found | | + +### /apps/{app_id}/site + +#### POST +##### Description + +Update application site configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppSiteUpdatePayload](#appsiteupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Site configuration updated successfully | [AppSiteResponse](#appsiteresponse) | +| 403 | Insufficient permissions | | +| 404 | App not found | | + +### /apps/{app_id}/site-enable + +#### POST +##### Description + +Enable or disable app site + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppSiteStatusPayload](#appsitestatuspayload) | +| app_id | path | 
Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Site status updated successfully | [AppDetail](#appdetail) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/site/access-token-reset + +#### POST +##### Description + +Reset access token for application site + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Access token reset successfully | [AppSiteResponse](#appsiteresponse) | +| 403 | Insufficient permissions (admin/owner required) | | +| 404 | App or site not found | | + +### /apps/{app_id}/statistics/average-response-time + +#### GET +##### Description + +Get average response time statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Average response time statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/average-session-interactions + +#### GET +##### Description + +Get average session interaction statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Average session interaction statistics retrieved successfully | [ object ] | + +### 
/apps/{app_id}/statistics/daily-conversations + +#### GET +##### Description + +Get daily conversation statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily conversation statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-end-users + +#### GET +##### Description + +Get daily terminal/end-user statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily terminal statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-messages + +#### GET +##### Description + +Get daily message statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily message statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/token-costs + +#### GET +##### Description + +Get daily token cost statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily token cost statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/tokens-per-second + +#### GET +##### Description + +Get tokens per second statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tokens per second statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/user-satisfaction-rate + +#### GET +##### Description + +Get user satisfaction rate statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | User satisfaction rate statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/text-to-audio + +#### POST +##### Description + +Convert text to speech for chat messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToSpeechPayload](#texttospeechpayload) | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Text to speech conversion successful | +| 400 | Bad request - Invalid parameters | + +### /apps/{app_id}/text-to-audio/voices + +#### 
GET +##### Description + +Get available TTS voices for a specific language + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToSpeechVoiceQuery](#texttospeechvoicequery) | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | TTS voices retrieved successfully | [ object ] | +| 400 | Invalid language parameter | | + +### /apps/{app_id}/trace + +#### GET +##### Summary + +Get app trace + +##### Description + +Get app tracing configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trace configuration retrieved successfully | + +#### POST +##### Description + +Update app tracing configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppTracePayload](#apptracepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trace configuration updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/trace-config + +#### DELETE +##### Summary + +Delete an existing trace app configuration + +##### Description + +Delete an existing tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceProviderQuery](#traceproviderquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tracing configuration deleted 
successfully | +| 400 | Invalid request parameters or configuration not found | + +#### GET +##### Description + +Get tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceProviderQuery](#traceproviderquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tracing configuration retrieved successfully | object | +| 400 | Invalid request parameters | | + +#### PATCH +##### Summary + +Update an existing trace app configuration + +##### Description + +Update an existing tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceConfigPayload](#traceconfigpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tracing configuration updated successfully | object | +| 400 | Invalid request parameters or configuration not found | | + +#### POST +##### Summary + +Create a new trace app configuration + +##### Description + +Create a new tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceConfigPayload](#traceconfigpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Tracing configuration created successfully | object | +| 400 | Invalid request parameters or configuration already exists | | + +### /apps/{app_id}/trigger-enable + +#### POST +##### Summary + +Update app trigger (enable/disable) + +##### Parameters + 
+| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [ParserEnable](#parserenable) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WorkflowTriggerResponse](#workflowtriggerresponse) | + +### /apps/{app_id}/triggers + +#### GET +##### Summary + +Get app triggers list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WorkflowTriggerListResponse](#workflowtriggerlistresponse) | + +### /apps/{app_id}/workflow-app-logs + +#### GET +##### Summary + +Get workflow app logs + +##### Description + +Get workflow application execution logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowAppLogQuery](#workflowapplogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow app logs retrieved successfully | [WorkflowAppLogPaginationResponse](#workflowapplogpaginationresponse) | + +### /apps/{app_id}/workflow-archived-logs + +#### GET +##### Summary + +Get workflow archived logs + +##### Description + +Get workflow archived execution logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowAppLogQuery](#workflowapplogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow archived logs retrieved successfully | 
[WorkflowArchivedLogPaginationResponse](#workflowarchivedlogpaginationresponse) | + +### /apps/{app_id}/workflow-runs + +#### GET +##### Summary + +Get workflow run list + +##### Description + +Get workflow run list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| last_id | query | Last run ID for pagination | No | string | +| limit | query | Number of items per page (1-100) | No | integer | +| status | query | Workflow run status filter | No | string | +| triggered_from | query | Filter by trigger source: debugging or app-run. Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs retrieved successfully | [WorkflowRunPaginationResponse](#workflowrunpaginationresponse) | + +### /apps/{app_id}/workflow-runs/count + +#### GET +##### Summary + +Get workflow runs count statistics + +##### Description + +Get workflow runs count statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| status | query | Workflow run status filter | No | string | +| time_range | query | Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), 30m (30 minutes), 30s (30 seconds). Filters by created_at field. | No | string | +| triggered_from | query | Filter by trigger source: debugging or app-run. 
Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs count retrieved successfully | [WorkflowRunCountResponse](#workflowruncountresponse) | + +### /apps/{app_id}/workflow-runs/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Description + +Stop running workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| task_id | path | Task ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 403 | Permission denied | +| 404 | Task not found | + +### /apps/{app_id}/workflow-runs/{run_id} + +#### GET +##### Summary + +Get workflow run detail + +##### Description + +Get workflow run detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow run detail retrieved successfully | [WorkflowRunDetailResponse](#workflowrundetailresponse) | +| 404 | Workflow run not found | | + +### /apps/{app_id}/workflow-runs/{run_id}/export + +#### GET +##### Description + +Generate a download URL for an archived workflow run. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Export URL generated | [WorkflowRunExportResponse](#workflowrunexportresponse) | + +### /apps/{app_id}/workflow-runs/{run_id}/node-executions + +#### GET +##### Summary + +Get workflow run node execution list + +##### Description + +Get workflow run node execution list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node executions retrieved successfully | [WorkflowRunNodeExecutionListResponse](#workflowrunnodeexecutionlistresponse) | +| 404 | Workflow run not found | | + +### /apps/{app_id}/workflow/comments + +#### GET +##### Summary + +Get all comments for a workflow + +##### Description + +Get all comments for a workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comments retrieved successfully | [WorkflowCommentBasic](#workflowcommentbasic) | + +#### POST +##### Summary + +Create a new workflow comment + +##### Description + +Create a new workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentCreatePayload](#workflowcommentcreatepayload) | +| app_id | path | Application ID | 
Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Comment created successfully | [WorkflowCommentCreate](#workflowcommentcreate) | + +### /apps/{app_id}/workflow/comments/mention-users + +#### GET +##### Summary + +Get all users in current tenant for mentions + +##### Description + +Get all users in current tenant for mentions + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Mentionable users retrieved successfully | [WorkflowCommentMentionUsersPayload](#workflowcommentmentionuserspayload) | + +### /apps/{app_id}/workflow/comments/{comment_id} + +#### DELETE +##### Summary + +Delete a workflow comment + +##### Description + +Delete a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Comment deleted successfully | + +#### GET +##### Summary + +Get a specific workflow comment + +##### Description + +Get a specific workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment retrieved successfully | [WorkflowCommentDetail](#workflowcommentdetail) | + +#### PUT +##### Summary + +Update a workflow comment + +##### Description + +Update a workflow comment + +##### Parameters + +| Name | Located in | Description 
| Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentUpdatePayload](#workflowcommentupdatepayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment updated successfully | [WorkflowCommentUpdate](#workflowcommentupdate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/replies + +#### POST +##### Summary + +Add a reply to a workflow comment + +##### Description + +Add a reply to a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentReplyPayload](#workflowcommentreplypayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Reply created successfully | [WorkflowCommentReplyCreate](#workflowcommentreplycreate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/replies/{reply_id} + +#### DELETE +##### Summary + +Delete a comment reply + +##### Description + +Delete a comment reply + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | +| reply_id | path | Reply ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Reply deleted successfully | + +#### PUT +##### Summary + +Update a comment reply + +##### Description + +Update a comment reply + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[WorkflowCommentReplyPayload](#workflowcommentreplypayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | +| reply_id | path | Reply ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Reply updated successfully | [WorkflowCommentReplyUpdate](#workflowcommentreplyupdate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/resolve + +#### POST +##### Summary + +Resolve a workflow comment + +##### Description + +Resolve a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment resolved successfully | [WorkflowCommentResolve](#workflowcommentresolve) | + +### /apps/{app_id}/workflow/statistics/average-app-interactions + +#### GET +##### Description + +Get workflow average app interaction statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Average app interaction statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/daily-conversations + +#### GET +##### Description + +Get workflow daily runs statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | 
----------- | +| 200 | Daily runs statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/daily-terminals + +#### GET +##### Description + +Get workflow daily terminals statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Daily terminals statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/token-costs + +#### GET +##### Description + +Get workflow daily token cost statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Daily token cost statistics retrieved successfully | + +### /apps/{app_id}/workflows + +#### GET +##### Summary + +Get published workflows + +##### Description + +Get all published workflows for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowListQuery](#workflowlistquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Published workflows retrieved successfully | [WorkflowPagination](#workflowpagination) | + +### /apps/{app_id}/workflows/default-workflow-block-configs + +#### GET +##### Summary + +Get default block config + +##### Description + +Get default block configurations for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- 
| ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Default block configurations retrieved successfully | + +### /apps/{app_id}/workflows/default-workflow-block-configs/{block_type} + +#### GET +##### Summary + +Get default block config + +##### Description + +Get default block configuration by type + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DefaultBlockConfigQuery](#defaultblockconfigquery) | +| app_id | path | Application ID | Yes | string | +| block_type | path | Block type | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Default block configuration retrieved successfully | +| 404 | Block type not found | + +### /apps/{app_id}/workflows/draft + +#### GET +##### Summary + +Get draft workflow + +##### Description + +Get draft workflow for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Draft workflow retrieved successfully | [Workflow](#workflow) | +| 404 | Draft workflow not found | | + +#### POST +##### Summary + +Sync draft workflow + +##### Description + +Sync draft workflow configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SyncDraftWorkflowPayload](#syncdraftworkflowpayload) | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Draft workflow synced successfully | [SyncDraftWorkflowResponse](#syncdraftworkflowresponse) | 
+| 400 | Invalid workflow configuration | | +| 403 | Permission denied | | + +### /apps/{app_id}/workflows/draft/conversation-variables + +#### GET +##### Description + +Get conversation variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Conversation variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | +| 404 | Draft workflow not found | | + +#### POST +##### Description + +Update conversation variables for workflow draft + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariableUpdatePayload](#conversationvariableupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation variables updated successfully | + +### /apps/{app_id}/workflows/draft/environment-variables + +#### GET +##### Summary + +Get environment variables + +##### Description + +Get environment variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Environment variables retrieved successfully | +| 404 | Draft workflow not found | + +#### POST +##### Description + +Update environment variables for workflow draft + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EnvironmentVariableUpdatePayload](#environmentvariableupdatepayload) | +| app_id | path | 
Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Environment variables updated successfully | + +### /apps/{app_id}/workflows/draft/features + +#### POST +##### Description + +Update draft workflow features + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowFeaturesPayload](#workflowfeaturespayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow features updated successfully | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/delivery-test + +#### POST +##### Summary + +Test human input delivery + +##### Description + +Test human input delivery for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputDeliveryTestPayload](#humaninputdeliverytestpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/preview + +#### POST +##### Summary + +Preview human input form content and placeholders + +##### Description + +Get human input form preview for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormPreviewPayload](#humaninputformpreviewpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/run + +#### POST 
+##### Summary + +Submit human input form preview + +##### Description + +Submit human input form preview for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Description + +Run draft workflow iteration node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IterationNodeRunPayload](#iterationnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow iteration node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Description + +Run draft workflow loop node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoopNodeRunPayload](#loopnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow loop node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/last-run + +#### GET +##### Description + +Get last run result for 
draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node last run retrieved successfully | [WorkflowRunNodeExecutionResponse](#workflowrunnodeexecutionresponse) | +| 403 | Permission denied | | +| 404 | Node last run not found | | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow node + +##### Description + +Run draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowNodeRunPayload](#draftworkflownoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node run started successfully | [WorkflowRunNodeExecutionResponse](#workflowrunnodeexecutionresponse) | +| 403 | Permission denied | | +| 404 | Node not found | | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/trigger/run + +#### POST +##### Summary + +Poll for trigger events and execute single node when event arrives + +##### Description + +Poll for trigger events and execute single node when event arrives + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trigger event received and node executed successfully | +| 403 | Permission denied | +| 500 | Internal server error | + +### 
/apps/{app_id}/workflows/draft/nodes/{node_id}/variables + +#### DELETE +##### Description + +Delete all variables for a specific node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| node_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Node variables deleted successfully | + +#### GET +##### Description + +Get variables for a specific node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | + +### /apps/{app_id}/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Description + +Run draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowRunPayload](#draftworkflowrunpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Draft workflow run started successfully | +| 403 | Permission denied | + +### /apps/{app_id}/workflows/draft/system-variables + +#### GET +##### Description + +Get system variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | System variables retrieved successfully | 
[WorkflowDraftVariableList](#workflowdraftvariablelist) | + +### /apps/{app_id}/workflows/draft/trigger/run + +#### POST +##### Summary + +Poll for trigger events and execute full workflow when event arrives + +##### Description + +Poll for trigger events and execute full workflow when event arrives + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowTriggerRunRequest](#draftworkflowtriggerrunrequest) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trigger event received and workflow executed successfully | +| 403 | Permission denied | +| 500 | Internal server error | + +### /apps/{app_id}/workflows/draft/trigger/run-all + +#### POST +##### Summary + +Full workflow debug when the start node is a trigger + +##### Description + +Full workflow debug when the start node is a trigger + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowTriggerRunAllPayload](#draftworkflowtriggerrunallpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 403 | Permission denied | +| 500 | Internal server error | + +### /apps/{app_id}/workflows/draft/variables + +#### DELETE +##### Description + +Delete all draft workflow variables + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Workflow variables deleted successfully | + +#### GET +##### Summary + +Get draft workflow variables + +##### Description + +Get draft workflow variables + +##### Parameters + 
+| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowDraftVariableListQuery](#workflowdraftvariablelistquery) | +| app_id | path | Application ID | Yes | string | +| limit | query | Number of items per page (1-100) | No | string | +| page | query | Page number (1-100000) | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow variables retrieved successfully | [WorkflowDraftVariableListWithoutValue](#workflowdraftvariablelistwithoutvalue) | + +### /apps/{app_id}/workflows/draft/variables/{variable_id} + +#### DELETE +##### Description + +Delete a workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Variable deleted successfully | +| 404 | Variable not found | + +#### GET +##### Description + +Get a specific workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable retrieved successfully | [WorkflowDraftVariable](#workflowdraftvariable) | +| 404 | Variable not found | | + +#### PATCH +##### Description + +Update a workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowDraftVariableUpdatePayload](#workflowdraftvariableupdatepayload) | +| app_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable updated successfully | [WorkflowDraftVariable](#workflowdraftvariable) | +| 404 | Variable not found | | + +### /apps/{app_id}/workflows/draft/variables/{variable_id}/reset + +#### PUT +##### Description + +Reset a workflow variable to its default value + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable reset successfully | [WorkflowDraftVariable](#workflowdraftvariable) | +| 204 | Variable reset (no content) | | +| 404 | Variable not found | | + +### /apps/{app_id}/workflows/publish + +#### GET +##### Summary + +Get published workflow + +##### Description + +Get published workflow for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Published workflow retrieved successfully | [Workflow](#workflow) | +| 404 | Published workflow not found | | + +#### POST +##### Summary + +Publish workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [PublishWorkflowPayload](#publishworkflowpayload) | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/triggers/webhook + +#### GET +##### Summary + +Get webhook trigger for a node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | 
----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [Parser](#parser) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WebhookTriggerResponse](#webhooktriggerresponse) | + +### /apps/{app_id}/workflows/{workflow_id} + +#### DELETE +##### Summary + +Delete workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Summary + +Update workflow attributes + +##### Description + +Update workflow by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowUpdatePayload](#workflowupdatepayload) | +| app_id | path | Application ID | Yes | string | +| workflow_id | path | Workflow ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow updated successfully | [Workflow](#workflow) | +| 403 | Permission denied | | +| 404 | Workflow not found | | + +### /apps/{app_id}/workflows/{workflow_id}/restore + +#### POST +##### Description + +Restore a published workflow version into the draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| workflow_id | path | Published workflow ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow restored successfully | +| 400 | Source workflow must be published | +| 404 | Workflow not found | + +### /apps/{resource_id}/api-keys + +#### GET +##### Summary + +Get all API keys for an app + +##### 
Description + +Get all API keys for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Summary + +Create a new API key for an app + +##### Description + +Create a new API key for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /apps/{resource_id}/api-keys/{api_key_id} + +#### DELETE +##### Summary + +Delete an API key for an app + +##### Description + +Delete an API key for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + +### /apps/{server_id}/server/refresh + +#### GET +##### Description + +Refresh MCP server configuration and regenerate server code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| server_id | path | Server ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server refreshed successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | +| 404 | Server not found | | + +### /auth/plugin/datasource/default-list + 
+#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/list + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialPayload](#datasourcecredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/custom-client + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCustomClientPayload](#datasourcecustomclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/default + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceDefaultPayload](#datasourcedefaultpayload) | + +##### Responses + +| Code | Description | +| 
---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialDeletePayload](#datasourcecredentialdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialUpdatePayload](#datasourcecredentialupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/update-name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceUpdateNamePayload](#datasourceupdatenamepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /billing/invoices + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /billing/partners/{partner_key}/tenants + +#### PUT +##### Description + +Sync partner tenants bindings + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [PartnerTenantsPayload](#partnertenantspayload) | +| partner_key | path | Partner key | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tenants synced to partner successfully | +| 400 | Invalid partner information | + +### 
/billing/subscription + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /code-based-extension + +#### GET +##### Description + +Get code-based extension data by module name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| module | query | Extension module name | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [CodeBasedExtensionResponse](#codebasedextensionresponse) | + +### /compliance/download + +#### GET +##### Description + +Get compliance document download link + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ComplianceDownloadQuery](#compliancedownloadquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /data-source/integrates + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /data-source/integrates/{binding_id}/{action} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets + +#### GET +##### Description + +Get list of datasets + +##### Parameters + +| Name 
| Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| ids | query | Filter by dataset IDs (list) | No | string | +| include_all | query | Include all datasets (default: false) | No | string | +| keyword | query | Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | +| tag_ids | query | Filter by tag IDs (list) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasets retrieved successfully | + +#### POST +##### Description + +Create a new dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetCreatePayload](#datasetcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Dataset created successfully | +| 400 | Invalid request parameters | + +### /datasets/api-base-info + +#### GET +##### Description + +Get dataset API base information + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | API base info retrieved successfully | + +### /datasets/api-keys + +#### GET +##### Description + +Get dataset API keys + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /datasets/api-keys/{api_key_id} + +#### DELETE +##### Description + +Delete dataset API key + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | + +##### Responses + +| Code | 
Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + +### /datasets/batch_import_status/{job_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| job_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| job_id | path | | Yes | string | +| payload | body | | Yes | [BatchImportPayload](#batchimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external + +#### POST +##### Description + +Create external knowledge dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalDatasetCreatePayload](#externaldatasetcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | External dataset created successfully | [DatasetDetail](#datasetdetail) | +| 400 | Invalid parameters | | +| 403 | Permission denied | | + +### /datasets/external-knowledge-api + +#### GET +##### Description + +Get external knowledge API templates + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| keyword | query | Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External API templates retrieved successfully | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | 
+| payload | body | | Yes | [ExternalKnowledgeApiPayload](#externalknowledgeapipayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external-knowledge-api/{external_knowledge_api_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get external knowledge API template details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | External knowledge API ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External API template retrieved successfully | +| 404 | Template not found | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalKnowledgeApiPayload](#externalknowledgeapipayload) | +| external_knowledge_api_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external-knowledge-api/{external_knowledge_api_id}/use-check + +#### GET +##### Description + +Check if external knowledge API is being used + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | External knowledge API ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Usage check completed successfully | + +### /datasets/indexing-estimate + +#### POST +##### Description + +Estimate dataset indexing cost + +##### Parameters + +| Name | Located 
in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IndexingEstimatePayload](#indexingestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing estimate calculated successfully | + +### /datasets/init + +#### POST +##### Description + +Initialize dataset with documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [KnowledgeConfig](#knowledgeconfig) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Dataset initialized successfully | [DatasetAndDocumentResponse](#datasetanddocumentresponse) | +| 400 | Invalid request parameters | | + +### /datasets/metadata/built-in + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/notion-indexing-estimate + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [NotionEstimatePayload](#notionestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/process-rule + +#### GET +##### Description + +Get dataset document processing rules + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| document_id | query | Document ID (optional) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Process rules retrieved successfully | + +### /datasets/retrieval-setting + +#### GET +##### Description + +Get dataset retrieval settings + +##### Responses + +| Code | Description | +| ---- | ----------- | 
+| 200 | Retrieval settings retrieved successfully | + +### /datasets/retrieval-setting/{vector_type} + +#### GET +##### Description + +Get mock dataset retrieval settings by vector type + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| vector_type | path | Vector store type | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Mock retrieval settings retrieved successfully | + +### /datasets/{dataset_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get dataset details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Dataset retrieved successfully | [DatasetDetail](#datasetdetail) | +| 403 | Permission denied | | +| 404 | Dataset not found | | + +#### PATCH +##### Description + +Update dataset details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetUpdatePayload](#datasetupdatepayload) | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Dataset updated successfully | [DatasetDetail](#datasetdetail) | +| 403 | Permission denied | | +| 404 | Dataset not found | | + +### /datasets/{dataset_id}/api-keys/{status} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- 
| ------ | +| dataset_id | path | | Yes | string | +| status | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/auto-disable-logs + +#### GET +##### Description + +Get dataset auto disable logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Auto disable logs retrieved successfully | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/batch/{batch}/indexing-estimate + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/batch/{batch}/indexing-status + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get documents in a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| fetch | query | Fetch full details (default: false) | No | string | 
+| keyword | query | Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | +| sort | query | Sort order (default: -created_at) | No | string | +| status | query | Filter documents by display status | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents retrieved successfully | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [KnowledgeConfig](#knowledgeconfig) | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Documents created successfully | [DatasetAndDocumentResponse](#datasetanddocumentresponse) | + +### /datasets/{dataset_id}/documents/download-zip + +#### POST +##### Summary + +Stream a ZIP archive containing the requested uploaded documents + +##### Description + +Download selected dataset documents as a single ZIP archive (upload-file only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [DocumentBatchDownloadZipPayload](#documentbatchdownloadzippayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/generate-summary + +#### POST +##### Summary + +Generate summary index for specified documents + +##### Description + +Generate summary index for documents +This endpoint checks if the dataset configuration supports summary generation +(indexing_technique must be 'high_quality' and summary_index_setting.enable must be true), +then asynchronously generates summary indexes for the provided documents. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [GenerateSummaryPayload](#generatesummarypayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Summary generation started successfully | +| 400 | Invalid request or dataset configuration | +| 403 | Permission denied | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/metadata + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [MetadataOperationData](#metadataoperationdata) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/status/{action}/batch + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get document details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| metadata | query | Metadata inclusion (all/only/without) 
| No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/download + +#### GET +##### Description + +Get a signed download URL for a dataset document's original uploaded file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/indexing-estimate + +#### GET +##### Description + +Estimate document indexing cost + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing estimate calculated successfully | +| 400 | Document already finished | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/indexing-status + +#### GET +##### Description + +Get document indexing status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/metadata + +#### PUT +##### Description + +Update document metadata + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body 
| | Yes | [DocumentMetadataUpdatePayload](#documentmetadataupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document metadata updated successfully | +| 403 | Permission denied | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/notion/sync + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/pipeline-execution-log + +#### GET
##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/processing/pause + +#### PATCH +##### Summary + +Pause document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/processing/resume + +#### PATCH +##### Summary + +Recover document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/datasets/{dataset_id}/documents/{document_id}/processing/{action} + +#### PATCH +##### Description + +Update document processing status (pause/resume) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform (pause/resume) | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Processing status updated successfully | +| 400 | Invalid action | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/rename + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [DocumentRenamePayload](#documentrenamepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Document renamed successfully | [DocumentResponse](#documentresponse) | + +### /datasets/{dataset_id}/documents/{document_id}/segment + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [SegmentCreatePayload](#segmentcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segment/{action} + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | 
Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/batch_import + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [BatchImportPayload](#batchimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + 
+| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [SegmentUpdatePayload](#segmentupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [ChildChunkCreatePayload](#childchunkcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path 
| | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [ChildChunkUpdatePayload](#childchunkupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/summary-status + +#### GET +##### Summary + +Get summary index generation status for a document + +##### Description + +Get summary index generation status for a document +Returns: +- total_segments: Total number of segments in the document +- summary_status: Dictionary with status counts + - completed: Number of summaries completed + - generating: Number of summaries being generated + - error: Number of summaries with errors + - not_started: Number of segments without summary records +- summaries: List of summary records with status and content preview + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Summary status retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/website-sync + +#### GET +##### Summary + +sync website document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | 
Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/error-docs + +#### GET +##### Description + +Get dataset error documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Error documents retrieved successfully | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/external-hit-testing + +#### POST +##### Description + +Test external knowledge retrieval for dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalHitTestingPayload](#externalhittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External hit testing completed successfully | +| 400 | Invalid parameters | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/hit-testing + +#### POST +##### Description + +Test dataset knowledge retrieval + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Hit testing completed successfully | [HitTestingResponse](#hittestingresponse) | +| 400 | Invalid parameters | | +| 404 | Dataset not found | | + +### /datasets/{dataset_id}/indexing-status + +#### GET +##### Description + +Get dataset indexing status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved successfully | + +### /datasets/{dataset_id}/metadata + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [MetadataArgs](#metadataargs) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/metadata/built-in/{action} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/metadata/{metadata_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| metadata_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| metadata_id | path | | Yes | string | +| payload | body | | Yes | [MetadataUpdatePayload](#metadataupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/notion/sync + +#### GET +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/permission-part-users + +#### GET +##### Description + +Get dataset permission user list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Permission users retrieved successfully | +| 403 | Permission denied | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/queries + +#### GET +##### Description + +Get dataset query history + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Query history retrieved successfully | [DatasetQueryDetail](#datasetquerydetail) | + +### /datasets/{dataset_id}/related-apps + +#### GET +##### Description + +Get applications related to dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Related apps retrieved successfully | [RelatedAppList](#relatedapplist) | + +### /datasets/{dataset_id}/retry + +#### POST +##### Summary + +retry document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [DocumentRetryPayload](#documentretrypayload) | + +##### Responses + +| Code 
| Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/use-check + +#### GET +##### Description + +Check if dataset is in use + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset use status retrieved successfully | + +### /datasets/{resource_id}/api-keys + +#### GET +##### Summary + +Get all API keys for a dataset + +##### Description + +Get all API keys for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Summary + +Create a new API key for a dataset + +##### Description + +Create a new API key for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /datasets/{resource_id}/api-keys/{api_key_id} + +#### DELETE +##### Summary + +Delete an API key for a dataset + +##### Description + +Delete an API key for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + 
+### /email-code-login + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailPayload](#emailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-code-login/validity + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginPayload](#emailcodeloginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register/send-email + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register/validity + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /explore/apps + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| language | query | Language code for recommended app localization | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [RecommendedAppListResponse](#recommendedapplistresponse) | + +### /explore/apps/{app_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /features + +#### GET +##### Summary + +Get feature configuration for current tenant + +##### Description + +Get feature configuration for current tenant + +##### Responses + +| Code | Description | Schema | +| ---- | 
----------- | ------ | +| 200 | Success | [FeatureResponse](#featureresponse) | + +### /files/support-type + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /files/upload + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [UploadConfig](#uploadconfig) | + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | + +### /files/{file_id}/preview + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| file_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /forgot-password + +#### POST +##### Description + +Send password reset email + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordSendPayload](#forgotpasswordsendpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Email sent successfully | [ForgotPasswordEmailResponse](#forgotpasswordemailresponse) | +| 400 | Invalid email or rate limit exceeded | | + +### /forgot-password/resets + +#### POST +##### Description + +Reset password with verification token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordResetPayload](#forgotpasswordresetpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Password reset successfully | [ForgotPasswordResetResponse](#forgotpasswordresetresponse) | +| 400 | Invalid token or password mismatch | | + +### /forgot-password/validity + +#### POST 
+##### Description + +Verify password reset code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordCheckPayload](#forgotpasswordcheckpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Code verified successfully | [ForgotPasswordCheckResponse](#forgotpasswordcheckresponse) | +| 400 | Invalid code or token | | + +### /form/human_input/{form_token} + +#### GET +##### Summary + +Get human input form definition by form token + +##### Description + +GET /console/api/form/human_input/{form_token} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Submit human input form by form token + +##### Description + +POST /console/api/form/human_input/{form_token} + +Request body: +{ + "inputs": { + "content": "User input content" + }, + "action": "Approve" +} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /info + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [TenantInfoResponse](#tenantinforesponse) | + +### /installed-apps + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [InstalledAppListResponse](#installedapplistresponse) | + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id} + +#### DELETE +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/audio-to-text + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/chat-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ChatMessagePayload](#chatmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/chat-messages/{task_id}/stop + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/completion-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [CompletionMessageExplorePayload](#completionmessageexplorepayload) | + 
+##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/completion-messages/{task_id}/stop + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ConversationListQuery](#conversationlistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ConversationRenamePayload](#conversationrenamepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/pin + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| 
installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/unpin + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [MessageListQuery](#messagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/feedbacks + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/more-like-this + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MoreLikeThisQuery](#morelikethisquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/suggested-questions + +#### GET +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/meta + +#### GET +##### Summary + +Get app meta + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/saved-messages + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [SavedMessageListQuery](#savedmessagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [SavedMessageCreatePayload](#savedmessagecreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/saved-messages/{message_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | 
------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/text-to-audio + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/workflows/run + +#### POST +##### Summary + +Run workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /instruction-generate + +#### POST +##### Description + +Generate instruction for workflow nodes or general use + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InstructionGeneratePayload](#instructiongeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Instruction generated successfully | +| 400 | Invalid request parameters or flow/workflow not found | +| 402 | 
Provider quota exceeded | + +### /instruction-generate/template + +#### POST +##### Description + +Get instruction generation template + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InstructionTemplatePayload](#instructiontemplatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Template retrieved successfully | +| 400 | Invalid request parameters | + +### /login + +#### POST +##### Summary + +Authenticate user and login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoginPayload](#loginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /logout + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /mcp/oauth/callback + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /notification + +#### GET +##### Description + +Return the active in-product notification for the current user in their interface language (falls back to English if unavailable). The notification is NOT marked as seen here; call POST /notification/dismiss when the user explicitly closes the modal. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success — inspect should_show to decide whether to render the modal | +| 401 | Unauthorized | + +### /notification/dismiss + +#### POST +##### Description + +Mark a notification as dismissed for the current user. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 401 | Unauthorized | + +### /notion/pages/{page_id}/{page_type}/preview + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| page_id | path | | Yes | string | +| page_type | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| page_id | path | | Yes | string | +| page_type | path | | Yes | string | +| payload | body | | Yes | [NotionEstimatePayload](#notionestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /notion/pre-import/pages + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/authorize/{provider} + +#### GET +##### Description + +Handle OAuth callback and complete login process + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | OAuth provider name (github/google) | Yes | string | +| code | query | Authorization code from OAuth provider | No | string | +| state | query | Optional state parameter (used for invite token) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to console with access token | +| 400 | OAuth process failed | + +### /oauth/data-source/binding/{provider} + +#### GET +##### Description + +Bind OAuth data source with authorization code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | +| code | query | Authorization code 
from OAuth provider | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Data source binding success | [OAuthDataSourceBindingResponse](#oauthdatasourcebindingresponse) | +| 400 | Invalid provider or code | | + +### /oauth/data-source/callback/{provider} + +#### GET +##### Description + +Handle OAuth callback from data source provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | +| code | query | Authorization code from OAuth provider | No | string | +| error | query | Error message from OAuth provider | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to console with result | +| 400 | Invalid provider | + +### /oauth/data-source/{provider} + +#### GET +##### Description + +Get OAuth authorization URL for data source provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Authorization URL or internal setup success | [OAuthDataSourceResponse](#oauthdatasourceresponse) | +| 400 | Invalid provider | | +| 403 | Admin privileges required | | + +### /oauth/data-source/{provider}/{binding_id}/sync + +#### GET +##### Description + +Sync data from OAuth data source + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| binding_id | path | Data source binding ID | Yes | string | +| provider | path | Data source provider name (notion) | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Data source sync success 
| [OAuthDataSourceSyncResponse](#oauthdatasourcesyncresponse) | +| 400 | Invalid provider or sync failed | | + +### /oauth/login/{provider} + +#### GET +##### Description + +Initiate OAuth login process + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | OAuth provider name (github/google) | Yes | string | +| invite_token | query | Optional invitation token | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to OAuth authorization URL | +| 400 | Invalid provider | + +### /oauth/plugin/{provider_id}/datasource/callback + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider_id}/datasource/get-authorization-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/tool/authorization-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/tool/callback + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/trigger/callback + +#### 
GET +##### Summary + +Handle OAuth callback for trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/account + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/authorize + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/token + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/customized/templates/{template_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/dataset + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RagPipelineDatasetImportPayload](#ragpipelinedatasetimportpayload) | 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/empty-dataset + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/templates + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/templates/{template_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/datasource-plugins + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RagPipelineImportPayload](#ragpipelineimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports/{import_id}/confirm + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| import_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports/{pipeline_id}/check-dependencies + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/recommended-plugins + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/rag/pipelines/transform/datasets/{dataset_id} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/customized/publish + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [Payload](#payload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/exports + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs + +#### GET +##### Summary + +Get workflow run list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs retrieved successfully | [WorkflowRunPaginationResponse](#workflowrunpaginationresponse) | + +### /rag/pipelines/{pipeline_id}/workflow-runs/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs/{run_id} + +#### GET 
+##### Summary + +Get workflow run detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| run_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow run detail retrieved successfully | [WorkflowRunDetailResponse](#workflowrundetailresponse) | + +### /rag/pipelines/{pipeline_id}/workflow-runs/{run_id}/node-executions + +#### GET +##### Summary + +Get workflow run node execution list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| run_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node executions retrieved successfully | [WorkflowRunNodeExecutionListResponse](#workflowrunnodeexecutionlistresponse) | + +### /rag/pipelines/{pipeline_id}/workflows + +#### GET +##### Summary + +Get published workflows + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs + +#### GET +##### Summary + +Get default block config + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs/{block_type} + +#### GET +##### Summary + +Get default block config + +##### Parameters + +| Name | Located in | Description | 
Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| block_type | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft + +#### GET +##### Summary + +Get draft rag pipeline's workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Sync draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Run rag pipeline datasource + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceNodeRunPayload](#datasourcenoderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/datasource/variables-inspect + +#### POST +##### Summary + +Set datasource variables + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceVariablesPayload](#datasourcevariablespayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Datasource variables set 
successfully | [WorkflowRunNodeExecutionResponse](#workflowrunnodeexecutionresponse) | + +### /rag/pipelines/{pipeline_id}/workflows/draft/environment-variables + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunPayload](#noderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunPayload](#noderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/last-run + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node last run retrieved successfully | [WorkflowRunNodeExecutionResponse](#workflowrunnodeexecutionresponse) | + +### 
/rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunRequiredPayload](#noderunrequiredpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node run started successfully | [WorkflowRunNodeExecutionResponse](#workflowrunnodeexecutionresponse) | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/variables + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/pre-processing/parameters + +#### GET +##### Summary + +Get first step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/processing/parameters + +#### GET +##### Summary + +Get second step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | 
-------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DraftWorkflowRunPayload](#draftworkflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/system-variables + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | 
Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id}/reset + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/publish + +#### GET +##### Summary + +Get published pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Publish workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/preview + +#### POST +##### Summary + +Run datasource content preview + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string 
| +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [Parser](#parser) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Run rag pipeline datasource + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceNodeRunPayload](#datasourcenoderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/pre-processing/parameters + +#### GET +##### Summary + +Get first step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/processing/parameters + +#### GET +##### Summary + +Get second step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/run + +#### POST +##### Summary + +Run published workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [PublishedWorkflowRunPayload](#publishedworkflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 
200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/{workflow_id} + +#### DELETE +##### Summary + +Delete a published workflow version that is not currently active on the pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Summary + +Update workflow attributes + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/{workflow_id}/restore + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /refresh-token + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /remote-files/upload + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /remote-files/{url} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| url | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /reset-password + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[EmailPayload](#emailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rule-code-generate + +#### POST +##### Description + +Generate code rules using LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleCodeGeneratePayload](#rulecodegeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Code rules generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /rule-generate + +#### POST +##### Description + +Generate rule configuration using LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleGeneratePayload](#rulegeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Rule configuration generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /rule-structured-output-generate + +#### POST +##### Description + +Generate structured output rules using LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleStructuredOutputPayload](#rulestructuredoutputpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Structured output generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /spec/schema-definitions + +#### GET +##### Summary + +Get system JSON Schema definitions specification + +##### Description + +Used for frontend component type mapping + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /system-features + +#### GET +##### Summary + +Get system-wide feature 
configuration + +##### Description + +Get system-wide feature configuration +NOTE: This endpoint is unauthenticated by design, as it provides system features +data required for dashboard initialization. + +Authentication would create circular dependency (can't login without dashboard loading). + +Only non-sensitive configuration data should be returned by this endpoint. + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [SystemFeatureResponse](#systemfeatureresponse) | + +### /tag-bindings + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingPayload](#tagbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tag-bindings/remove + +#### POST +##### Description + +Remove one or more tag bindings from a target. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingRemovePayload](#tagbindingremovepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tags + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| keyword | query | Search keyword for tag name. | No | string | +| type | query | Tag type filter. Can be "knowledge" or "app". 
| No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ [TagResponse](#tagresponse) ] | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBasePayload](#tagbasepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tags/{tag_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| tag_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| tag_id | path | | Yes | string | +| payload | body | | Yes | [TagBasePayload](#tagbasepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /test/retrieval + +#### POST +##### Description + +Bedrock retrieval test (internal use only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [BedrockRetrievalPayload](#bedrockretrievalpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Bedrock retrieval test completed | + +### /trial-apps/{app_id} + +#### GET +##### Summary + +Get app detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/audio-to-text + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/chat-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [ChatRequest](#chatrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/completion-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [CompletionRequest](#completionrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/datasets + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/messages/{message_id}/suggested-questions + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/trial-apps/{app_id}/site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Returns the site configuration for the application including theme, icons, and text. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/text-to-audio + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [TextToSpeechRequest](#texttospeechrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows + +#### GET +##### Summary + +Get workflow detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows/run + +#### POST +##### Summary + +Run workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [WorkflowRunRequest](#workflowrunrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /website/crawl + +#### POST +##### Description + +Crawl website content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WebsiteCrawlPayload](#websitecrawlpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Website crawl initiated successfully | +| 400 | Invalid crawl parameters | + +### /website/crawl/status/{job_id} + +#### GET +##### Description + +Get website crawl status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WebsiteCrawlStatusQuery](#websitecrawlstatusquery) | +| job_id | path | Crawl job ID | Yes | string | +| provider | query | Crawl provider (firecrawl/watercrawl/jinareader) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Crawl status retrieved successfully | +| 400 | Invalid provider | +| 404 | Crawl job not found | + +### /workflow/{workflow_run_id}/events + +#### GET +##### Summary + +Get workflow execution events stream after resume + +##### Description + +GET /console/api/workflow/{workflow_run_id}/events + +Returns Server-Sent Events stream. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workflow/{workflow_run_id}/pause-details + +#### GET +##### Summary + +Get workflow pause details + +##### Description + +Get workflow pause details +GET /console/api/workflow/{workflow_run_id}/pause-details + +Returns information about why and where the workflow is paused. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow pause details retrieved successfully | [WorkflowPauseDetailsResponse](#workflowpausedetailsresponse) | +| 404 | Workflow run not found | | + +### /workspaces + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [TenantInfoResponse](#tenantinforesponse) | + +### /workspaces/current/agent-provider/{provider_name} + +#### GET +##### Description + +Get specific agent provider details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_name | path | Agent provider name | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | object | + +### /workspaces/current/agent-providers + +#### GET +##### Description + +Get list of available agent providers + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ object ] | + +### /workspaces/current/dataset-operators + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountWithRoleList](#accountwithrolelist) | + +### /workspaces/current/default-model + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGetDefault](#parsergetdefault) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### 
Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPostDefault](#parserpostdefault) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/endpoints + +#### POST +##### Description + +Create a new plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointCreatePayload](#endpointcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint created successfully | [EndpointCreateResponse](#endpointcreateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/create + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for creating a plugin endpoint. Use POST /workspaces/current/endpoints instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointCreatePayload](#endpointcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint created successfully | [EndpointCreateResponse](#endpointcreateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/delete + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for deleting a plugin endpoint. Use DELETE /workspaces/current/endpoints/{id} instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint deleted successfully | [EndpointDeleteResponse](#endpointdeleteresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/disable + +#### POST +##### Description + +Disable a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint disabled successfully | [EndpointDisableResponse](#endpointdisableresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/enable + +#### POST +##### Description + +Enable a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint enabled successfully | [EndpointEnableResponse](#endpointenableresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/list + +#### GET +##### Description + +List plugin endpoints with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointListQuery](#endpointlistquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EndpointListResponse](#endpointlistresponse) | + +### 
/workspaces/current/endpoints/list/plugin + +#### GET +##### Description + +List endpoints for a specific plugin + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointListForPluginQuery](#endpointlistforpluginquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [PluginEndpointListResponse](#pluginendpointlistresponse) | + +### /workspaces/current/endpoints/update + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating a plugin endpoint. Use PATCH /workspaces/current/endpoints/{id} instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LegacyEndpointUpdatePayload](#legacyendpointupdatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint updated successfully | [EndpointUpdateResponse](#endpointupdateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/{id} + +#### DELETE +##### Description + +Delete a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Endpoint ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint deleted successfully | [EndpointDeleteResponse](#endpointdeleteresponse) | +| 403 | Admin privileges required | | + +#### PATCH +##### Description + +Update a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointUpdatePayload](#endpointupdatepayload) | +| id | path | Endpoint ID | Yes | string | + +##### Responses + 
+| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint updated successfully | [EndpointUpdateResponse](#endpointupdateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/members + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountWithRoleList](#accountwithrolelist) | + +### /workspaces/current/members/invite-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MemberInvitePayload](#memberinvitepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/owner-transfer-check + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [OwnerTransferCheckPayload](#ownertransfercheckpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/send-owner-transfer-confirm-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [OwnerTransferEmailPayload](#ownertransferemailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id}/owner-transfer + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | +| payload | body | | Yes | [OwnerTransferPayload](#ownertransferpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id}/update-role + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | +| payload | body | | Yes | [MemberRoleUpdatePayload](#memberroleupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserModelList](#parsermodellist) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/checkout-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialDelete](#parsercredentialdelete) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | 
[ParserCredentialId](#parsercredentialid) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialCreate](#parsercredentialcreate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialUpdate](#parsercredentialupdate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialSwitch](#parsercredentialswitch) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials/validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialValidate](#parsercredentialvalidate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | 
[ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserPostModels](#parserpostmodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteCredential](#parserdeletecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserGetCredentials](#parsergetcredentials) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCreateCredential](#parsercreatecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | 
-------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserUpdateCredential](#parserupdatecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserSwitch](#parserswitch) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials/validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserValidate](#parservalidate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/disable + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/enable + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/load-balancing-configs/credentials-validate + +#### 
POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [LoadBalancingCredentialPayload](#loadbalancingcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/load-balancing-configs/{config_id}/credentials-validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| config_id | path | | Yes | string | +| provider | path | | Yes | string | +| payload | body | | Yes | [LoadBalancingCredentialPayload](#loadbalancingcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/parameter-rules + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserParameter](#parserparameter) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/preferred-provider-type + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserPreferredProviderType](#parserpreferredprovidertype) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/models/model-types/{model_type} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| model_type | path | | Yes | string | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/permission + +#### GET +##### Summary + +Get workspace permission settings + +##### Description + +Returns permission flags that control workspace features like member invitations and owner transfer. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/asset + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserAsset](#parserasset) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/debugging-key + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/fetch-manifest + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifierQuery](#parserpluginidentifierquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserIcon](#parsericon) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubInstall](#parsergithubinstall) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/marketplace + +#### POST 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifiers](#parserpluginidentifiers) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/pkg + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifiers](#parserpluginidentifiers) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserList](#parserlist) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list/installations/ids + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserLatest](#parserlatest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list/latest-versions + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserLatest](#parserlatest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/marketplace/pkg + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifierQuery](#parserpluginidentifierquery) | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/parameters/dynamic-options + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserDynamicOptions](#parserdynamicoptions) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/parameters/dynamic-options-with-credentials + +#### POST +##### Summary + +Fetch dynamic options using credentials directly (for edit mode) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserDynamicOptionsWithCredentials](#parserdynamicoptionswithcredentials) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/permission/change + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPermissionChange](#parserpermissionchange) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/permission/fetch + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/autoupgrade/exclude + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserExcludePlugin](#parserexcludeplugin) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/change + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPreferencesChange](#parserpreferenceschange) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/fetch + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/readme + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserReadme](#parserreadme) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserTasks](#parsertasks) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/delete_all + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id}/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id}/delete/{identifier} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| identifier | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/uninstall + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserUninstall](#parseruninstall) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upgrade/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubUpgrade](#parsergithubupgrade) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upgrade/marketplace + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserMarketplaceUpgrade](#parsermarketplaceupgrade) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/bundle + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubUpload](#parsergithubupload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/pkg + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-labels + +#### GET +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/add + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderAddPayload](#apitoolprovideraddpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderDeletePayload](#apitoolproviderdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/get + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/remote + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/schema + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolSchemaPayload](#apitoolschemapayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/test/pre + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolTestPayload](#apitooltestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/tools + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + 
+### /workspaces/current/tool-provider/api/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderUpdatePayload](#apitoolproviderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/add + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolAddPayload](#builtintooladdpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credential/info + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credential/schema/{credential_type} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| credential_type | path | | Yes | string | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credentials + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/default-credential + +#### POST +##### 
Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinProviderDefaultCredentialPayload](#builtinproviderdefaultcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolCredentialDeletePayload](#builtintoolcredentialdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/info + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/oauth/client-schema + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/oauth/custom-client + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | 
Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ToolOAuthCustomClientPayload](#tooloauthcustomclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/tools + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolUpdatePayload](#builtintoolupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderDeletePayload](#mcpproviderdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | 
Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderCreatePayload](#mcpprovidercreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderUpdatePayload](#mcpproviderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/auth + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPAuthPayload](#mcpauthpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/tools/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/update/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/create + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolCreatePayload](#workflowtoolcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success 
| + +### /workspaces/current/tool-provider/workflow/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolDeletePayload](#workflowtooldeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/get + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/tools + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolUpdatePayload](#workflowtoolupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-providers + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/api + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/builtin + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/mcp + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/workflow + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + 
+##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/info + +#### GET +##### Summary + +Get info for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/oauth/client + +#### DELETE +##### Summary + +Remove custom OAuth client configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Summary + +Get OAuth client configuration for a provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Configure custom OAuth client for a provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [TriggerOAuthClientPayload](#triggeroauthclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/build/{subscription_builder_id} + +#### POST +##### Summary + +Build a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | 
string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/create + +#### POST +##### Summary + +Add a new subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderCreatePayload](#triggersubscriptionbuildercreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/logs/{subscription_builder_id} + +#### GET +##### Summary + +Get the request logs for a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/update/{subscription_builder_id} + +#### POST +##### Summary + +Update a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/workspaces/current/trigger-provider/{provider}/subscriptions/builder/verify-and-update/{subscription_builder_id} + +#### POST +##### Summary + +Verify and update a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderVerifyPayload](#triggersubscriptionbuilderverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/{subscription_builder_id} + +#### GET +##### Summary + +Get a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/list + +#### GET +##### Summary + +List all trigger subscriptions for the current tenant's provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/oauth/authorize + +#### GET +##### Summary + +Initiate OAuth authorization flow for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/verify/{subscription_id} + +#### POST +##### Summary + +Verify credentials for an existing subscription (edit mode only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderVerifyPayload](#triggersubscriptionbuilderverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{subscription_id}/subscriptions/delete + +#### POST +##### Summary + +Delete a subscription instance + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| subscription_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{subscription_id}/subscriptions/update + +#### POST +##### Summary + +Update a subscription instance + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| subscription_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/triggers + +#### GET +##### Summary + +List all trigger providers for the current tenant + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/custom-config + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[WorkspaceCustomConfigPayload](#workspacecustomconfigpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/custom-config/webapp-logo/upload + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/info + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkspaceInfoPayload](#workspaceinfopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SwitchWorkspacePayload](#switchworkspacepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/{tenant_id}/model-providers/{provider}/{icon_type}/{lang} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| icon_type | path | | Yes | string | +| lang | path | | Yes | string | +| provider | path | | Yes | string | +| tenant_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +## default +Default namespace + +### /explore/banners + +#### GET +##### Summary + +Get banner list + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +### Models + +#### APIBasedExtensionListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| APIBasedExtensionListResponse | array | | | + +#### APIBasedExtensionPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| api_endpoint | string | API endpoint URL | Yes | +| 
api_key | string | API key for authentication | Yes | +| name | string | Extension name | Yes | + +#### APIBasedExtensionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| api_endpoint | string | | Yes | +| api_key | string | | Yes | +| created_at | | | No | +| id | string | | Yes | +| name | string | | Yes | + +#### Account + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | | | No | +| created_at | | | No | +| email | string | | Yes | +| id | string | | Yes | +| interface_language | | | No | +| interface_theme | | | No | +| is_password_set | boolean | | Yes | +| last_login_at | | | No | +| last_login_ip | | | No | +| name | string | | Yes | +| timezone | | | No | + +#### AccountAvatarPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | string | | Yes | + +#### AccountAvatarQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | string | Avatar file ID | Yes | + +#### AccountDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| token | string | | Yes | + +#### AccountDeletionFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| feedback | string | | Yes | + +#### AccountInitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_language | string | | Yes | +| invitation_code | | | No | +| timezone | string | | Yes | + +#### AccountIntegrateListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AccountIntegrateResponse](#accountintegrateresponse) ] | | Yes | + +#### AccountIntegrateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| is_bound | 
boolean | | Yes | +| link | | | No | +| provider | string | | Yes | + +#### AccountInterfaceLanguagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_language | string | | Yes | + +#### AccountInterfaceThemePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_theme | string | *Enum:* `"dark"`, `"light"` | Yes | + +#### AccountNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### AccountPasswordPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password | | | No | +| repeat_new_password | string | | Yes | + +#### AccountTimezonePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| timezone | string | | Yes | + +#### AccountWithRole + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | | | No | +| created_at | | | No | +| email | string | | Yes | +| id | string | | Yes | +| last_active_at | | | No | +| last_login_at | | | No | +| name | string | | Yes | +| role | string | | Yes | +| status | string | | Yes | + +#### AccountWithRoleList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| accounts | [ [AccountWithRole](#accountwithrole) ] | | Yes | + +#### ActivateCheckQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | | | No | +| token | string | | Yes | +| workspace_id | | | No | + +#### ActivatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | | | No | +| interface_language | string | | Yes | +| name | string | | Yes | +| timezone | string | | Yes | +| token | string | | Yes | +| workspace_id | | | No | + +#### ActivationCheckResponse + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| data | | Activation data if valid | No | +| is_valid | boolean | Whether token is valid | Yes | + +#### ActivationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### AdvancedChatWorkflowRunForListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by_account | | | No | +| elapsed_time | | | No | +| exceptions_count | | | No | +| finished_at | | | No | +| id | string | | Yes | +| message_id | | | No | +| retry_index | | | No | +| status | | | No | +| total_steps | | | No | +| total_tokens | | | No | +| version | | | No | + +#### AdvancedChatWorkflowRunPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AdvancedChatWorkflowRunForListResponse](#advancedchatworkflowrunforlistresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | + +#### AdvancedChatWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| files | | | No | +| inputs | | | No | +| parent_message_id | | | No | +| query | string | | No | + +#### AdvancedPromptTemplateQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_mode | string | Application mode | Yes | +| has_context | string | Whether has context | No | +| model_mode | string | Model mode | Yes | +| model_name | string | Model name | Yes | + +#### AgentLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| message_id | string | Message UUID | Yes | + +#### AgentThought + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chain_id | | | No | +| 
created_at | | | No | +| files | [ string ] | | Yes | +| id | string | | Yes | +| message_chain_id | | | No | +| message_id | string | | Yes | +| observation | | | No | +| position | integer | | Yes | +| thought | | | No | +| tool | | | No | +| tool_input | | | No | +| tool_labels | [JSONValue](#jsonvalue) | | Yes | + +#### Annotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| created_at | | | No | +| hit_count | | | No | +| id | string | | Yes | +| question | | | No | + +#### AnnotationCountResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| count | integer | Number of annotations | Yes | + +#### AnnotationExportList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | + +#### AnnotationFilePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | Message ID | Yes | + +#### AnnotationHitHistory + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_content | | | No | +| annotation_question | | | No | +| created_at | | | No | +| id | string | | Yes | +| question | | | No | +| score | | | No | +| source | | | No | + +#### AnnotationHitHistoryList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AnnotationHitHistory](#annotationhithistory) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### AnnotationList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### AnnotationListQuery + +| Name | Type | Description | Required | +| ---- | 
---- | ----------- | -------- | +| keyword | string | Search keyword | No | +| limit | integer | Page size | No | +| page | integer | Page number | No | + +#### AnnotationReplyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | Embedding model name | Yes | +| embedding_provider_name | string | Embedding provider name | Yes | +| score_threshold | number | Score threshold for annotation matching | Yes | + +#### AnnotationReplyStatusQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | *Enum:* `"disable"`, `"enable"` | Yes | + +#### AnnotationSettingUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | Score threshold | Yes | + +#### ApiKeyAuthBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| category | string | | Yes | +| credentials | object | | Yes | +| provider | string | | Yes | + +#### ApiKeyItem + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| id | string | | Yes | +| last_used_at | | | No | +| token | string | | Yes | +| type | string | | Yes | + +#### ApiKeyList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ApiKeyItem](#apikeyitem) ] | | Yes | + +#### ApiProviderSchemaType + +Enum class for api provider schema type. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ApiProviderSchemaType | string | Enum class for api provider schema type. 
| | + +#### ApiToolProviderAddPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| custom_disclaimer | string | | No | +| icon | object | | Yes | +| labels | | | No | +| privacy_policy | | | No | +| provider | string | | Yes | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | + +#### ApiToolProviderDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider | string | | Yes | + +#### ApiToolProviderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| custom_disclaimer | string | | No | +| icon | object | | Yes | +| labels | | | No | +| original_provider | string | | Yes | +| privacy_policy | | | No | +| provider | string | | Yes | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | + +#### ApiToolSchemaPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| schema | string | | Yes | + +#### ApiToolTestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| parameters | object | | Yes | +| provider_name | | | No | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | +| tool_name | string | | Yes | + +#### AppApiStatusPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_api | boolean | Enable or disable API | Yes | + +#### AppDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| app_model_config | | | No | +| created_at | | | No | +| created_by | | | No | +| description | | | No | +| enable_api | boolean | | Yes | +| enable_site | boolean | | Yes | +| icon | | | No | +| 
icon_background | | | No | +| id | string | | Yes | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| tags | [ [Tag](#tag) ] | | No | +| tracing | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppDetailKernel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| id | string | | No | +| mode | string | | No | +| name | string | | No | + +#### AppDetailWithSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| api_base_url | | | No | +| app_model_config | | | No | +| created_at | | | No | +| created_by | | | No | +| deleted_tools | [ [DeletedTool](#deletedtool) ] | | No | +| description | | | No | +| enable_api | boolean | | Yes | +| enable_site | boolean | | Yes | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| max_active_requests | | | No | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| site | | | No | +| tags | [ [Tag](#tag) ] | | No | +| tracing | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppExportQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_secret | boolean | Include secrets in export | No | +| workflow_id | | Specific workflow ID to export | No | + +#### AppExportResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | string | | Yes | + +#### AppIconPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | Icon data | No | +| icon_background | | Icon background color | No | +| 
icon_type | | Icon type | No | + +#### AppImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| mode | string | Import mode | Yes | +| name | | | No | +| yaml_content | | | No | +| yaml_url | | | No | + +#### AppListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_created_by_me | | Filter by creator | No | +| limit | integer | Page size (1-100) | No | +| mode | string | App mode filter
*Enum:* `"advanced-chat"`, `"agent-chat"`, `"all"`, `"channel"`, `"chat"`, `"completion"`, `"workflow"` | No | +| name | | Filter by app name | No | +| page | integer | Page number (1-99999) | No | +| tag_ids | | Filter by tag IDs | No | + +#### AppMCPServerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | +| parameters | | | Yes | +| server_code | string | | Yes | +| status | [AppMCPServerStatus](#appmcpserverstatus) | | Yes | +| updated_at | | | No | + +#### AppMCPServerStatus + +AppMCPServer Status Enum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| AppMCPServerStatus | string | AppMCPServer Status Enum | | + +#### AppNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | Name to check | Yes | + +#### AppPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [AppPartial](#apppartial) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### AppPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| app_model_config | | | No | +| author_name | | | No | +| create_user_name | | | No | +| created_at | | | No | +| created_by | | | No | +| desc_or_prompt | | | No | +| has_draft_trigger | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| max_active_requests | | | No | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| tags | [ [Tag](#tag) ] | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppSiteResponse + +| Name | Type | Description | Required | 
+| ---- | ---- | ----------- | -------- | +| app_id | string | | Yes | +| code | | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | string | | Yes | +| default_language | string | | Yes | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| privacy_policy | | | No | +| prompt_public | boolean | | Yes | +| show_workflow_steps | boolean | | Yes | +| title | string | | Yes | +| use_icon_as_answer_icon | boolean | | Yes | + +#### AppSiteStatusPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_site | boolean | Enable or disable site | Yes | + +#### AppSiteUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chat_color_theme | | | No | +| chat_color_theme_inverted | | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | | | No | +| default_language | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| privacy_policy | | | No | +| prompt_public | | | No | +| show_workflow_steps | | | No | +| title | | | No | +| use_icon_as_answer_icon | | | No | + +#### AppTracePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | Enable or disable tracing | Yes | +| tracing_provider | | Tracing provider | No | + +#### AudioTranscriptResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| text | string | Transcribed text from audio | Yes | + +#### BatchAddNotificationAccountsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| notification_id | string | | Yes | +| user_email | [ string ] | List of account email addresses | Yes | + +#### BatchImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | 
----------- | -------- | +| upload_file_id | string | | Yes | + +#### BedrockRetrievalPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| knowledge_id | string | | Yes | +| query | string | | Yes | +| retrieval_setting | [BedrockRetrievalSetting](#bedrockretrievalsetting) | | Yes | + +#### BedrockRetrievalSetting + +Retrieval settings for Amazon Bedrock knowledge base queries. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | Minimum relevance score threshold | No | +| top_k | | Maximum number of results to retrieve | No | + +#### BuiltinProviderDefaultCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | + +#### BuiltinToolAddPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | +| type | [CredentialType](#credentialtype) | | Yes | + +#### BuiltinToolCredentialDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### BuiltinToolUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | | | No | +| name | | | No | + +#### ButtonStyle + +Button styles for user actions. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ButtonStyle | string | Button styles for user actions. 
| | + +#### ChangeEmailResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_email | string | | Yes | +| token | string | | Yes | + +#### ChangeEmailSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | +| phase | | | No | +| token | | | No | + +#### ChangeEmailValidityPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ChatConversationQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_status | string | Annotation status filter
*Enum:* `"all"`, `"annotated"`, `"not_annotated"` | No | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| keyword | | Search keyword | No | +| limit | integer | Page size (1-100) | No | +| page | integer | Page number | No | +| sort_by | string | Sort field and direction
*Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### ChatMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | Conversation ID | No | +| files | | Uploaded files | No | +| inputs | object | | Yes | +| model_config | object | | Yes | +| parent_message_id | | Parent message ID | No | +| query | string | User query | Yes | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | No | +| retriever_from | string | Retriever source | No | + +#### ChatMessagesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation ID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### ChatRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| files | | | No | +| inputs | object | | Yes | +| parent_message_id | | | No | +| query | string | | Yes | +| retriever_from | string | | No | + +#### CheckDependenciesResult + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| leaked_dependencies | [ [PluginDependency](#plugindependency) ] | | No | + +#### CheckEmailUniquePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | + +#### ChildChunkBatchUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunks | [ [ChildChunkUpdateArgs](#childchunkupdateargs) ] | | Yes | + +#### ChildChunkCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### ChildChunkUpdateArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | +| id | | | No | + +#### ChildChunkUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### CodeBasedExtensionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | | Extension data | Yes | +| module | string | Module name | Yes | + +#### CompletionConversationQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_status | 
string | Annotation status filter
*Enum:* `"all"`, `"annotated"`, `"not_annotated"` | No | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| keyword | | Search keyword | No | +| limit | integer | Page size (1-100) | No | +| page | integer | Page number | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### CompletionMessageExplorePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### CompletionMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | Uploaded files | No | +| inputs | object | | Yes | +| model_config | object | | Yes | +| query | string | Query text | No | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | No | +| retriever_from | string | Retriever source | No | + +#### CompletionRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### ComplianceDownloadQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_name | string | Compliance document name | Yes | + +#### Condition + +Condition detail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| comparison_operator | string | *Enum:* `"<"`, `"="`, `">"`, `"after"`, `"before"`, `"contains"`, `"empty"`, `"end with"`, `"in"`, `"is"`, `"is not"`, `"not contains"`, `"not empty"`, `"not in"`, `"start with"`, `"≠"`, `"≤"`, `"≥"` | Yes | +| name | string | | Yes | +| value | | | No | + +#### ConsoleDatasetListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ids | [ string ] | Filter by dataset IDs | No | +| include_all | boolean | Include all datasets | No | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| tag_ids | [ string ] | Filter by tag IDs | No | + +#### Conversation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotation | | | No | +| created_at | | | No | +| first_message | | | No | +| from_account_id | | | No | +| from_account_name | | | No | +| from_end_user_id | | | No | +| from_end_user_session_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| model_config | | | No | +| read_at | | | No | +| status | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationAnnotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| account | | | No | +| content | string | | Yes | +| created_at | | | No | +| id | string | | Yes | +| question | | | No | + +#### ConversationAnnotationHitHistory + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_create_account | | | No | +| created_at | | | No | +| id | string | | Yes | + +#### ConversationDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotated | boolean | | Yes | +| created_at | | | No | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| introduction | | | No | +| message_count | integer | | Yes | +| model_config | | | No | +| status | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | +| pinned | | | No | + +#### ConversationMessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| first_message | | | No | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| model_config | | | No | +| status | string | | Yes | + +#### ConversationPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [Conversation](#conversation) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### ConversationVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | 
string | | No | +| id | string | | No | +| name | string | | No | +| value | object | | No | +| value_type | string | | No | + +#### ConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | | | No | +| id | string | | Yes | +| name | string | | Yes | +| updated_at | | | No | +| value | | | No | +| value_type | string | | Yes | + +#### ConversationVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ object ] | Conversation variables for the draft workflow | Yes | + +#### ConversationVariablesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation ID to filter variables | Yes | + +#### ConversationWithSummary + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotated | boolean | | Yes | +| created_at | | | No | +| from_account_id | | | No | +| from_account_name | | | No | +| from_end_user_id | | | No | +| from_end_user_session_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| message_count | integer | | Yes | +| model_config | | | No | +| name | string | | Yes | +| read_at | | | No | +| status | string | | Yes | +| status_count | | | No | +| summary_or_query | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationWithSummaryPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [ConversationWithSummary](#conversationwithsummary) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### ConvertToWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background 
| | | No | +| icon_type | | | No | +| name | | | No | + +#### CopyAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Description for the copied app | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| name | | Name for the copied app | No | + +#### CreateAnnotationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_reply | | Annotation reply data | No | +| answer | | Answer text | No | +| content | | Content text | No | +| message_id | | Message ID | No | +| question | | Question text | No | + +#### CreateAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | App description (max 400 chars) | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| mode | string | App mode
*Enum:* `"advanced-chat"`, `"agent-chat"`, `"chat"`, `"completion"`, `"workflow"` | Yes | +| name | string | App name | Yes | + +#### CredentialType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| CredentialType | string | | | + +#### DataSource + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| info_list | [InfoList](#infolist) | | Yes | + +#### DataSourceIntegrate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| disabled | boolean | | No | +| id | string | | No | +| is_bound | boolean | | No | +| link | string | | No | +| provider | string | | No | +| source_info | [DataSourceIntegrateWorkspace](#datasourceintegrateworkspace) | | No | + +#### DataSourceIntegrateIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emoji | string | | No | +| type | string | | No | +| url | string | | No | + +#### DataSourceIntegrateList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [DataSourceIntegrate](#datasourceintegrate) ] | | No | + +#### DataSourceIntegratePage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page_icon | [DataSourceIntegrateIcon](#datasourceintegrateicon) | | No | +| page_id | string | | No | +| page_name | string | | No | +| parent_id | string | | No | +| type | string | | No | + +#### DataSourceIntegrateWorkspace + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| pages | [ [DataSourceIntegratePage](#datasourceintegratepage) ] | | No | +| total | integer | | No | +| workspace_icon | string | | No | +| workspace_id | string | | No | +| workspace_name | string | | No | + +#### DatasetAndDocumentResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| batch | string | | Yes | +| dataset | 
[DatasetResponse](#datasetresponse) | | Yes | +| documents | [ [DocumentResponse](#documentresponse) ] | | Yes | + +#### DatasetBase + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| data_source_type | string | | No | +| description | string | | No | +| id | string | | No | +| indexing_technique | string | | No | +| name | string | | No | +| permission | string | | No | + +#### DatasetContent + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| content_type | string | | No | +| file_info | [DatasetFileInfo](#datasetfileinfo) | | No | + +#### DatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | +| provider | string | | No | + +#### DatasetDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_count | integer | | No | +| author_name | string | | No | +| built_in_field_enabled | boolean | | No | +| chunk_structure | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| data_source_type | string | | No | +| description | string | | No | +| doc_form | string | | No | +| doc_metadata | [ [DatasetDocMetadata](#datasetdocmetadata) ] | | No | +| document_count | integer | | No | +| embedding_available | boolean | | No | +| embedding_model | string | | No | +| embedding_model_provider | string | | No | +| enable_api | boolean | | No | +| external_knowledge_info | [ExternalKnowledgeInfo](#externalknowledgeinfo) | | No | +| external_retrieval_model | [ExternalRetrievalModel](#externalretrievalmodel) | | No | +| icon_info | [DatasetIconInfo](#dataseticoninfo) | | No | +| id | string | | No | +| 
indexing_technique | string | | No | +| is_multimodal | boolean | | No | +| is_published | boolean | | No | +| name | string | | No | +| permission | string | | No | +| pipeline_id | string | | No | +| provider | string | | No | +| retrieval_model_dict | [DatasetRetrievalModel](#datasetretrievalmodel) | | No | +| runtime_mode | string | | No | +| summary_index_setting | [_AnonymousInlineModel_b1954337d565](#_anonymousinlinemodel_b1954337d565) | | No | +| tags | [ [Tag](#tag) ] | | No | +| total_available_documents | integer | | No | +| total_documents | integer | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| word_count | integer | | No | + +#### DatasetDocMetadata + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| name | string | | No | +| type | string | | No | + +#### DatasetFileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| extension | string | | No | +| id | string | | No | +| mime_type | string | | No | +| name | string | | No | +| size | integer | | No | +| source_url | string | | No | + +#### DatasetIconInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | string | | No | + +#### DatasetKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | No | + +#### DatasetPermissionEnum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DatasetPermissionEnum | string | | | + +#### DatasetQueryDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| created_by_role | string | | No | +| id | string | | No | +| queries | [DatasetContent](#datasetcontent) | | No | +| source | string 
| | No | +| source_app_id | string | | No | + +#### DatasetRerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | string | | No | +| reranking_provider_name | string | | No | + +#### DatasetResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| data_source_type | | | No | +| description | | | No | +| id | string | | Yes | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | + +#### DatasetRetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_enable | boolean | | No | +| reranking_mode | string | | No | +| reranking_model | [DatasetRerankingModel](#datasetrerankingmodel) | | No | +| score_threshold | number | | No | +| score_threshold_enabled | boolean | | No | +| search_method | string | | No | +| top_k | integer | | No | +| weights | [DatasetWeightedScore](#datasetweightedscore) | | No | + +#### DatasetUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| external_retrieval_model | | | No | +| icon_info | | | No | +| indexing_technique | | | No | +| is_multimodal | | | No | +| name | | | No | +| partial_member_list | | | No | +| permission | | | No | +| retrieval_model | | | No | +| summary_index_setting | | | No | + +#### DatasetVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | No | +| embedding_provider_name | string | | No | +| vector_weight | number | | No | + +#### DatasetWeightedScore + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | 
[DatasetKeywordSetting](#datasetkeywordsetting) | | No | +| vector_setting | [DatasetVectorSetting](#datasetvectorsetting) | | No | +| weight_type | string | | No | + +#### DatasourceCredentialDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### DatasourceCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | + +#### DatasourceCredentialUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | | | No | +| name | | | No | + +#### DatasourceCustomClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| client_params | | | No | +| enable_oauth_custom_client | | | No | + +#### DatasourceDefaultPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | + +#### DatasourceNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | Yes | + +#### DatasourceUpdateNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| name | string | | Yes | + +#### DatasourceVariablesPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info | object | | Yes | +| datasource_type | string | | Yes | +| start_node_id | string | | Yes | +| start_node_title | string | | Yes | + +#### DebugPermission + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DebugPermission | string | | | + +#### DefaultBlockConfigQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| q | | | No | 
+ +#### DeletedTool + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | Yes | +| tool_name | string | | Yes | +| type | string | | Yes | + +#### DocumentBatchDownloadZipPayload + +Request payload for bulk downloading documents as a zip archive. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string (uuid) ] | | Yes | + +#### DocumentMetadataOperation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_id | string | | Yes | +| metadata_list | [ [MetadataDetail](#metadatadetail) ] | | Yes | +| partial_update | boolean | | No | + +#### DocumentMetadataResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | +| value | | | No | + +#### DocumentMetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_metadata | | | No | +| doc_type | | | No | + +#### DocumentRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### DocumentResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| archived | | | No | +| created_at | | | No | +| created_by | | | No | +| created_from | | | No | +| data_source_detail_dict | | | No | +| data_source_info_dict | | | No | +| data_source_type | | | No | +| dataset_process_rule_id | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| display_status | | | No | +| doc_form | | | No | +| doc_metadata_details | [ [DocumentMetadataResponse](#documentmetadataresponse) ] | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | string | | Yes | +| indexing_status | | | No | +| name | string | | Yes | +| need_summary | | | No | +| position | | | No | +| 
summary_index_status | | | No | +| tokens | | | No | +| word_count | | | No | + +#### DocumentRetryPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string ] | | Yes | + +#### DocumentWithSegmentsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| archived | | | No | +| completed_segments | | | No | +| created_at | | | No | +| created_by | | | No | +| created_from | | | No | +| data_source_detail_dict | | | No | +| data_source_info_dict | | | No | +| data_source_type | | | No | +| dataset_process_rule_id | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| display_status | | | No | +| doc_form | | | No | +| doc_metadata_details | [ [DocumentMetadataResponse](#documentmetadataresponse) ] | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | string | | Yes | +| indexing_status | | | No | +| name | string | | Yes | +| need_summary | | | No | +| position | | | No | +| process_rule_dict | | | No | +| summary_index_status | | | No | +| tokens | | | No | +| total_segments | | | No | +| word_count | | | No | + +#### DraftWorkflowNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | + +#### DraftWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| start_node_id | string | | Yes | + +#### DraftWorkflowSyncPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | | | No | +| environment_variables | | | No | +| features | | | No | +| graph | object | | Yes | +| hash | | | No | +| rag_pipeline_variables | | | No | + +#### DraftWorkflowTriggerRunAllPayload + +| Name | Type | 
Description | Required | +| ---- | ---- | ----------- | -------- | +| node_ids | [ string ] | | Yes | + +#### DraftWorkflowTriggerRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | | Yes | + +#### DraftWorkflowTriggerRunRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | Node ID | Yes | + +#### EducationActivatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| institution | string | | Yes | +| role | string | | Yes | +| token | string | | Yes | + +#### EducationAutocompleteQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keywords | string | | Yes | +| limit | integer | | No | +| page | integer | | No | + +#### EducationAutocompleteResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| curr_page | | | No | +| data | [ string ] | | No | +| has_next | | | No | + +#### EducationStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_refresh | | | No | +| expire_at | | | No | +| is_student | | | No | +| result | | | No | + +#### EducationVerifyResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| token | | | No | + +#### EmailCodeLoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| language | | | No | +| token | string | | Yes | + +#### EmailPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### EmailRegisterResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### 
EmailRegisterSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Email address | Yes | +| language | | Language code | No | + +#### EmailRegisterValidityPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### EndpointCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| plugin_unique_identifier | string | | Yes | +| settings | object | | Yes | + +#### EndpointCreateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointDeleteResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointDisableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointEnableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointIdPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoint_id | string | | Yes | + +#### EndpointListForPluginQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | | Yes | +| page_size | integer | | Yes | +| plugin_id | string | | Yes | + +#### EndpointListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | | Yes | +| page_size | integer | | Yes | + +#### EndpointListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoints | [ object ] | Endpoint information | Yes | + +#### 
EndpointUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| settings | object | | Yes | + +#### EndpointUpdateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EnvironmentVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| environment_variables | [ object ] | Environment variables for the draft workflow | Yes | + +#### ExecutionContentType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ExecutionContentType | string | | | + +#### ExternalApiTemplateListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | + +#### ExternalDatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| external_knowledge_api_id | string | | Yes | +| external_knowledge_id | string | | Yes | +| external_retrieval_model | | | No | +| name | string | | Yes | + +#### ExternalHitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| external_retrieval_model | | | No | +| metadata_filtering_conditions | | | No | +| query | string | | Yes | + +#### ExternalKnowledgeApiPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| settings | object | | Yes | + +#### ExternalKnowledgeInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| external_knowledge_api_endpoint | string | | No | +| external_knowledge_api_id | string | | No | +| external_knowledge_api_name | string | | No | +| external_knowledge_id | string | | No | + +#### 
ExternalRetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | | No | +| score_threshold_enabled | boolean | | No | +| top_k | integer | | No | + +#### FeatureResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | Feature configuration object | No | + +#### Feedback + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| from_account | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| rating | string | | Yes | + +#### FeedbackExportQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end_date | | End date (YYYY-MM-DD) | No | +| format | string | Export format
*Enum:* `"csv"`, `"json"` | No | +| from_source | | Filter by feedback source | No | +| has_comment | | Only include feedback with comments | No | +| rating | | Filter by rating | No | +| start_date | | Start date (YYYY-MM-DD) | No | + +#### FeedbackStat + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| dislike | integer | | Yes | +| like | integer | | Yes | + +#### FileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_ids | [ string ] | | Yes | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### ForgotPasswordCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordCheckResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Email address | Yes | +| is_valid | boolean | Whether code is valid | Yes | +| token | string | New reset token | Yes | + +#### ForgotPasswordEmailResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | | Error code if account not found | No | +| data | | Reset token | No | +| result | string | Operation result | Yes | + +#### ForgotPasswordResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordResetResponse + +| Name | Type | 
Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### ForgotPasswordSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### FormInput + +Form input definition. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| default | | | No | +| output_variable_name | string | | Yes | +| type | [FormInputType](#forminputtype) | | Yes | + +#### FormInputDefault + +Default configuration for form inputs. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| selector | [ string ] | | No | +| type | [PlaceholderType](#placeholdertype) | | Yes | +| value | string | | No | + +#### FormInputType + +Form input types. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| FormInputType | string | Form input types. | | + +#### GenerateSummaryPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_list | [ string ] | | Yes | + +#### Github + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| github_plugin_unique_identifier | string | | Yes | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### HitTestingChildChunk + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| id | | | No | +| position | | | No | +| score | | | No | + +#### HitTestingDocument + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source_type | | | No | +| doc_metadata | | | No | +| doc_type | | | No | +| id | | | No | +| name | | | No | + +#### HitTestingFile + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| extension | | | No | +| id | | | No | +| mime_type | | | No | +| name | | | No | 
+| size | | | No | +| source_url | | | No | + +#### HitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_ids | | | No | +| external_retrieval_model | | | No | +| query | string | | Yes | +| retrieval_model | | | No | + +#### HitTestingRecord + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| child_chunks | [ [HitTestingChildChunk](#hittestingchildchunk) ] | | No | +| files | [ [HitTestingFile](#hittestingfile) ] | | No | +| score | | | No | +| segment | | | No | +| summary | | | No | +| tsne_position | | | No | + +#### HitTestingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| query | string | | Yes | +| records | [ [HitTestingRecord](#hittestingrecord) ] | | No | + +#### HitTestingSegment + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| completed_at | | | No | +| content | | | No | +| created_at | | | No | +| created_by | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| document | | | No | +| document_id | | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | | | No | +| index_node_hash | | | No | +| index_node_id | | | No | +| indexing_at | | | No | +| keywords | [ string ] | | No | +| position | | | No | +| sign_content | | | No | +| status | | | No | +| stopped_at | | | No | +| tokens | | | No | +| word_count | | | No | + +#### HumanInputContent + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| form_definition | | | No | +| form_submission_data | | | No | +| submitted | boolean | | Yes | +| type | [ExecutionContentType](#executioncontenttype) | | No | +| workflow_run_id | string | | Yes | + +#### HumanInputDeliveryTestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| delivery_method_id | string | Delivery method ID | Yes | 
+| inputs | object | Values used to fill missing upstream variables referenced in form_content | No | + +#### HumanInputFormDefinition + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| actions | [ [UserAction](#useraction) ] | | No | +| display_in_ui | boolean | | No | +| expiration_time | integer | | Yes | +| form_content | string | | Yes | +| form_id | string | | Yes | +| form_token | | | No | +| inputs | [ [FormInput](#forminput) ] | | No | +| node_id | string | | Yes | +| node_title | string | | Yes | +| resolved_default_values | object | | No | + +#### HumanInputFormPreviewPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | object | Values used to fill missing upstream variables referenced in form_content | No | + +#### HumanInputFormSubmissionData + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action_id | string | | Yes | +| action_text | string | | Yes | +| node_id | string | | Yes | +| node_title | string | | Yes | +| rendered_content | string | | Yes | + +#### HumanInputFormSubmitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | Selected action ID | Yes | +| form_inputs | object | Values the user provides for the form's own fields | Yes | +| inputs | object | Values used to fill missing upstream variables referenced in form_content | Yes | + +#### HumanInputPauseTypeResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| backstage_input_url | | | No | +| form_id | string | | Yes | +| type | string | | Yes | + +#### IconType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| IconType | string | | | + +#### Import + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | | No | +| app_mode | | | No | +| current_dsl_version | string | | No | 
+| error | string | | No | +| id | string | | Yes | +| imported_dsl_version | string | | No | +| status | [ImportStatus](#importstatus) | | Yes | + +#### ImportStatus + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ImportStatus | string | | | + +#### IncludeSecretQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_secret | string | | No | + +#### IndexingEstimatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| dataset_id | | | No | +| doc_form | string | | No | +| doc_language | string | | No | +| indexing_technique | string | | Yes | +| info_list | object | | Yes | +| process_rule | object | | Yes | + +#### InfoList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source_type | string | *Enum:* `"notion_import"`, `"upload_file"`, `"website_crawl"` | Yes | +| file_info_list | | | No | +| notion_info_list | | | No | +| website_info_list | | | No | + +#### Inner + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | | | No | +| model_type | [ModelType](#modeltype) | | Yes | +| provider | | | No | + +#### InsertExploreAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | string | | Yes | +| can_trial | boolean | | No | +| category | string | | Yes | +| copyright | | | No | +| custom_disclaimer | | | No | +| desc | | | No | +| language | string | | Yes | +| position | integer | | Yes | +| privacy_policy | | | No | +| trial_limit | integer | | No | + +#### InsertExploreBannerPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| category | string | | Yes | +| description | string | | Yes | +| img-src | string | | Yes | +| language | string | | No | +| link | string | | Yes | +| sort | integer | | Yes | +| title | string | | Yes | + +#### 
InstallPermission + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| InstallPermission | string | | | + +#### InstalledAppCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | string | | Yes | + +#### InstalledAppInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| mode | | | No | +| name | | | No | +| use_icon_as_answer_icon | | | No | + +#### InstalledAppListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| installed_apps | [ [InstalledAppResponse](#installedappresponse) ] | | Yes | + +#### InstalledAppResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app | [InstalledAppInfoResponse](#installedappinforesponse) | | Yes | +| app_owner_tenant_id | string | | Yes | +| editable | boolean | | Yes | +| id | string | | Yes | +| is_pinned | boolean | | Yes | +| last_used_at | | | No | +| uninstallable | boolean | | Yes | + +#### InstalledAppUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_pinned | | | No | + +#### InstalledAppsListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | App ID to filter by | No | + +#### InstructionGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current | string | Current instruction text | No | +| flow_id | string | Workflow/Flow ID | Yes | +| ideal_output | string | Expected ideal output | No | +| instruction | string | Instruction for generation | Yes | +| language | string | Programming language (javascript/python) | No | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| node_id | string | Node ID for workflow 
context | No | + +#### InstructionTemplatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| type | string | Instruction template type | Yes | + +#### IterationNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### JSONValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| JSONValue | | | | + +#### KnowledgeConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source | | | No | +| doc_form | string | | No | +| doc_language | string | | No | +| duplicate | boolean | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| indexing_technique | string | *Enum:* `"economy"`, `"high_quality"` | Yes | +| is_multimodal | boolean | | No | +| name | | | No | +| original_document_id | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| summary_index_setting | | | No | + +#### LLMMode + +Enum class for large language model mode. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| LLMMode | string | Enum class for large language model mode. 
| | + +#### LangContentPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| body | string | | Yes | +| lang | string | Language tag: 'zh' \| 'en' \| 'jp' | Yes | +| subtitle | | | No | +| title | string | | Yes | +| title_pic_url | | | No | + +#### LegacyEndpointUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoint_id | string | | Yes | +| name | string | | Yes | +| settings | object | | Yes | + +#### LoadBalancingCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### LoadBalancingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| configs | | | No | +| enabled | | | No | + +#### LoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| invite_token | | Invitation token | No | +| password | string | | Yes | +| remember_me | boolean | Remember me flag | No | + +#### LoopNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### MCPAuthPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authorization_code | | | No | +| provider_id | string | | Yes | + +#### MCPProviderCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authentication | | | No | +| configuration | | | No | +| headers | | | No | +| icon | string | | Yes | +| icon_background | string | | No | +| icon_type | string | | Yes | +| name | string | | Yes | +| server_identifier | string | | Yes | +| server_url | string | | Yes | + +#### MCPProviderDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | 
string | | Yes | + +#### MCPProviderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authentication | | | No | +| configuration | | | No | +| headers | | | No | +| icon | string | | Yes | +| icon_background | string | | No | +| icon_type | string | | Yes | +| name | string | | Yes | +| provider_id | string | | Yes | +| server_identifier | string | | Yes | +| server_url | string | | Yes | + +#### MCPServerCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Server description | No | +| parameters | object | Server parameters configuration | Yes | + +#### MCPServerUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Server description | No | +| id | string | Server ID | Yes | +| parameters | object | Server parameters configuration | Yes | +| status | | Server status | No | + +#### Marketplace + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| marketplace_plugin_unique_identifier | string | | Yes | +| version | | | No | + +#### MemberInvitePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emails | [ string ] | | No | +| language | | | No | +| role | [TenantAccountRole](#tenantaccountrole) | | Yes | + +#### MemberRoleUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| role | string | | Yes | + +#### MessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_thoughts | [ [AgentThought](#agentthought) ] | | Yes | +| annotation | | | No | +| annotation_hit_history | | | No | +| answer_tokens | integer | | Yes | +| conversation_id | string | | Yes | +| created_at | | | No | +| error | | | No | +| feedbacks | [ [Feedback](#feedback) ] | | Yes | +| from_account_id | | | No | +| from_end_user_id | | | No | +| 
from_source | string | | Yes | +| id | string | | Yes | +| inputs | object | | Yes | +| message | [JSONValue](#jsonvalue) | | Yes | +| message_files | [ [MessageFile](#messagefile) ] | | Yes | +| message_metadata_dict | [JSONValue](#jsonvalue) | | Yes | +| message_tokens | integer | | Yes | +| parent_message_id | | | No | +| provider_response_latency | number | | Yes | +| query | string | | Yes | +| re_sign_file_url_answer | string | | Yes | +| status | string | | Yes | +| workflow_run_id | | | No | + +#### MessageDetailResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_thoughts | [ [AgentThought](#agentthought) ] | | No | +| annotation | | | No | +| annotation_hit_history | | | No | +| answer_tokens | | | No | +| conversation_id | string | | Yes | +| created_at | | | No | +| error | | | No | +| extra_contents | [ [HumanInputContent](#humaninputcontent) ] | | No | +| feedbacks | [ [Feedback](#feedback) ] | | No | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| inputs | object | | Yes | +| message | | | No | +| message_files | [ [MessageFile](#messagefile) ] | | No | +| message_metadata_dict | | | No | +| message_tokens | | | No | +| parent_message_id | | | No | +| provider_response_latency | | | No | +| query | string | | Yes | +| re_sign_file_url_answer | string | | Yes | +| status | string | | Yes | +| workflow_run_id | | | No | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| message_id | string | Message ID | Yes | +| rating | | | No | + +#### MessageFile + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| belongs_to | | | No | +| filename | string | | Yes | +| id | string | | Yes | +| mime_type | | | No | +| size | | | No | +| transfer_method | string | | Yes | +| type | string | | Yes | +| upload_file_id | 
| | No | +| url | | | No | + +#### MessageInfiniteScrollPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [MessageDetailResponse](#messagedetailresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MetadataArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| type | string | *Enum:* `"number"`, `"string"`, `"time"` | Yes | + +#### MetadataDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| value | | | No | + +#### MetadataFilteringCondition + +Metadata Filtering Condition. 
+ +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conditions | | | No | +| logical_operator | | | No | + +#### MetadataOperationData + +Metadata operation data + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| operation_data | [ [DocumentMetadataOperation](#documentmetadataoperation) ] | | Yes | + +#### MetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### ModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| completion_params | object | | No | +| mode | [LLMMode](#llmmode) | | Yes | +| name | string | | Yes | +| provider | string | | Yes | + +#### ModelConfigPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| model_dict | | | No | +| pre_prompt | | | No | +| updated_at | | | No | +| updated_by | | | No | + +#### ModelConfigRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_mode | | Agent mode configuration | No | +| configs | | Model configuration parameters | No | +| dataset_configs | | Dataset configurations | No | +| model | | Model name | No | +| more_like_this | | More like this configuration | No | +| opening_statement | | Opening statement | No | +| provider | | Model provider | No | +| retrieval_model | | Retrieval model configuration | No | +| speech_to_text | | Speech to text configuration | No | +| suggested_questions | | Suggested questions | No | +| text_to_speech | | Text to speech configuration | No | +| tools | | Available tools | No | + +#### ModelType + +Enum class for model type. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ModelType | string | Enum class for model type. 
| | + +#### MoreLikeThisQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| response_mode | string | *Enum:* `"blocking"`, `"streaming"` | Yes | + +#### NodeIdQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | | Yes | + +#### NodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### NodeRunRequiredPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | object | | Yes | + +#### NotionEstimatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| notion_info_list | [ object ] | | Yes | +| process_rule | object | | Yes | + +#### NotionIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emoji | | | No | +| type | string | | Yes | +| url | | | No | + +#### NotionInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| pages | [ [NotionPage](#notionpage) ] | | Yes | +| workspace_id | string | | Yes | + +#### NotionIntegrateInfoList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| notion_info | [ [NotionIntegrateWorkspace](#notionintegrateworkspace) ] | | No | + +#### NotionIntegratePage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_bound | boolean | | No | +| page_icon | [DataSourceIntegrateIcon](#datasourceintegrateicon) | | No | +| page_id | string | | No | +| page_name | string | | No | +| parent_id | string | | No | +| type | string | | No | + +#### NotionIntegrateWorkspace + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| pages | [ [NotionIntegratePage](#notionintegratepage) ] | | No | +| workspace_icon 
| string | | No | +| workspace_id | string | | No | +| workspace_name | string | | No | + +#### NotionPage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page_icon | | | No | +| page_id | string | | Yes | +| page_name | string | | Yes | +| type | string | | Yes | + +#### OAuthDataSourceBindingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### OAuthDataSourceResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | string | Authorization URL or 'internal' for internal setup | Yes | + +#### OAuthDataSourceSyncResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### OwnerTransferCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| token | string | | Yes | + +#### OwnerTransferEmailPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | | | No | + +#### OwnerTransferPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| token | string | | Yes | + +#### Package + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifier | string | | Yes | +| version | | | No | + +#### PaginatedConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ConversationVariableResponse](#conversationvariableresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### Parser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | 
Yes | + +#### ParserAsset + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_name | string | | Yes | +| plugin_unique_identifier | string | | Yes | + +#### ParserCreateCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | +| name | | | No | + +#### ParserCredentialCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | + +#### ParserCredentialDelete + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### ParserCredentialId + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | + +#### ParserCredentialSwitch + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### ParserCredentialUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| name | | | No | + +#### ParserCredentialValidate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | + +#### ParserDeleteCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserDeleteModels + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserDynamicOptions + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| credential_id | 
| | No | +| parameter | string | | Yes | +| plugin_id | string | | Yes | +| provider | string | | Yes | +| provider_type | string | *Enum:* `"tool"`, `"trigger"` | Yes | + +#### ParserDynamicOptionsWithCredentials + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| parameter | string | | Yes | +| plugin_id | string | | Yes | +| provider | string | | Yes | + +#### ParserEnable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_trigger | boolean | | Yes | +| trigger_id | string | | Yes | + +#### ParserExcludePlugin + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_id | string | | Yes | + +#### ParserGetCredentials + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| config_from | | | No | +| credential_id | | | No | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserGetDefault + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserGithubInstall + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| package | string | | Yes | +| plugin_unique_identifier | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### ParserGithubUpgrade + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_plugin_unique_identifier | string | | Yes | +| original_plugin_unique_identifier | string | | Yes | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### ParserGithubUpload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### 
ParserIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| filename | string | | Yes | +| tenant_id | string | | Yes | + +#### ParserLatest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_ids | [ string ] | | Yes | + +#### ParserList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | Page number | No | +| page_size | integer | Page size (1-256) | No | + +#### ParserMarketplaceUpgrade + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_plugin_unique_identifier | string | | Yes | +| original_plugin_unique_identifier | string | | Yes | + +#### ParserModelList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_type | | | No | + +#### ParserParameter + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | string | | Yes | + +#### ParserPermissionChange + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| debug_permission | [DebugPermission](#debugpermission) | | Yes | +| install_permission | [InstallPermission](#installpermission) | | Yes | + +#### ParserPluginIdentifierQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifier | string | | Yes | + +#### ParserPluginIdentifiers + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifiers | [ string ] | | Yes | + +#### ParserPostDefault + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_settings | [ [Inner](#inner) ] | | Yes | + +#### ParserPostModels + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| config_from | | | No | +| credential_id | | | No | +| load_balancing | | | No | +| model | string | | Yes | +| model_type | 
[ModelType](#modeltype) | | Yes | + +#### ParserPreferencesChange + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_upgrade | [PluginAutoUpgradeSettingsPayload](#pluginautoupgradesettingspayload) | | Yes | +| permission | [PluginPermissionSettingsPayload](#pluginpermissionsettingspayload) | | Yes | + +#### ParserPreferredProviderType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| preferred_provider_type | string | *Enum:* `"custom"`, `"system"` | Yes | + +#### ParserReadme + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | string | | No | +| plugin_unique_identifier | string | | Yes | + +#### ParserSwitch + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserTasks + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | Page number | No | +| page_size | integer | Page size (1-256) | No | + +#### ParserUninstall + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_installation_id | string | | Yes | + +#### ParserUpdateCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | +| name | | | No | + +#### ParserValidate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### PartnerTenantsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| click_id | string | Click Id from partner referral link | Yes | + +#### 
PausedNodeResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | | Yes | +| node_title | string | | Yes | +| pause_type | [HumanInputPauseTypeResponse](#humaninputpausetyperesponse) | | Yes | + +#### Payload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| icon_info | | | No | +| name | string | | Yes | + +#### PipelineVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_file_extension | [ string ] | | No | +| allow_file_upload_methods | [ string ] | | No | +| allowed_file_types | [ string ] | | No | +| belong_to_node_id | string | | No | +| default_value | object | | No | +| label | string | | No | +| max_length | integer | | No | +| options | [ string ] | | No | +| placeholder | string | | No | +| required | boolean | | No | +| tooltips | string | | No | +| type | string | | No | +| unit | string | | No | +| variable | string | | No | + +#### PlaceholderType + +Default value types for form inputs. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| PlaceholderType | string | Default value types for form inputs. 
| | + +#### PluginAutoUpgradeSettingsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| exclude_plugins | [ string ] | | No | +| include_plugins | [ string ] | | No | +| strategy_setting | [StrategySetting](#strategysetting) | | No | +| upgrade_mode | [UpgradeMode](#upgrademode) | | No | +| upgrade_time_of_day | integer | | No | + +#### PluginDependency + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_identifier | | | No | +| type | [Type](#type) | | Yes | +| value | | | Yes | + +#### PluginEndpointListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoints | [ object ] | Endpoint information | Yes | + +#### PluginPermissionSettingsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| debug_permission | [DebugPermission](#debugpermission) | | No | +| install_permission | [InstallPermission](#installpermission) | | No | + +#### PreProcessingRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | | Yes | +| id | string | | Yes | + +#### ProcessRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mode | string | *Enum:* `"automatic"`, `"custom"`, `"hierarchical"` | Yes | +| rules | | | No | + +#### PublishWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| marked_comment | | | No | +| marked_name | | | No | + +#### PublishedWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_preview | boolean | | No | +| original_document_id | | | No | +| response_mode | string | *Enum:* `"blocking"`, `"streaming"` | No | +| start_node_id | string | | Yes | + +#### 
RagPipelineDatasetImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| yaml_content | string | | Yes | + +#### RagPipelineImport + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_dsl_version | string | | No | +| dataset_id | string | | No | +| error | string | | No | +| id | string | | No | +| imported_dsl_version | string | | No | +| pipeline_id | string | | No | +| status | string | | No | + +#### RagPipelineImportCheckDependencies + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| leaked_dependencies | [ [RagPipelineLeakedDependency](#ragpipelineleakeddependency) ] | | No | + +#### RagPipelineImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| mode | string | | Yes | +| name | | | No | +| pipeline_id | | | No | +| yaml_content | | | No | +| yaml_url | | | No | + +#### RagPipelineLeakedDependency + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_identifier | string | | No | +| type | string | | No | +| value | object | | No | + +#### RagPipelineRecommendedPluginQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| type | string | | No | + +#### RecommendedAppInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| mode | | | No | +| name | | | No | + +#### RecommendedAppListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| categories | [ string ] | | Yes | +| recommended_apps | [ [RecommendedAppResponse](#recommendedappresponse) ] | | Yes | + +#### RecommendedAppResponse + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| app | | | No | +| app_id | string | | Yes | +| can_trial | | | No | +| categories | [ string ] | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| description | | | No | +| is_listed | | | No | +| position | | | No | +| privacy_policy | | | No | + +#### RecommendedAppsQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | | Language code for recommended app localization | No | + +#### RelatedAppList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AppDetailKernel](#appdetailkernel) ] | | No | +| total | integer | | No | + +#### RerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | | | No | +| reranking_provider_name | | | No | + +#### ResultResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | | Yes | + +#### RetrievalMethod + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| RetrievalMethod | string | | | + +#### RetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| metadata_filtering_conditions | | | No | +| reranking_enable | boolean | | Yes | +| reranking_mode | | | No | +| reranking_model | | | No | +| score_threshold | | | No | +| score_threshold_enabled | boolean | | Yes | +| search_method | [RetrievalMethod](#retrievalmethod) | | Yes | +| top_k | integer | | Yes | +| weights | | | No | + +#### Rule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| parent_mode | | | No | +| pre_processing_rules | | | No | +| segmentation | | | No | +| subchunk_segmentation | | | No | + +#### RuleCodeGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code_language | string | Programming language 
for code generation | No | +| instruction | string | Rule generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| no_variable | boolean | Whether to exclude variables | No | + +#### RuleGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| instruction | string | Rule generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| no_variable | boolean | Whether to exclude variables | No | + +#### RuleStructuredOutputPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| instruction | string | Structured output generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | + +#### SavedMessageCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | | Yes | + +#### SavedMessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### SegmentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | string | | Yes | +| keywords | | | No | + +#### SegmentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | string | | No | +| hit_count_gte | | | No | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | +| status | [ string ] | | No | + +#### SegmentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | string | | Yes | +| keywords | | | No | +| regenerate_child_chunks | boolean | | No | +| summary | | | No | + +#### Segmentation + +| Name | Type | Description | Required | +| ---- | ---- | 
----------- | -------- | +| chunk_overlap | integer | | No | +| max_tokens | integer | | Yes | +| separator | string | | No | + +#### SimpleAccount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | + +#### SimpleEndUser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| is_anonymous | boolean | | Yes | +| session_id | | | No | +| type | string | | Yes | + +#### SimpleMessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | string | | Yes | +| inputs | object | | Yes | +| message | string | | Yes | +| query | string | | Yes | + +#### SimpleModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_dict | | | No | +| pre_prompt | | | No | + +#### Site + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_base_url | | | No | +| chat_color_theme | | | No | +| chat_color_theme_inverted | | | No | +| code | | | No | +| copyright | | | No | +| created_at | | | No | +| created_by | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | | | No | +| default_language | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| privacy_policy | | | No | +| prompt_public | | | No | +| show_workflow_steps | | | No | +| title | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | + +#### StatisticTimeRangeQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### StatusCount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| failed | integer | | Yes | 
+| partial_success | integer | | Yes | +| paused | integer | | Yes | +| success | integer | | Yes | + +#### StrategySetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| StrategySetting | string | | | + +#### SubscriptionQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interval | string | Billing interval
*Enum:* `"month"`, `"year"` | Yes | +| plan | string | Subscription plan
*Enum:* `"professional"`, `"team"` | Yes | + +#### SuggestedQuestionsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ string ] | Suggested questions | Yes | + +#### SwitchWorkspacePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tenant_id | string | | Yes | + +#### SyncDraftWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ object ] | | No | +| environment_variables | [ object ] | | No | +| features | object | | Yes | +| graph | object | | Yes | +| hash | | | No | + +#### SyncDraftWorkflowResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| hash | string | | No | +| result | string | | No | +| updated_at | string | | No | + +#### SystemFeatureResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | System feature configuration object | No | + +#### Tag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | + +#### TagBasePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | Tag name | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | Tag IDs to bind | Yes | +| target_id | string | Target ID to bind tags to | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagBindingRemovePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | Tag IDs to remove | Yes | +| target_id | string | Target ID to unbind tag from | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagListQueryParam + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| type | string | Tag type filter
*Enum:* `""`, `"app"`, `"knowledge"` | No | + +#### TagResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| binding_count | | | No | +| id | string | | Yes | +| name | string | | Yes | +| type | | | No | + +#### TagType + +Tag type + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| TagType | string | Tag type | | + +#### TenantAccountRole + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| TenantAccountRole | string | | | + +#### TenantInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| custom_config | | | No | +| id | string | | Yes | +| in_trial | | | No | +| name | | | No | +| next_credit_reset_date | | | No | +| plan | | | No | +| role | | | No | +| status | | | No | +| trial_credits | | | No | +| trial_credits_used | | | No | +| trial_end_reason | | | No | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### TextToSpeechPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Whether to stream audio | No | +| text | string | Text to convert | Yes | +| voice | | Voice name | No | + +#### TextToSpeechRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | | No | +| streaming | | | No | +| text | | | No | +| voice | | | No | + +#### TextToSpeechVoiceQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | string | Language code | Yes | + +#### ToolOAuthCustomClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| client_params | | | No | +| enable_oauth_custom_client | | | No | + +#### ToolParameterForm + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ToolParameterForm | string | | | + +#### TraceConfigPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tracing_config | object | Tracing configuration data | Yes | +| tracing_provider | string | Tracing provider name | Yes | + +#### TraceProviderQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tracing_provider | string | Tracing provider name | Yes | + +#### TrialAppDetailWithSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | string | | No | +| api_base_url | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| deleted_tools | [ [TrialDeletedTool](#trialdeletedtool) ] | | No | +| description | string | | No | +| enable_api | boolean | | No | +| enable_site | boolean | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| id | string | | No | +| max_active_requests | integer | | No | +| mode | string | | No | +| model_config | [TrialAppModelConfig](#trialappmodelconfig) | | No | +| name | string | | No | +| site | [TrialSite](#trialsite) | | No | +| tags | [ [TrialTag](#trialtag) ] | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| use_icon_as_answer_icon | boolean | | No | +| workflow | [TrialWorkflowPartial](#trialworkflowpartial) | | No | + +#### TrialAppModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_mode | object | | No | +| annotation_reply | object | | No | +| chat_prompt_config | object | | No | +| completion_prompt_config | object | | No | +| created_at | object | | No | +| created_by | string | | No | +| dataset_configs | object 
| | No | +| dataset_query_variable | string | | No | +| external_data_tools | object | | No | +| file_upload | object | | No | +| model | object | | No | +| more_like_this | object | | No | +| opening_statement | string | | No | +| pre_prompt | string | | No | +| prompt_type | string | | No | +| retriever_resource | object | | No | +| sensitive_word_avoidance | object | | No | +| speech_to_text | object | | No | +| suggested_questions | object | | No | +| suggested_questions_after_answer | object | | No | +| text_to_speech | object | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| user_input_form | object | | No | + +#### TrialConversationVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| id | string | | No | +| name | string | | No | +| value | object | | No | +| value_type | string | | No | + +#### TrialDeletedTool + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | No | +| tool_name | string | | No | +| type | string | | No | + +#### TrialPipelineVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_file_extension | [ string ] | | No | +| allow_file_upload_methods | [ string ] | | No | +| allowed_file_types | [ string ] | | No | +| belong_to_node_id | string | | No | +| default_value | object | | No | +| label | string | | No | +| max_length | integer | | No | +| options | [ string ] | | No | +| placeholder | string | | No | +| required | boolean | | No | +| tooltips | string | | No | +| type | string | | No | +| unit | string | | No | +| variable | string | | No | + +#### TrialSimpleAccount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | No | +| id | string | | No | +| name | string | | No | + +#### TrialSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| access_token | string | | No | +| app_base_url | string | | No | +| chat_color_theme | string | | No | +| chat_color_theme_inverted | boolean | | No | +| code | string | | No | +| copyright | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| custom_disclaimer | string | | No | +| customize_domain | string | | No | +| customize_token_strategy | string | | No | +| default_language | string | | No | +| description | string | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| privacy_policy | string | | No | +| prompt_public | boolean | | No | +| show_workflow_steps | boolean | | No | +| title | string | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| use_icon_as_answer_icon | boolean | | No | + +#### TrialTag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| name | string | | No | +| type | string | | No | + +#### TrialWorkflow + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ [TrialConversationVariable](#trialconversationvariable) ] | | No | +| created_at | object | | No | +| created_by | [TrialSimpleAccount](#trialsimpleaccount) | | No | +| environment_variables | [ object ] | | No | +| features | object | | No | +| graph | object | | No | +| hash | string | | No | +| id | string | | No | +| marked_comment | string | | No | +| marked_name | string | | No | +| rag_pipeline_variables | [ [TrialPipelineVariable](#trialpipelinevariable) ] | | No | +| tool_published | boolean | | No | +| updated_at | object | | No | +| updated_by | [TrialSimpleAccount](#trialsimpleaccount) | | No | +| version | string | | No | + +#### TrialWorkflowPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| id 
| string | | No | +| updated_at | object | | No | +| updated_by | string | | No | + +#### TriggerOAuthClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| client_params | | | No | +| enabled | | | No | + +#### TriggerSubscriptionBuilderCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_type | string | | No | + +#### TriggerSubscriptionBuilderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | | | No | +| name | | | No | +| parameters | | | No | +| properties | | | No | + +#### TriggerSubscriptionBuilderVerifyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | + +#### Type + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| Type | string | | | + +#### UpdateAnnotationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_reply | | | No | +| answer | | | No | +| content | | | No | +| question | | | No | + +#### UpdateAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | App description (max 400 chars) | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| max_active_requests | | Maximum active requests | No | +| name | string | App name | Yes | +| use_icon_as_answer_icon | | Use icon as answer icon | No | + +#### UpgradeMode + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| UpgradeMode | string | | | + +#### UploadConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_image_file_size_limit | | | No | +| audio_file_size_limit | integer | | Yes | +| batch_count_limit | integer | | Yes | +| file_size_limit | integer | 
| Yes | +| file_upload_limit | | | No | +| image_file_batch_limit | integer | | Yes | +| image_file_size_limit | integer | | Yes | +| single_chunk_attachment_limit | integer | | Yes | +| video_file_size_limit | integer | | Yes | +| workflow_file_upload_limit | integer | | Yes | + +#### UpsertNotificationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| contents | [ [LangContentPayload](#langcontentpayload) ] | | Yes | +| end_time | | RFC3339, e.g. 2026-03-20T23:59:59Z | No | +| frequency | string | 'once' \| 'every_page_load' | No | +| notification_id | | Omit to create; supply UUID to update | No | +| start_time | | RFC3339, e.g. 2026-03-01T00:00:00Z | No | +| status | string | 'active' \| 'inactive' | No | + +#### UserAction + +User action configuration. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| button_style | [ButtonStyle](#buttonstyle) | | No | +| id | string | | Yes | +| title | string | | Yes | + +#### WebhookTriggerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| id | string | | Yes | +| node_id | string | | Yes | +| webhook_debug_url | string | | Yes | +| webhook_id | string | | Yes | +| webhook_url | string | | Yes | + +#### WebsiteCrawlPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| options | object | | Yes | +| provider | string | *Enum:* `"firecrawl"`, `"jinareader"`, `"watercrawl"` | Yes | +| url | string | | Yes | + +#### WebsiteCrawlStatusQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider | string | *Enum:* `"firecrawl"`, `"jinareader"`, `"watercrawl"` | Yes | + +#### WebsiteInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| job_id | string | | Yes | +| only_main_content | boolean | | No | +| provider | string | | Yes | +| urls | [ string ] | 
| Yes | + +#### WeightKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | Yes | + +#### WeightModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | | | No | +| vector_setting | | | No | +| weight_type | | | No | + +#### WeightVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | Yes | +| embedding_provider_name | string | | Yes | +| vector_weight | number | | Yes | + +#### Workflow + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ [ConversationVariable](#conversationvariable) ] | | No | +| created_at | object | | No | +| created_by | [SimpleAccount](#simpleaccount) | | No | +| environment_variables | [ object ] | | No | +| features | object | | No | +| graph | object | | No | +| hash | string | | No | +| id | string | | No | +| marked_comment | string | | No | +| marked_name | string | | No | +| rag_pipeline_variables | [ [PipelineVariable](#pipelinevariable) ] | | No | +| tool_published | boolean | | No | +| updated_at | object | | No | +| updated_by | [SimpleAccount](#simpleaccount) | | No | +| version | string | | No | + +#### WorkflowAppLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowAppLogPartialResponse](#workflowapplogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowAppLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| created_by_role | | | No | +| created_from | | | No | +| details | | | No | +| id | string | | Yes | +| workflow_run | | 
| No | + +#### WorkflowAppLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at__after | | Filter logs created after this timestamp | No | +| created_at__before | | Filter logs created before this timestamp | No | +| created_by_account | | Filter by account | No | +| created_by_end_user_session_id | | Filter by end user session ID | No | +| detail | boolean | Whether to return detailed logs | No | +| keyword | | Search keyword for filtering logs | No | +| limit | integer | Number of items per page (1-100) | No | +| page | integer | Page number (1-99999) | No | +| status | | Execution status filter (succeeded, failed, stopped, partial-succeeded) | No | + +#### WorkflowArchivedLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowArchivedLogPartialResponse](#workflowarchivedlogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowArchivedLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| id | string | | Yes | +| trigger_metadata | | | No | +| workflow_run | | | No | + +#### WorkflowCommentBasic + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | +| mention_count | integer | | No | +| participants | [ [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) ] | | No | +| position_x | number | | No | +| position_y | number | | No | +| reply_count | integer | | No | +| resolved | boolean | | No | +| resolved_at | object 
| | No | +| resolved_by | string | | No | +| resolved_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| updated_at | object | | No | + +#### WorkflowCommentCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| id | string | | No | + +#### WorkflowCommentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Comment content | Yes | +| mentioned_user_ids | [ string ] | Mentioned user IDs | No | +| position_x | number | Comment X position | Yes | +| position_y | number | Comment Y position | Yes | + +#### WorkflowCommentDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | +| mentions | [ [_AnonymousInlineModel_f7ff64cce858](#_anonymousinlinemodel_f7ff64cce858) ] | | No | +| position_x | number | | No | +| position_y | number | | No | +| replies | [ [_AnonymousInlineModel_55c39c6a4b9e](#_anonymousinlinemodel_55c39c6a4b9e) ] | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | +| resolved_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| updated_at | object | | No | + +#### WorkflowCommentMentionUsersPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| users | [ [AccountWithRole](#accountwithrole) ] | | Yes | + +#### WorkflowCommentReplyCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| id | string | | No | + +#### WorkflowCommentReplyPayload + +| Name | Type | Description | Required | +| ---- | ---- | 
----------- | -------- | +| content | string | Reply content | Yes | +| mentioned_user_ids | [ string ] | Mentioned user IDs | No | + +#### WorkflowCommentReplyUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| updated_at | object | | No | + +#### WorkflowCommentResolve + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | + +#### WorkflowCommentUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| updated_at | object | | No | + +#### WorkflowCommentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Comment content | Yes | +| mentioned_user_ids | | Mentioned user IDs. Omit to keep existing mentions. | No | +| position_x | | Comment X position | No | +| position_y | | Comment Y position | No | + +#### WorkflowDraftEnvVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| id | string | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowDraftEnvVariableList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| items | [ [WorkflowDraftEnvVariable](#workflowdraftenvvariable) ] | | No | + +#### WorkflowDraftVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| full_content | object | | No | +| id | string | | No | +| is_truncated | boolean | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value | object | | No | 
+| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowDraftVariableList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| items | [ [WorkflowDraftVariable](#workflowdraftvariable) ] | | No | + +#### WorkflowDraftVariableListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | Items per page | No | +| page | integer | Page number | No | + +#### WorkflowDraftVariableListWithoutValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| items | [ [WorkflowDraftVariableWithoutValue](#workflowdraftvariablewithoutvalue) ] | | No | +| total | object | | No | + +#### WorkflowDraftVariablePatchPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | | | No | +| value | | | No | + +#### WorkflowDraftVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | | Variable name | No | +| value | | Variable value | No | + +#### WorkflowDraftVariableWithoutValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| id | string | | No | +| is_truncated | boolean | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowExecutionStatus + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| WorkflowExecutionStatus | string | | | + +#### WorkflowFeaturesPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | Workflow feature configuration | Yes | + +#### WorkflowListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | | No | +| named_only | boolean | | No | +| page | 
integer | | No | +| user_id | | | No | + +#### WorkflowOnlineUsersPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_ids | [ string ] | App IDs | No | + +#### WorkflowPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_more | boolean | | No | +| items | [ [Workflow](#workflow) ] | | No | +| limit | integer | | No | +| page | integer | | No | + +#### WorkflowPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| id | string | | Yes | +| updated_at | | | No | +| updated_by | | | No | + +#### WorkflowPauseDetailsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| paused_at | | | No | +| paused_nodes | [ [PausedNodeResponse](#pausednoderesponse) ] | | Yes | + +#### WorkflowRunCountQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| status | | Workflow run status filter | No | +| time_range | | Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), 30m (30 minutes), 30s (30 seconds). Filters by created_at field. | No | +| triggered_from | | Filter by trigger source: debugging or app-run. 
Default: debugging | No | + +#### WorkflowRunCountResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| failed | integer | | Yes | +| partial_succeeded | integer | | Yes | +| running | integer | | Yes | +| stopped | integer | | Yes | +| succeeded | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowRunDetailResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| created_by_role | | | No | +| elapsed_time | | | No | +| error | | | No | +| exceptions_count | | | No | +| finished_at | | | No | +| graph | | | Yes | +| id | string | | Yes | +| inputs | | | Yes | +| outputs | | | Yes | +| status | | | No | +| total_steps | | | No | +| total_tokens | | | No | +| version | | | No | + +#### WorkflowRunExportResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| presigned_url | | Pre-signed URL for download | No | +| presigned_url_expires_at | | Pre-signed URL expiration time | No | +| status | string | Export status: success/failed | Yes | + +#### WorkflowRunForArchivedLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| elapsed_time | | | No | +| id | string | | Yes | +| status | | | No | +| total_tokens | | | No | +| triggered_from | | | No | + +#### WorkflowRunForListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| elapsed_time | | | No | +| exceptions_count | | | No | +| finished_at | | | No | +| id | string | | Yes | +| retry_index | | | No | +| status | | | No | +| total_steps | | | No | +| total_tokens | | | No | +| version | | | No | + +#### WorkflowRunForLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| 
elapsed_time | | | No | +| error | | | No | +| exceptions_count | | | No | +| finished_at | | | No | +| id | string | | Yes | +| status | | | No | +| total_steps | | | No | +| total_tokens | | | No | +| triggered_from | | | No | +| version | | | No | + +#### WorkflowRunListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last run ID for pagination | No | +| limit | integer | Number of items per page (1-100) | No | +| status | | Workflow run status filter | No | +| triggered_from | | Filter by trigger source: debugging or app-run. Default: debugging | No | + +#### WorkflowRunNodeExecutionListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowRunNodeExecutionResponse](#workflowrunnodeexecutionresponse) ] | | Yes | + +#### WorkflowRunNodeExecutionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| created_by_role | | | No | +| elapsed_time | | | No | +| error | | | No | +| execution_metadata | | | No | +| extras | | | No | +| finished_at | | | No | +| id | string | | Yes | +| index | | | No | +| inputs | | | No | +| inputs_truncated | | | No | +| node_id | | | No | +| node_type | | | No | +| outputs | | | No | +| outputs_truncated | | | No | +| predecessor_node_id | | | No | +| process_data | | | No | +| process_data_truncated | | | No | +| status | | | No | +| title | | | No | + +#### WorkflowRunPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowRunForListResponse](#workflowrunforlistresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | + +#### 
WorkflowRunQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### WorkflowRunRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | + +#### WorkflowStatisticQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end | | End date and time (YYYY-MM-DD HH:MM) | No | +| start | | Start date and time (YYYY-MM-DD HH:MM) | No | + +#### WorkflowToolCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | Yes | +| icon | object | | Yes | +| label | string | | Yes | +| labels | | | No | +| name | string | | Yes | +| parameters | [ [WorkflowToolParameterConfiguration](#workflowtoolparameterconfiguration) ] | | No | +| privacy_policy | | | No | +| workflow_app_id | string | | Yes | + +#### WorkflowToolDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| workflow_tool_id | string | | Yes | + +#### WorkflowToolParameterConfiguration + +Workflow tool configuration + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | The description of the parameter | Yes | +| form | [ToolParameterForm](#toolparameterform) | The form of the parameter | Yes | +| name | string | The name of the parameter | Yes | + +#### WorkflowToolUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | Yes | +| icon | object | | Yes | +| label | string | | Yes | +| labels | | | No | +| name | string | | Yes | +| parameters | [ [WorkflowToolParameterConfiguration](#workflowtoolparameterconfiguration) ] | | No | +| privacy_policy | | | No | +| workflow_tool_id | string | | Yes | + +#### WorkflowTriggerListResponse + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowTriggerResponse](#workflowtriggerresponse) ] | | Yes | + +#### WorkflowTriggerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| icon | string | | Yes | +| id | string | | Yes | +| node_id | string | | Yes | +| provider_name | string | | Yes | +| status | string | | Yes | +| title | string | | Yes | +| trigger_type | string | | Yes | +| updated_at | | | No | + +#### WorkflowUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| marked_comment | | | No | +| marked_name | | | No | + +#### WorkspaceCustomConfigPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| remove_webapp_brand | | | No | +| replace_webapp_logo | | | No | + +#### WorkspaceInfoPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### WorkspaceListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | | No | +| page | integer | | No | + +#### _AnonymousInlineModel_55c39c6a4b9e + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | + +#### _AnonymousInlineModel_6fec07cd0d85 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar_url | object | | No | +| email | string | | No | +| id | string | | No | +| name | string | | No | + +#### _AnonymousInlineModel_b1954337d565 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable | boolean | | No | +| model_name | string | | No | +| model_provider_name | string | | No 
| +| summary_prompt | string | | No | + +#### _AnonymousInlineModel_f7ff64cce858 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mentioned_user_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| mentioned_user_id | string | | No | +| reply_id | string | | No | + +## FastOpenAPI Preview (OpenAPI 3.0) + +### Dify API (FastOpenAPI PoC) +FastOpenAPI proof of concept for Dify API + +#### Version: 1.0 + +--- + +##### [GET] /console/api/init +**Get initialization validation status.** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [InitStatusResponse](#initstatusresponse)
| + +##### [POST] /console/api/init +**Validate initialization password.** + +###### Request Body + +| Required | Schema | +| -------- | ------ | +| Yes | **application/json**: [InitValidatePayload](#initvalidatepayload)
| + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Created | **application/json**: [InitValidateResponse](#initvalidateresponse)
| + +##### [GET] /console/api/ping +**Health check endpoint for connection testing.** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [PingResponse](#pingresponse)
| + +##### [GET] /console/api/setup +**Get system setup status. + + NOTE: This endpoint is unauthenticated by design. + + During first-time bootstrap there is no admin account yet, so frontend initialization must be + able to query setup progress before any login flow exists. + + Only bootstrap-safe status information should be returned by this endpoint. + ** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [SetupStatusResponse](#setupstatusresponse)
| + +##### [POST] /console/api/setup +**Initialize system setup with admin account. + + NOTE: This endpoint is unauthenticated by design for first-time bootstrap. + Access is restricted by deployment mode (`SELF_HOSTED`), one-time setup guards, + and init-password validation rather than user session authentication. + ** + +###### Request Body + +| Required | Schema | +| -------- | ------ | +| Yes | **application/json**: [SetupRequestPayload](#setuprequestpayload)
| + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Created | **application/json**: [SetupResponse](#setupresponse)
| + +##### [GET] /console/api/version +**Check for application version updates.** + +###### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| current_version | query | | Yes | string | + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [VersionResponse](#versionresponse)
| + +--- +##### Schemas + +###### ErrorSchema + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| error | { **"details"**: string, **"message"**: string, **"status"**: integer, **"type"**: string } | | Yes | + +###### InitStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| status | string,
**Available values:** "finished", "not_started" | Initialization status
*Enum:* `"finished"`, `"not_started"` | Yes | + +###### InitValidatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| password | string | Initialization password | Yes | + +###### InitValidateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +###### PingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Health check result | Yes | + +###### SetupRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Admin email address | Yes | +| language | | Admin language | No | +| name | string | Admin name (max 30 characters) | Yes | +| password | string | Admin password | Yes | + +###### SetupResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Setup result | Yes | + +###### SetupStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| setup_at | | Setup completion time (ISO format) | No | +| step | string,
**Available values:** "finished", "not_started" | Setup step status
*Enum:* `"finished"`, `"not_started"` | Yes | + +###### VersionFeatures + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| can_replace_logo | boolean | Whether logo replacement is supported | Yes | +| model_load_balancing_enabled | boolean | Whether model load balancing is enabled | Yes | + +###### VersionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| can_auto_update | boolean | Whether auto-update is supported | Yes | +| features | [VersionFeatures](#versionfeatures) | Feature flags and capabilities | Yes | +| release_date | string | Release date of latest version | Yes | +| release_notes | string | Release notes for latest version | Yes | +| version | string | Latest version number | Yes | diff --git a/api/openapi/markdown/service-swagger.md b/api/openapi/markdown/service-swagger.md new file mode 100644 index 0000000000..ec5ed280f5 --- /dev/null +++ b/api/openapi/markdown/service-swagger.md @@ -0,0 +1,2754 @@ +# Service API +API for application services + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## service_api +Service operations + +### / + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /app/feedbacks + +#### GET +##### Summary + +Get all feedbacks for the application + +##### Description + +Get all feedbacks for the application +Returns paginated list of all feedback submitted for messages in this app. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FeedbackListQuery](#feedbacklistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedbacks retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /apps/annotation-reply/{action} + +#### POST +##### Summary + +Enable or disable annotation reply feature + +##### Description + +Enable or disable annotation reply feature + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationReplyActionPayload](#annotationreplyactionpayload) | +| action | path | Action to perform: 'enable' or 'disable' | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 401 | Unauthorized - invalid API token | + +### /apps/annotation-reply/{action}/status/{job_id} + +#### GET +##### Summary + +Get the status of an annotation reply action job + +##### Description + +Get the status of an annotation reply action job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action type | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Job not found | + +### /apps/annotations + +#### GET +##### Summary + +List annotations for the application + +##### Description + +List annotations for the application + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotations retrieved successfully | [AnnotationList](#annotationlist) | +| 401 | Unauthorized - invalid API token | | + +#### POST 
+##### Summary + +Create a new annotation + +##### Description + +Create a new annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationCreatePayload](#annotationcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Annotation created successfully | [Annotation](#annotation) | +| 401 | Unauthorized - invalid API token | | + +### /apps/annotations/{annotation_id} + +#### DELETE +##### Summary + +Delete an annotation + +##### Description + +Delete an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | Annotation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Annotation deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Annotation not found | + +#### PUT +##### Summary + +Update an existing annotation + +##### Description + +Update an existing annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationCreatePayload](#annotationcreatepayload) | +| annotation_id | path | Annotation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation updated successfully | [Annotation](#annotation) | +| 401 | Unauthorized - invalid API token | | +| 403 | Forbidden - insufficient permissions | | +| 404 | Annotation not found | | + +### /audio-to-text + +#### POST +##### Summary + +Convert audio to text using speech-to-text + +##### Description + +Convert audio to text using speech-to-text +Accepts an audio file upload and returns the transcribed text. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Audio successfully transcribed | +| 400 | Bad request - no audio or invalid audio | +| 401 | Unauthorized - invalid API token | +| 413 | Audio file too large | +| 415 | Unsupported audio type | +| 500 | Internal server error | + +### /chat-messages + +#### POST +##### Summary + +Send a message in a chat conversation + +##### Description + +Send a message in a chat conversation +This endpoint handles chat messages for chat, agent chat, and advanced chat applications. +Supports conversation management and both blocking and streaming response modes. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatRequestPayload](#chatrequestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Message sent successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation or workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /chat-messages/{task_id}/stop + +#### POST +##### Summary + +Stop a running chat message generation + +##### Description + +Stop a running chat message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | The ID of the task to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /completion-messages + +#### POST +##### Summary + +Create a completion for the given prompt + +##### Description + +Create a completion for the given prompt +This endpoint generates a completion based on the provided inputs and query. 
+Supports both blocking and streaming response modes. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionRequestPayload](#completionrequestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Completion created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | +| 500 | Internal server error | + +### /completion-messages/{task_id}/stop + +#### POST +##### Summary + +Stop a running completion task + +##### Description + +Stop a running completion task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | The ID of the task to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /conversations + +#### GET +##### Summary + +List all conversations for the current user + +##### Description + +List all conversations for the current user +Supports pagination using last_id and limit parameters. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationListQuery](#conversationlistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversations retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Last conversation not found | + +### /conversations/{c_id} + +#### DELETE +##### Summary + +Delete a specific conversation + +##### Description + +Delete a specific conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | + +### /conversations/{c_id}/name + +#### POST +##### Summary + +Rename a conversation or auto-generate a name + +##### Description + +Rename a conversation or auto-generate a name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationRenamePayload](#conversationrenamepayload) | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation renamed successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | + +### /conversations/{c_id}/variables + +#### GET +##### Summary + +List all variables for a conversation + +##### Description + +List all variables for a conversation +Conversational variables are only available for chat applications. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariablesQuery](#conversationvariablesquery) | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variables retrieved successfully | [ConversationVariableInfiniteScrollPaginationResponse](#conversationvariableinfinitescrollpaginationresponse) | +| 401 | Unauthorized - invalid API token | | +| 404 | Conversation not found | | + +### /conversations/{c_id}/variables/{variable_id} + +#### PUT +##### Summary + +Update a conversation variable's value + +##### Description + +Update a conversation variable's value +Allows updating the value of a specific conversation variable. +The value must match the variable's expected type. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariableUpdatePayload](#conversationvariableupdatepayload) | +| c_id | path | Conversation ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable updated successfully | [ConversationVariableResponse](#conversationvariableresponse) | +| 400 | Bad request - type mismatch | | +| 401 | Unauthorized - invalid API token | | +| 404 | Conversation or variable not found | | + +### /datasets + +#### GET +##### Summary + +Resource for getting datasets + +##### Description + +List all datasets + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasets retrieved successfully | +| 401 | Unauthorized - invalid API token | + +#### POST +##### Summary + +Resource for creating datasets + +##### Description + +Create a new dataset + +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetCreatePayload](#datasetcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/pipeline/file-upload + +#### POST +##### Summary + +Upload a file for use in conversations + +##### Description + +Upload a file to a knowledgebase pipeline +Accepts a single file upload via multipart/form-data. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | File uploaded successfully | +| 400 | Bad request - no file or invalid file | +| 401 | Unauthorized - invalid API token | +| 413 | File too large | +| 415 | Unsupported file type | + +### /datasets/tags + +#### DELETE +##### Summary + +Delete a knowledge type tag + +##### Description + +Delete a knowledge type tag + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagDeletePayload](#tagdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tag deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +#### GET +##### Summary + +Get all knowledge type tags + +##### Description + +Get all knowledge type tags + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tags retrieved successfully | +| 401 | Unauthorized - invalid API token | + +#### PATCH +##### Description + +Update a knowledge type tag + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagUpdatePayload](#tagupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tag 
updated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +#### POST +##### Summary + +Add a knowledge type tag + +##### Description + +Add a knowledge type tag + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagCreatePayload](#tagcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tag created successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/tags/binding + +#### POST +##### Description + +Bind tags to a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingPayload](#tagbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tags bound successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/tags/unbinding + +#### POST +##### Description + +Unbind tags from a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagUnbindingPayload](#tagunbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tags unbound successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/{dataset_id} + +#### DELETE +##### Summary + +Deletes a dataset given its ID + +##### Description + +Delete a dataset +Args: + _: ignore + dataset_id (UUID): The ID of the dataset to be deleted. + +Returns: + dict: A dictionary with a key 'result' and a value 'success' + if the dataset was successfully deleted. Omitted in HTTP response. 
+ int: HTTP status code 204 indicating that the operation was successful. + +Raises: + NotFound: If the dataset with the given ID does not exist. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Dataset deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | +| 409 | Conflict - dataset is in use | + +#### GET +##### Description + +Get a specific dataset by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +#### PATCH +##### Description + +Update an existing dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetUpdatePayload](#datasetupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset updated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/document/create-by-file + +#### POST +##### Description + +Create a new document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document 
created successfully | +| 400 | Bad request - invalid file or parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create-by-text + +#### POST +##### Description + +Create a new document by providing text content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextCreatePayload](#documenttextcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create_by_file + +#### POST +##### Description + +Create a new document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid file or parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create_by_text + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for creating a new document by providing text content. Use /datasets/{dataset_id}/document/create-by-text instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextCreatePayload](#documenttextcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/documents + +#### GET +##### Description + +List all documents in a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/download-zip + +#### POST +##### Description + +Download selected uploaded documents as a single ZIP archive + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentBatchDownloadZipPayload](#documentbatchdownloadzippayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | ZIP archive generated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document or dataset not found | + +### /datasets/{dataset_id}/documents/metadata + +#### POST +##### Summary + +Update metadata for multiple documents + +##### Description + +Update metadata for multiple documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[MetadataOperationData](#metadataoperationdata) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents metadata updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/status/{action} + +#### PATCH +##### Summary + +Batch update document status + +##### Description + +Batch update document status +Args: + tenant_id: tenant id + dataset_id: dataset id + action: action to perform (Literal["enable", "disable", "archive", "un_archive"]) + +Returns: + dict: A dictionary with a key 'result' and a value 'success' + int: HTTP status code 200 indicating that the operation was successful. + +Raises: + NotFound: If the dataset with the given ID does not exist. + Forbidden: If the user does not have permission. + InvalidActionError: If the action is invalid or cannot be performed. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform: 'enable', 'disable', 'archive', or 'un_archive' | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document status updated successfully | +| 400 | Bad request - invalid action | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/{batch}/indexing-status + +#### GET +##### Description + +Get indexing status for documents in a batch + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | Batch ID | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved 
successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or documents not found | + +### /datasets/{dataset_id}/documents/{document_id} + +#### DELETE +##### Summary + +Delete document + +##### Description + +Delete a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Document deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - document is archived | +| 404 | Document not found | + +#### GET +##### Description + +Get a specific document by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document not found | + +#### PATCH +##### Description + +Update an existing document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/download + +#### GET +##### Description + +Get a signed download URL for a document's original uploaded file + +##### Parameters + +| Name | Located in | Description | Required | Schema 
| +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Download URL generated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document or upload file not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments + +#### GET +##### Description + +List segments in a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentListQuery](#segmentlistquery) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segments retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or document not found | + +#### POST +##### Description + +Create segments in a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentCreatePayload](#segmentcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segments created successfully | +| 400 | Bad request - segments data is missing | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or document not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id} + +#### DELETE +##### Description + +Delete a specific segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | 
+| document_id | path | Document ID | Yes | string | +| segment_id | path | Segment ID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Segment deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### GET +##### Description + +Get a specific segment by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segment retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### POST +##### Description + +Update a specific segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentUpdatePayload](#segmentupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Segment ID to update | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segment updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks + +#### GET +##### Description + +List child chunks for a segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkListQuery](#childchunklistquery) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | 
Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunks retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### POST +##### Description + +Create a new child chunk for a segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkCreatePayload](#childchunkcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunk created successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id} + +#### DELETE +##### Description + +Delete a specific child chunk + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | Child chunk ID to delete | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Child chunk deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, segment, or child chunk not found | + +#### PATCH +##### Description + +Update a specific child chunk + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkUpdatePayload](#childchunkupdatepayload) | +| child_chunk_id | path | Child chunk ID to update | Yes | string 
| +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunk updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, segment, or child chunk not found | + +### /datasets/{dataset_id}/documents/{document_id}/update-by-file + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by uploading a file. Use PATCH /datasets/{dataset_id}/documents/{document_id} instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update-by-text + +#### POST +##### Description + +Update an existing document by providing text content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update_by_file + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by uploading a file. Use PATCH /datasets/{dataset_id}/documents/{document_id} instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update_by_text + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by providing text content. Use /datasets/{dataset_id}/documents/{document_id}/update-by-text instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/hit-testing + +#### POST +##### Summary + +Perform hit testing on a dataset + +##### Description + +Perform hit testing on a dataset +Tests retrieval performance for the specified dataset. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Hit testing results | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata + +#### GET +##### Summary + +Get all metadata for a dataset + +##### Description + +Get all metadata for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +#### POST +##### Summary + +Create metadata for a dataset + +##### Description + +Create metadata for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MetadataArgs](#metadataargs) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Metadata created successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata/built-in + +#### GET +##### Summary + +Get all built-in metadata fields + +##### Description + +Get all built-in metadata fields + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Built-in fields retrieved successfully | +| 401 | Unauthorized - invalid 
API token | + +### /datasets/{dataset_id}/metadata/built-in/{action} + +#### POST +##### Summary + +Enable or disable built-in metadata field + +##### Description + +Enable or disable built-in metadata field + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform: 'enable' or 'disable' | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata/{metadata_id} + +#### DELETE +##### Summary + +Delete metadata + +##### Description + +Delete metadata + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| metadata_id | path | Metadata ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Metadata deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or metadata not found | + +#### PATCH +##### Summary + +Update metadata name + +##### Description + +Update metadata name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MetadataUpdatePayload](#metadataupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| metadata_id | path | Metadata ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or metadata not found | + +### /datasets/{dataset_id}/pipeline/datasource-plugins + +#### GET +##### Summary + +Resource for getting datasource plugins + +##### Description + +List 
all datasource plugins for a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| is_published | query | Whether to get published or draft datasource plugins (true for published, false for draft, default: true) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasource plugins retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/pipeline/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Resource for getting datasource plugins + +##### Description + +Run a datasource node for a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| node_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasource node run successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/pipeline/run + +#### POST +##### Summary + +Resource for running a rag pipeline + +##### Description + +Run a datasource node for a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Pipeline run successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/retrieve + +#### POST +##### Summary + +Perform hit testing on a dataset + +##### Description + +Perform hit testing on a dataset +Tests retrieval performance for the specified dataset. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Hit testing results | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/tags + +#### GET +##### Summary + +Get all knowledge type tags + +##### Description + +Get tags bound to a specific dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tags retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /end-users/{end_user_id} + +#### GET +##### Summary + +Get end user detail + +##### Description + +Get an end user by ID +This endpoint is scoped to the current app token's tenant/app to prevent +cross-tenant/app access when an end-user ID is known. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| end_user_id | path | End user ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | End user retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | End user not found | + +### /files/upload + +#### POST +##### Summary + +Upload a file for use in conversations + +##### Description + +Upload a file for use in conversations +Accepts a single file upload via multipart/form-data. 
+ +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | +| 400 | Bad request - no file or invalid file | | +| 401 | Unauthorized - invalid API token | | +| 413 | File too large | | +| 415 | Unsupported file type | | + +### /files/{file_id}/preview + +#### GET +##### Summary + +Preview/Download a file that was uploaded via Service API + +##### Description + +Preview or download a file uploaded via Service API +Provides secure file preview/download functionality. +Files can only be accessed if they belong to messages within the requesting app's context. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FilePreviewQuery](#filepreviewquery) | +| file_id | path | UUID of the file to preview | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | File retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - file access denied | +| 404 | File not found | + +### /form/human_input/{form_token} + +#### GET +##### Description + +Get a paused human input form by token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | Human input form token | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Form retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Form not found | +| 412 | Form already submitted or expired | + +#### POST +##### Description + +Submit a paused human input form by token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| form_token | 
path | Human input form token | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Form submitted successfully | +| 400 | Bad request - invalid submission data | +| 401 | Unauthorized - invalid API token | +| 404 | Form not found | +| 412 | Form already submitted or expired | + +### /info + +#### GET +##### Summary + +Get app information + +##### Description + +Get basic application information +Returns basic information about the application including name, description, tags, and mode. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Application info retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /messages + +#### GET +##### Summary + +List messages in a conversation + +##### Description + +List messages in a conversation +Retrieves messages with pagination support using first_id. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageListQuery](#messagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Messages retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation or first message not found | + +### /messages/{message_id}/feedbacks + +#### POST +##### Summary + +Submit feedback for a message + +##### Description + +Submit feedback for a message +Allows users to rate messages as like/dislike and provide optional feedback content. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback submitted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Message not found | + +### /messages/{message_id}/suggested + +#### GET +##### Summary + +Get suggested follow-up questions for a message + +##### Description + +Get suggested follow-up questions for a message +Returns AI-generated follow-up questions based on the message content. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Suggested questions retrieved successfully | +| 400 | Suggested questions feature is disabled | +| 401 | Unauthorized - invalid API token | +| 404 | Message not found | +| 500 | Internal server error | + +### /meta + +#### GET +##### Summary + +Get app metadata + +##### Description + +Get application metadata +Returns metadata about the application including configuration and settings. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Description + +Retrieve application input parameters and configuration +Returns the input form parameters and configuration for the application. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Parameters retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Get application site configuration +Returns the site configuration for the application including theme, icons, and text. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Site configuration retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - site not found or tenant archived | + +### /text-to-audio + +#### POST +##### Summary + +Convert text to audio using text-to-speech + +##### Description + +Convert text to audio using text-to-speech +Converts the provided text to audio using the specified voice. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Text successfully converted to audio | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | +| 500 | Internal server error | + +### /workflow/{task_id}/events + +#### GET +##### Description + +Get workflow execution events stream after resume + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Workflow run ID | Yes | string | +| continue_on_pause | query | Whether to keep the stream open across workflow_paused events; specify `"true"` to keep the stream open for `workflow_paused` events. 
| No | string | +| include_state_snapshot | query | Whether to replay from persisted state snapshot, specify `"true"` to include a status snapshot of executed nodes | No | string | +| user | query | End user identifier (query param) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | SSE event stream | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow run not found | + +### /workflows/logs + +#### GET +##### Summary + +Get workflow app logs + +##### Description + +Get workflow execution logs +Returns paginated workflow execution logs with filtering options. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowLogQuery](#workflowlogquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Logs retrieved successfully | [WorkflowAppLogPaginationResponse](#workflowapplogpaginationresponse) | +| 401 | Unauthorized - invalid API token | | + +### /workflows/run + +#### POST +##### Summary + +Execute a workflow + +##### Description + +Execute a workflow +Runs a workflow with the provided inputs and returns the results. +Supports both blocking and streaming response modes. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /workflows/run/{workflow_run_id} + +#### GET +##### Summary + +Get a workflow task running detail + +##### Description + +Get workflow run details +Returns detailed information about a specific workflow run. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow run details retrieved successfully | [WorkflowRunResponse](#workflowrunresponse) | +| 401 | Unauthorized - invalid API token | | +| 404 | Workflow run not found | | + +### /workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop a running workflow task + +##### Description + +Stop a running workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /workflows/{workflow_id}/run + +#### POST +##### Summary + +Run specific workflow by ID + +##### Description + +Execute a specific workflow by ID +Executes a specific workflow version identified by its ID. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | +| workflow_id | path | Workflow ID to execute | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /workspaces/current/models/model-types/{model_type} + +#### GET +##### Summary + +Get available models by model type + +##### Description + +Get available models by model type +Returns a list of available models for the specified model type. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| model_type | path | Type of model to retrieve | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Models retrieved successfully | +| 401 | Unauthorized - invalid API token | + +--- +### Models + +#### Annotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| created_at | | | No | +| hit_count | | | No | +| id | string | | Yes | +| question | | | No | + +#### AnnotationCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | string | Annotation answer | Yes | +| question | string | Annotation question | Yes | + +#### AnnotationList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### AnnotationReplyActionPayload + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | Embedding model name | Yes | +| embedding_provider_name | string | Embedding provider name | Yes | +| score_threshold | number | Score threshold for annotation matching | Yes | + +#### ChatRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate_name | boolean | Auto generate conversation name | No | +| conversation_id | | Conversation UUID | No | +| files | | | No | +| inputs | object | | Yes | +| query | string | | Yes | +| response_mode | | | No | +| retriever_from | string | | No | +| workflow_id | | Workflow ID for advanced chat | No | + +#### ChildChunkCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### ChildChunkListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | + +#### ChildChunkUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### CompletionRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### Condition + +Condition detail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| comparison_operator | string | *Enum:* `"<"`, `"="`, `">"`, `"after"`, `"before"`, `"contains"`, `"empty"`, `"end with"`, `"in"`, `"is"`, `"is not"`, `"not contains"`, `"not empty"`, `"not in"`, `"start with"`, `"≠"`, `"≤"`, `"≥"` | Yes | +| name | string | | Yes | +| value | | | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last 
conversation ID for pagination | No | +| limit | integer | Number of conversations to return | No | +| sort_by | string | Sort order for conversations
*Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### ConversationVariableInfiniteScrollPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ConversationVariableResponse](#conversationvariableresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | + +#### ConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | | | No | +| id | string | | Yes | +| name | string | | Yes | +| updated_at | | | No | +| value | | | No | +| value_type | string | | Yes | + +#### ConversationVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| value | | | Yes | + +#### ConversationVariablesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last variable ID for pagination | No | +| limit | integer | Number of variables to return | No | +| variable_name | | Filter variables by name | No | + +#### DataSetTag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| binding_count | | | No | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | + +#### DatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | Dataset description (max 400 chars) | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | +| provider | string | | No | +| retrieval_model | | | No | +| summary_index_setting 
| | | No | + +#### DatasetListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_all | boolean | Include all datasets | No | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| tag_ids | [ string ] | Filter by tag IDs | No | + +#### DatasetPermissionEnum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DatasetPermissionEnum | string | | | + +#### DatasetUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Dataset description (max 400 chars) | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| external_retrieval_model | | | No | +| indexing_technique | | | No | +| name | | | No | +| partial_member_list | | | No | +| permission | | | No | +| retrieval_model | | | No | + +#### DatasourceNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_published | boolean | | Yes | + +#### DocumentBatchDownloadZipPayload + +Request payload for bulk downloading documents as a zip archive. 
+ +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string (uuid) ] | | Yes | + +#### DocumentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| status | | Document status filter | No | + +#### DocumentMetadataOperation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_id | string | | Yes | +| metadata_list | [ [MetadataDetail](#metadatadetail) ] | | Yes | +| partial_update | boolean | | No | + +#### DocumentTextCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| original_document_id | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| text | string | | Yes | + +#### DocumentTextUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| name | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| text | | | No | + +#### FeedbackListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | Number of feedbacks per page | No | +| page | integer | Page number | No | + +#### FilePreviewQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| as_attachment | boolean | Download as attachment | No | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | 
string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### HitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_ids | | | No | +| external_retrieval_model | | | No | +| query | string | | Yes | +| retrieval_model | | | No | + +#### HumanInputFormSubmitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| inputs | object | | Yes | + +#### JsonValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| JsonValue | | | | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| rating | | | No | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MetadataArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| type | string | *Enum:* `"number"`, `"string"`, `"time"` | Yes | + +#### MetadataDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| value | | | No | + +#### MetadataFilteringCondition + +Metadata Filtering Condition. 
+ +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conditions | | | No | +| logical_operator | | | No | + +#### MetadataOperationData + +Metadata operation data + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| operation_data | [ [DocumentMetadataOperation](#documentmetadataoperation) ] | | Yes | + +#### MetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### PipelineRunApiEntity + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_published | boolean | | Yes | +| response_mode | string | | Yes | +| start_node_id | string | | Yes | + +#### PreProcessingRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | | Yes | +| id | string | | Yes | + +#### ProcessRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mode | string | *Enum:* `"automatic"`, `"custom"`, `"hierarchical"` | Yes | +| rules | | | No | + +#### RerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | | | No | +| reranking_provider_name | | | No | + +#### RetrievalMethod + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| RetrievalMethod | string | | | + +#### RetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| metadata_filtering_conditions | | | No | +| reranking_enable | boolean | | Yes | +| reranking_mode | | | No | +| reranking_model | | | No | +| score_threshold | | | No | +| score_threshold_enabled | boolean | | Yes | +| search_method | [RetrievalMethod](#retrievalmethod) | | Yes | +| top_k | integer | | Yes | +| weights | | | No | 
+ +#### Rule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| parent_mode | | | No | +| pre_processing_rules | | | No | +| segmentation | | | No | +| subchunk_segmentation | | | No | + +#### SegmentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| segments | | | No | + +#### SegmentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | | No | +| status | [ string ] | | No | + +#### SegmentUpdateArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | | | No | +| enabled | | | No | +| keywords | | | No | +| regenerate_child_chunks | boolean | | No | +| summary | | | No | + +#### SegmentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| segment | [SegmentUpdateArgs](#segmentupdateargs) | | Yes | + +#### Segmentation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunk_overlap | integer | | No | +| max_tokens | integer | | Yes | +| separator | string | | No | + +#### SimpleAccount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | + +#### SimpleEndUser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| is_anonymous | boolean | | Yes | +| session_id | | | No | +| type | string | | Yes | + +#### TagBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | | Yes | +| target_id | string | | Yes | + +#### TagCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### TagDeletePayload + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| tag_id | string | | Yes | + +#### TagUnbindingPayload + +Accept the legacy single-tag Service API payload while exposing a normalized tag_ids list internally. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_id | | | No | +| tag_ids | [ string ] | | No | +| target_id | string | | Yes | + +#### TagUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| tag_id | string | | Yes | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### WeightKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | Yes | + +#### WeightModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | | | No | +| vector_setting | | | No | +| weight_type | | | No | + +#### WeightVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | Yes | +| embedding_provider_name | string | | Yes | +| vector_weight | number | | Yes | + +#### WorkflowAppLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowAppLogPartialResponse](#workflowapplogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowAppLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| created_by_role | | | No | 
+| created_from | | | No | +| details | | | No | +| id | string | | Yes | +| workflow_run | | | No | + +#### WorkflowLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at__after | | | No | +| created_at__before | | | No | +| created_by_account | | | No | +| created_by_end_user_session_id | | | No | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | +| status | | | No | + +#### WorkflowRunForLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| exceptions_count | | | No | +| finished_at | | | No | +| id | string | | Yes | +| status | | | No | +| total_steps | | | No | +| total_tokens | | | No | +| triggered_from | | | No | +| version | | | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| response_mode | | | No | + +#### WorkflowRunResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| finished_at | | | No | +| id | string | | Yes | +| inputs | | | No | +| outputs | object | | No | +| status | string | | Yes | +| total_steps | | | No | +| total_tokens | | | No | +| workflow_id | string | | Yes | diff --git a/api/openapi/markdown/web-swagger.md b/api/openapi/markdown/web-swagger.md new file mode 100644 index 0000000000..c9b3b31357 --- /dev/null +++ b/api/openapi/markdown/web-swagger.md @@ -0,0 +1,1224 @@ +# Web API +Public APIs for web applications including file uploads, chat interactions, and app management + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## web +Web application API operations + 
+### /audio-to-text + +#### POST +##### Summary + +Convert audio to text + +##### Description + +Convert audio file to text using speech-to-text service. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 413 | Audio file too large | +| 415 | Unsupported audio type | +| 500 | Internal Server Error | + +### /chat-messages + +#### POST +##### Description + +Create a chat message for conversational applications. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatMessagePayload](#chatmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /chat-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running chat message task. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Task Not Found | +| 500 | Internal Server Error | + +### /completion-messages + +#### POST +##### Description + +Create a completion message for text generation applications. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionMessagePayload](#completionmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /completion-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running completion message task. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Task Not Found | +| 500 | Internal Server Error | + +### /conversations + +#### GET +##### Description + +Retrieve paginated list of conversations for a chat application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| last_id | query | Last conversation ID for pagination | No | string | +| limit | query | Number of conversations to return (1-100) | No | integer | +| pinned | query | Filter by pinned status | No | string | +| sort_by | query | Sort order | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id} + +#### DELETE +##### Description + +Delete a specific conversation. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/name + +#### POST +##### Description + +Rename a specific conversation with a custom name or auto-generate one. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | +| auto_generate | query | Auto-generate conversation name | No | boolean | +| name | query | New conversation name | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation renamed successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/pin + +#### PATCH +##### Description + +Pin a specific conversation to keep it at the top of the list. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation pinned successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/unpin + +#### PATCH +##### Description + +Unpin a specific conversation to remove it from the top of the list. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation unpinned successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /email-code-login + +#### POST +##### Description + +Send email verification code for login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginSendPayload](#emailcodeloginsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Email code sent successfully | +| 400 | Bad request - invalid email format | +| 404 | Account not found | + +### /email-code-login/validity + +#### POST +##### Description + +Verify email code and complete login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginVerifyPayload](#emailcodeloginverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Email code verified and login successful | +| 400 | Bad request - invalid code or token | +| 401 | Invalid token or expired code | +| 404 | Account not found | + +### /files/upload + +#### POST +##### Summary + +Upload a file for use in web applications + +##### Description + +Upload a file for use in web applications +Accepts file uploads for use within web applications, supporting +multiple file types with automatic validation and storage. 
+ +Args: + app_model: The associated application model + end_user: The end user uploading the file + +Form Parameters: + file: The file to upload (required) + source: Optional source type (datasets or None) + +Returns: + dict: File information including ID, URL, and metadata + int: HTTP status code 201 for success + +Raises: + NoFileUploadedError: No file provided in request + TooManyFilesError: Multiple files provided (only one allowed) + FilenameNotExistsError: File has no filename + FileTooLargeError: File exceeds size limit + UnsupportedFileTypeError: File type not supported + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | +| 400 | Bad request - invalid file or parameters | | +| 413 | File too large | | +| 415 | Unsupported file type | | + +### /forgot-password + +#### POST +##### Description + +Send password reset email + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordSendPayload](#forgotpasswordsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Password reset email sent successfully | +| 400 | Bad request - invalid email format | +| 404 | Account not found | +| 429 | Too many requests - rate limit exceeded | + +### /forgot-password/resets + +#### POST +##### Description + +Reset user password with verification token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordResetPayload](#forgotpasswordresetpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Password reset successfully | +| 400 | Bad request - invalid parameters or password mismatch | +| 401 | Invalid or expired token | +| 404 | Account not found | + +### 
/forgot-password/validity + +#### POST +##### Description + +Verify password reset token validity + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordCheckPayload](#forgotpasswordcheckpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Token is valid | +| 400 | Bad request - invalid token format | +| 401 | Invalid or expired token | + +### /form/human_input/{form_token} + +#### GET +##### Summary + +Get human input form definition by token + +##### Description + +GET /api/form/human_input/ + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Submit human input form by token + +##### Description + +POST /api/form/human_input/ + +Request body: +{ + "inputs": { + "content": "User input content" + }, + "action": "Approve" +} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /login + +#### POST +##### Summary + +Authenticate user and login + +##### Description + +Authenticate user for web application access + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoginPayload](#loginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Authentication successful | +| 400 | Bad request - invalid email or password format | +| 401 | Authentication failed - email or password mismatch | +| 403 | Account banned or login 
disabled | +| 404 | Account not found | + +### /login/status + +#### GET +##### Description + +Check login status + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Login status | +| 401 | Login status | + +### /logout + +#### POST +##### Description + +Logout user from web application + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Logout successful | + +### /messages + +#### GET +##### Description + +Retrieve paginated list of messages from a conversation in a chat application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| conversation_id | query | Conversation UUID | Yes | string | +| first_id | query | First message ID for pagination | No | string | +| limit | query | Number of messages to return (1-100) | No | integer | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /messages/{message_id}/feedbacks + +#### POST +##### Description + +Submit feedback (like/dislike) for a specific message. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID | Yes | string | +| content | query | Feedback content | No | string | +| rating | query | Feedback rating | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback submitted successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /messages/{message_id}/more-like-this + +#### GET +##### Description + +Generate a new completion similar to an existing message (completion apps only). 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MessageMoreLikeThisQuery](#messagemorelikethisquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a completion app or feature disabled | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /messages/{message_id}/suggested-questions + +#### GET +##### Description + +Get suggested follow-up questions after a message (chat apps only). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a chat app or feature disabled | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found or Conversation Not Found | +| 500 | Internal Server Error | + +### /meta + +#### GET +##### Summary + +Get app meta + +##### Description + +Retrieve the metadata for a specific app. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Description + +Retrieve the parameters for a specific app. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /passport + +#### GET +##### Description + +Get authentication passport for web application access + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Passport retrieved successfully | +| 401 | Unauthorized - missing app code or invalid authentication | +| 404 | Application or user not found | + +### /remote-files/upload + +#### POST +##### Summary + +Upload a file from a remote URL + +##### Description + +Upload a file from a remote URL +Downloads a file from the provided remote URL and uploads it +to the platform storage for use in web applications. + +Args: + app_model: The associated application model + end_user: The end user making the request + +JSON Parameters: + url: The remote URL to download the file from (required) + +Returns: + dict: File information including ID, signed URL, and metadata + int: HTTP status code 201 for success + +Raises: + RemoteFileUploadError: Failed to fetch file from remote URL + FileTooLargeError: File exceeds size limit + UnsupportedFileTypeError: File type not supported + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Remote file uploaded successfully | [FileWithSignedUrl](#filewithsignedurl) | +| 400 | Bad request - invalid URL or parameters | | +| 413 | File too large | | +| 415 | Unsupported file type | | +| 500 | Failed to fetch remote file | | + +### /remote-files/{url} + +#### GET +##### Summary + +Get information about a remote file + +##### Description + +Get information about a remote file +Retrieves basic information about a file located at a remote URL, +including content type and content length. 
+ +Args: + app_model: The associated application model + end_user: The end user making the request + url: URL-encoded path to the remote file + +Returns: + dict: Remote file information including type and length + +Raises: + HTTPException: If the remote file cannot be accessed + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| url | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Remote file information retrieved successfully | [RemoteFileInfo](#remotefileinfo) | +| 400 | Bad request - invalid URL | | +| 404 | Remote file not found | | +| 500 | Failed to fetch remote file | | + +### /saved-messages + +#### GET +##### Description + +Retrieve paginated list of saved messages for a completion application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| last_id | query | Last message ID for pagination | No | string | +| limit | query | Number of messages to return (1-100) | No | integer | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +#### POST +##### Description + +Save a specific message for later reference. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | query | Message UUID to save | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Message saved successfully | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /saved-messages/{message_id} + +#### DELETE +##### Description + +Remove a message from saved messages. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Message removed successfully | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Retrieve app site information and configuration. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /system-features + +#### GET +##### Summary + +Get system feature flags and configuration + +##### Description + +Get system feature flags and configuration +Returns the current system feature flags and configuration +that control various functionalities across the platform. + +Returns: + dict: System feature configuration object + +This endpoint is akin to the `SystemFeatureApi` endpoint in api/controllers/console/feature.py, +except it is intended for use by the web app, instead of the console dashboard. 
+ +NOTE: This endpoint is unauthenticated by design, as it provides system features +data required for webapp initialization. + +Authentication would create circular dependency (can't authenticate without webapp loading). + +Only non-sensitive configuration data should be returned by this endpoint. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | System features retrieved successfully | +| 500 | Internal server error | + +### /text-to-audio + +#### POST +##### Summary + +Convert text to audio + +##### Description + +Convert text to audio using text-to-speech service. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 500 | Internal Server Error | + +### /webapp/access-mode + +#### GET +##### Description + +Retrieve the access mode for a web application (public or restricted). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| appCode | query | Application code | No | string | +| appId | query | Application ID | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 500 | Internal Server Error | + +### /webapp/permission + +#### GET +##### Description + +Check if user has permission to access a web application. 
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| appId | query | Application ID | Yes | string |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Success |
+| 400 | Bad Request |
+| 401 | Unauthorized |
+| 500 | Internal Server Error |
+
+### /workflows/run
+
+#### POST
+##### Summary
+
+Run workflow
+
+##### Description
+
+Execute a workflow with provided inputs and files.
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Success |
+| 400 | Bad Request |
+| 401 | Unauthorized |
+| 403 | Forbidden |
+| 404 | App Not Found |
+| 500 | Internal Server Error |
+
+### /workflows/tasks/{task_id}/stop
+
+#### POST
+##### Summary
+
+Stop workflow task
+
+##### Description
+
+Stop a running workflow task.
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| task_id | path | Task ID to stop | Yes | string |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Success |
+| 400 | Bad Request |
+| 401 | Unauthorized |
+| 403 | Forbidden |
+| 404 | Task Not Found |
+| 500 | Internal Server Error |
+
+---
+## default
+Default namespace
+
+### /workflow/{task_id}/events
+
+#### GET
+##### Summary
+
+Get workflow execution events stream after resume
+
+##### Description
+
+GET /api/workflow/{task_id}/events
+
+Returns Server-Sent Events stream.
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +### Models + +#### AppAccessModeQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| appCode | | Application code | No | +| appId | | Application ID | No | + +#### ChatMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | Conversation ID | No | +| files | | Files to be processed | No | +| inputs | object | Input variables for the chat | Yes | +| parent_message_id | | Parent message ID | No | +| query | string | User query/message | Yes | +| response_mode | | Response mode: blocking or streaming | No | +| retriever_from | string | Source of retriever | No | + +#### CompletionMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | Files to be processed | No | +| inputs | object | Input variables for the completion | Yes | +| query | string | Query text for completion | No | +| response_mode | | Response mode: blocking or streaming | No | +| retriever_from | string | Source of retriever | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | +| pinned | | | No | +| sort_by | string | *Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### EmailCodeLoginSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### 
EmailCodeLoginVerifyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### FileWithSignedUrl + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| size | integer | | Yes | +| url | | | No | + +#### ForgotPasswordCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### LoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| password | string | | Yes | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| rating | | | No | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- 
| +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MessageMoreLikeThisQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | Yes | + +#### RemoteFileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_length | integer | | Yes | +| file_type | string | | Yes | + +#### RemoteFileUploadPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| url | string (uri) | Remote file URL | Yes | + +#### SavedMessageCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | | Yes | + +#### SavedMessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py index 1b97746dea..0900dfda97 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py @@ -3,6 +3,7 @@ from collections.abc import Mapping from typing import Any, cast from unittest.mock import MagicMock +import pytest from dify_trace_aliyun.entities.semconv import ( GEN_AI_FRAMEWORK, GEN_AI_SESSION_ID, @@ -31,7 +32,7 @@ from graphon.enums import WorkflowNodeExecutionStatus from models import EndUser -def test_get_user_id_from_message_data_no_end_user(monkeypatch): +def 
test_get_user_id_from_message_data_no_end_user(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = None @@ -39,7 +40,7 @@ def test_get_user_id_from_message_data_no_end_user(monkeypatch): assert get_user_id_from_message_data(message_data) == "account_id" -def test_get_user_id_from_message_data_with_end_user(monkeypatch): +def test_get_user_id_from_message_data_with_end_user(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = "end_user_id" @@ -57,7 +58,7 @@ def test_get_user_id_from_message_data_with_end_user(monkeypatch): assert get_user_id_from_message_data(message_data) == "session_id" -def test_get_user_id_from_message_data_end_user_not_found(monkeypatch): +def test_get_user_id_from_message_data_end_user_not_found(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = "end_user_id" @@ -111,7 +112,7 @@ def test_get_workflow_node_status(): assert status.status_code == StatusCode.UNSET -def test_create_links_from_trace_id(monkeypatch): +def test_create_links_from_trace_id(monkeypatch: pytest.MonkeyPatch): # Mock create_link mock_link = MagicMock(spec=Link) import dify_trace_aliyun.data_exporter.traceclient diff --git a/api/providers/trace/trace-arize-phoenix/src/dify_trace_arize_phoenix/arize_phoenix_trace.py b/api/providers/trace/trace-arize-phoenix/src/dify_trace_arize_phoenix/arize_phoenix_trace.py index 96df49ed0e..a0d150e1b6 100644 --- a/api/providers/trace/trace-arize-phoenix/src/dify_trace_arize_phoenix/arize_phoenix_trace.py +++ b/api/providers/trace/trace-arize-phoenix/src/dify_trace_arize_phoenix/arize_phoenix_trace.py @@ -1,9 +1,11 @@ import json import logging import os +import re import traceback +from collections.abc import Mapping, Sequence from datetime import datetime, timedelta -from typing import 
Any, Union, cast +from typing import Any, Protocol, Union, cast from urllib.parse import urlparse from openinference.semconv.trace import ( @@ -19,7 +21,7 @@ from opentelemetry.sdk import trace as trace_sdk from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace.export import SimpleSpanProcessor from opentelemetry.semconv.attributes import exception_attributes -from opentelemetry.trace import Span, Status, StatusCode, set_span_in_context, use_span +from opentelemetry.trace import Span, Status, StatusCode, get_current_span, set_span_in_context, use_span from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from opentelemetry.util.types import AttributeValue from sqlalchemy.orm import sessionmaker @@ -36,16 +38,106 @@ from core.ops.entities.trace_entity import ( TraceTaskName, WorkflowTraceInfo, ) +from core.ops.exceptions import PendingTraceParentContextError from core.ops.utils import JSON_DICT_ADAPTER from core.repositories import DifyCoreRepositoryFactory from dify_trace_arize_phoenix.config import ArizeConfig, PhoenixConfig from extensions.ext_database import db +from extensions.ext_redis import redis_client from graphon.enums import WorkflowNodeExecutionStatus from models.model import EndUser, MessageFile from models.workflow import WorkflowNodeExecutionTriggeredFrom logger = logging.getLogger(__name__) +# This parent-span carrier store is intentionally Phoenix-local for the current +# nested workflow tracing feature. If other trace providers need the same +# cross-task parent restoration behavior, move the storage and retry signaling +# behind a core trace coordination interface instead of duplicating it here. 
+_PHOENIX_PARENT_SPAN_CONTEXT_TTL_SECONDS = 300 +_TRACEPARENT_PATTERN = re.compile( + r"^(?P[0-9a-f]{2})-(?P[0-9a-f]{32})-(?P[0-9a-f]{16})-(?P[0-9a-f]{2})$" +) + + +def _phoenix_parent_span_redis_key(parent_node_execution_id: str) -> str: + """Build the Redis key that stores a restorable Phoenix parent span carrier.""" + return f"trace:phoenix:parent_span:{parent_node_execution_id}" + + +def _publish_parent_span_context(parent_node_execution_id: str, carrier: Mapping[str, str]) -> None: + """Persist a tracecontext carrier so nested workflow spans can restore the tool span parent.""" + redis_client.setex( + _phoenix_parent_span_redis_key(parent_node_execution_id), + _PHOENIX_PARENT_SPAN_CONTEXT_TTL_SECONDS, + safe_json_dumps(dict(carrier)), + ) + + +def _resolve_published_parent_span_context(parent_node_execution_id: str) -> dict[str, str]: + """Load a previously published tool-span carrier for nested workflow parenting.""" + raw_carrier = redis_client.get(_phoenix_parent_span_redis_key(parent_node_execution_id)) + if raw_carrier is None: + raise PendingTraceParentContextError(parent_node_execution_id) + + if isinstance(raw_carrier, bytes): + raw_carrier = raw_carrier.decode("utf-8") + + carrier = json.loads(raw_carrier) + if not isinstance(carrier, dict): + raise ValueError( + "Phoenix parent span context must be stored as a JSON object: " + f"parent_node_execution_id={parent_node_execution_id}" + ) + + normalized_carrier = {str(key): str(value) for key, value in carrier.items()} + if not normalized_carrier: + raise ValueError( + f"Phoenix parent span context payload is empty: parent_node_execution_id={parent_node_execution_id}" + ) + + traceparent = normalized_carrier.get("traceparent") + if not isinstance(traceparent, str): + raise ValueError( + "Phoenix parent span context payload is missing traceparent: " + f"parent_node_execution_id={parent_node_execution_id}" + ) + + traceparent_match = _TRACEPARENT_PATTERN.fullmatch(traceparent) + if traceparent_match is 
None: + raise ValueError( + "Phoenix parent span context payload has invalid traceparent format: " + f"parent_node_execution_id={parent_node_execution_id}" + ) + + if traceparent_match.group("version") == "ff": + raise ValueError( + "Phoenix parent span context payload has unsupported traceparent version: " + f"parent_node_execution_id={parent_node_execution_id}" + ) + + if traceparent_match.group("trace_id") == "0" * 32: + raise ValueError( + "Phoenix parent span context payload has zero trace_id in traceparent: " + f"parent_node_execution_id={parent_node_execution_id}" + ) + + if traceparent_match.group("span_id") == "0" * 16: + raise ValueError( + "Phoenix parent span context payload has zero span_id in traceparent: " + f"parent_node_execution_id={parent_node_execution_id}" + ) + + extracted_context = TraceContextTextMapPropagator().extract(carrier=normalized_carrier) + extracted_span_context = get_current_span(extracted_context).get_span_context() + if not extracted_span_context.is_valid or not extracted_span_context.is_remote: + raise ValueError( + "Phoenix parent span context payload could not be restored into a valid parent span: " + f"parent_node_execution_id={parent_node_execution_id}" + ) + + return normalized_carrier + def setup_tracer(arize_phoenix_config: ArizeConfig | PhoenixConfig) -> tuple[trace_sdk.Tracer, SimpleSpanProcessor]: """Configure OpenTelemetry tracer with OTLP exporter for Arize/Phoenix.""" @@ -177,6 +269,246 @@ def _get_node_span_kind(node_type: str) -> OpenInferenceSpanKindValues: return _NODE_TYPE_TO_SPAN_KIND.get(node_type, OpenInferenceSpanKindValues.CHAIN) +def _resolve_workflow_session_id(trace_info: WorkflowTraceInfo) -> str: + """Resolve the workflow session ID for Phoenix workflow spans.""" + if trace_info.conversation_id: + return trace_info.conversation_id + + parent_workflow_run_id, _ = _resolve_workflow_parent_context(trace_info) + if parent_workflow_run_id: + return parent_workflow_run_id + + return 
trace_info.workflow_run_id + + +def _resolve_workflow_parent_context(trace_info: BaseTraceInfo) -> tuple[str | None, str | None]: + """Expose the typed parent context already resolved on the trace info.""" + return trace_info.resolved_parent_context + + +def _resolve_workflow_root_trace_id(trace_info: WorkflowTraceInfo) -> str: + """Resolve the canonical root trace ID for Phoenix workflow spans.""" + trace_correlation_override, _ = _resolve_workflow_parent_context(trace_info) + return trace_correlation_override or trace_info.resolved_trace_id or trace_info.workflow_run_id + + +class _NodeExecutionIdentityLike(Protocol): + @property + def node_execution_id(self) -> str | None: ... + + @property + def node_id(self) -> str: ... + + @property + def predecessor_node_id(self) -> str | None: ... + + +class _NodeExecutionLike(_NodeExecutionIdentityLike, Protocol): + @property + def id(self) -> str: ... + + @property + def node_type(self) -> str: ... + + @property + def title(self) -> str | None: ... + + @property + def inputs(self) -> Mapping[str, Any] | None: ... + + @property + def process_data(self) -> Mapping[str, Any] | None: ... + + @property + def outputs(self) -> Mapping[str, Any] | None: ... + + @property + def status(self) -> WorkflowNodeExecutionStatus: ... + + @property + def error(self) -> str | None: ... + + @property + def elapsed_time(self) -> float | None: ... + + @property + def metadata(self) -> Mapping[Any, Any] | None: ... + + @property + def created_at(self) -> datetime | None: ... 
+ + +_PHOENIX_STRUCTURED_NODE_TYPES = frozenset({"start", "end", "loop", "iteration"}) + + +def _resolve_workflow_span_name(trace_info: WorkflowTraceInfo) -> str: + """Resolve the Phoenix workflow span display name.""" + workflow_run_id = trace_info.workflow_run_id.strip() if trace_info.workflow_run_id else "" + if workflow_run_id: + return f"{TraceTaskName.WORKFLOW_TRACE.value}_{workflow_run_id}" + return TraceTaskName.WORKFLOW_TRACE.value + + +def _build_node_title_by_id(trace_info: WorkflowTraceInfo) -> dict[str, str]: + """Build an authoritative node-title index from the persisted workflow graph.""" + workflow_data = trace_info.workflow_data + workflow_graph = getattr(workflow_data, "graph_dict", None) + if not isinstance(workflow_graph, Mapping): + workflow_graph = workflow_data.get("graph") if isinstance(workflow_data, Mapping) else None + if not isinstance(workflow_graph, Mapping): + return {} + + graph_nodes = workflow_graph.get("nodes") + if not isinstance(graph_nodes, Sequence): + return {} + + node_title_by_id: dict[str, str] = {} + for graph_node in graph_nodes: + if not isinstance(graph_node, Mapping): + continue + node_id = graph_node.get("id") + node_data = graph_node.get("data") + if not isinstance(node_id, str) or not isinstance(node_data, Mapping): + continue + node_title = node_data.get("title") + if isinstance(node_title, str) and node_title.strip(): + node_title_by_id[node_id] = node_title.strip() + + return node_title_by_id + + +def _resolve_workflow_node_span_name( + node_execution: _NodeExecutionLike, + node_title_by_id: Mapping[str, str] | None = None, +) -> str: + """Resolve the Phoenix workflow node span display name.""" + node_type = str(node_execution.node_type or "") + graph_node_title = None + if node_title_by_id is not None and isinstance(node_execution.node_id, str): + graph_node_title = node_title_by_id.get(node_execution.node_id) + + node_title = graph_node_title or (node_execution.title.strip() if isinstance(node_execution.title, 
str) else "") + if node_title: + return f"{node_type}_{node_title}" + return node_type + + +def _get_node_execution_id(node_execution: _NodeExecutionIdentityLike) -> str: + """Return the stable execution identifier for a workflow node execution.""" + return str(getattr(node_execution, "id", None) or node_execution.node_execution_id) + + +def _build_execution_id_by_node_id(node_executions: Sequence[_NodeExecutionIdentityLike]) -> dict[str, str]: + """Index unique workflow graph node ids by execution id. + + This Phoenix-local hierarchy reconstruction intentionally drops ambiguous + node ids instead of guessing based on repository order. That keeps parent + selection deterministic until upstream tracing exposes explicit parent span + data for repeated executions. + """ + execution_id_by_node_id: dict[str, str] = {} + ambiguous_node_ids: set[str] = set() + + for node_execution in node_executions: + node_id = node_execution.node_id + if not isinstance(node_id, str): + continue + execution_id = _get_node_execution_id(node_execution) + + if node_id in ambiguous_node_ids: + continue + + existing_execution_id = execution_id_by_node_id.get(node_id) + if existing_execution_id is None: + execution_id_by_node_id[node_id] = execution_id + continue + + if existing_execution_id != execution_id: + ambiguous_node_ids.add(node_id) + execution_id_by_node_id.pop(node_id, None) + + return execution_id_by_node_id + + +def _build_graph_parent_index(node_executions: Sequence[_NodeExecutionIdentityLike]) -> dict[str, str]: + """Build an execution-id parent index from predecessor node ids.""" + execution_id_by_node_id = _build_execution_id_by_node_id(node_executions) + graph_parent_index: dict[str, str] = {} + + for node_execution in node_executions: + predecessor_node_id = node_execution.predecessor_node_id + if not isinstance(predecessor_node_id, str): + continue + + predecessor_execution_id = execution_id_by_node_id.get(predecessor_node_id) + if predecessor_execution_id is not None: + 
execution_id = _get_node_execution_id(node_execution) + graph_parent_index[execution_id] = predecessor_execution_id + + return graph_parent_index + + +def _resolve_structured_parent_execution_id( + node_execution: object, execution_id_by_node_id: Mapping[str, str] +) -> str | None: + """Resolve Phoenix-local structured parents from loop/iteration node ids. + + Any execution carrying ``iteration_id`` or ``loop_id`` belongs to an + enclosing structured node. When predecessor node ids are ambiguous because + the graph node repeats inside that structure, Phoenix can still keep the + child span under the enclosing loop/iteration span without relying on + execution-order heuristics. + """ + execution_metadata = getattr(node_execution, "execution_metadata_dict", None) + if not isinstance(execution_metadata, Mapping): + execution_metadata = getattr(node_execution, "metadata", None) + if not isinstance(execution_metadata, Mapping): + execution_metadata = {} + + for enclosing_node_id in ( + getattr(node_execution, "iteration_id", None), + getattr(node_execution, "loop_id", None), + execution_metadata.get("iteration_id"), + execution_metadata.get("loop_id"), + ): + if not isinstance(enclosing_node_id, str): + continue + + enclosing_execution_id = execution_id_by_node_id.get(enclosing_node_id) + if enclosing_execution_id is not None: + return enclosing_execution_id + + return None + + +def _resolve_node_parent( + execution_id: str, + predecessor_execution_id: str | None, + structured_parent_execution_id: str | None, + span_by_execution_id: Mapping[str, Span], + graph_parent_index: Mapping[str, str], + workflow_span: Span, +) -> Span: + """Resolve the parent span for a workflow node execution.""" + if predecessor_execution_id is not None: + predecessor_span = span_by_execution_id.get(predecessor_execution_id) + if predecessor_span is not None: + return predecessor_span + + graph_parent_execution_id = graph_parent_index.get(execution_id) + if graph_parent_execution_id is not 
None: + graph_parent_span = span_by_execution_id.get(graph_parent_execution_id) + if graph_parent_span is not None: + return graph_parent_span + + if structured_parent_execution_id is not None: + structured_parent_span = span_by_execution_id.get(structured_parent_execution_id) + if structured_parent_span is not None: + return structured_parent_span + + return workflow_span + + class ArizePhoenixDataTrace(BaseTraceInstance): def __init__( self, @@ -189,6 +521,8 @@ class ArizePhoenixDataTrace(BaseTraceInstance): self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001") self.propagator = TraceContextTextMapPropagator() self.dify_trace_ids: set[str] = set() + self.root_span_carriers: dict[str, dict[str, str]] = {} + self.carrier: dict[str, str] = {} def trace(self, trace_info: BaseTraceInfo): logger.info("[Arize/Phoenix] Trace Entity Info: %s", trace_info) @@ -235,13 +569,41 @@ class ArizePhoenixDataTrace(BaseTraceInstance): file_list=safe_json_dumps(file_list), query=trace_info.query or "", ) + workflow_session_id = _resolve_workflow_session_id(trace_info) + parent_workflow_run_id, parent_node_execution_id = _resolve_workflow_parent_context(trace_info) + logger.info( + "[Arize/Phoenix] Workflow session resolution: workflow_run_id=%s conversation_id=%s " + "parent_workflow_run_id=%s parent_node_execution_id=%s resolved_session_id=%s", + trace_info.workflow_run_id, + trace_info.conversation_id, + parent_workflow_run_id, + parent_node_execution_id, + workflow_session_id, + ) - dify_trace_id = trace_info.trace_id or trace_info.message_id or trace_info.workflow_run_id - self.ensure_root_span(dify_trace_id) - root_span_context = self.propagator.extract(carrier=self.carrier) + if parent_node_execution_id: + workflow_parent_carrier = _resolve_published_parent_span_context(parent_node_execution_id) + else: + root_trace_id = _resolve_workflow_root_trace_id(trace_info) + workflow_root_span_name: str | None = trace_info.workflow_run_id + if not 
isinstance(workflow_root_span_name, str) or not workflow_root_span_name.strip(): + workflow_root_span_name = None + + workflow_parent_carrier = self.ensure_root_span( + root_trace_id, + root_span_name=workflow_root_span_name, + root_span_attributes={ + SpanAttributes.INPUT_VALUE: safe_json_dumps(trace_info.workflow_run_inputs), + SpanAttributes.INPUT_MIME_TYPE: OpenInferenceMimeTypeValues.JSON.value, + SpanAttributes.OUTPUT_VALUE: safe_json_dumps(trace_info.workflow_run_outputs), + SpanAttributes.OUTPUT_MIME_TYPE: OpenInferenceMimeTypeValues.JSON.value, + }, + ) + + workflow_span_context = self.propagator.extract(carrier=workflow_parent_carrier) workflow_span = self.tracer.start_span( - name=TraceTaskName.WORKFLOW_TRACE.value, + name=_resolve_workflow_span_name(trace_info), attributes={ SpanAttributes.OPENINFERENCE_SPAN_KIND: OpenInferenceSpanKindValues.CHAIN.value, SpanAttributes.INPUT_VALUE: safe_json_dumps(trace_info.workflow_run_inputs), @@ -249,10 +611,10 @@ class ArizePhoenixDataTrace(BaseTraceInstance): SpanAttributes.OUTPUT_VALUE: safe_json_dumps(trace_info.workflow_run_outputs), SpanAttributes.OUTPUT_MIME_TYPE: OpenInferenceMimeTypeValues.JSON.value, SpanAttributes.METADATA: safe_json_dumps(metadata), - SpanAttributes.SESSION_ID: trace_info.conversation_id or "", + SpanAttributes.SESSION_ID: workflow_session_id or "", }, start_time=datetime_to_nanos(trace_info.start_time), - context=root_span_context, + context=workflow_span_context, ) # Through workflow_run_id, get all_nodes_execution using repository @@ -276,16 +638,50 @@ class ArizePhoenixDataTrace(BaseTraceInstance): workflow_node_executions = workflow_node_execution_repository.get_by_workflow_execution( workflow_execution_id=trace_info.workflow_run_id ) + node_title_by_id = _build_node_title_by_id(trace_info) + execution_id_by_node_id = _build_execution_id_by_node_id(workflow_node_executions) + graph_parent_index = _build_graph_parent_index(workflow_node_executions) + node_execution_by_execution_id = 
{ + _get_node_execution_id(node_execution): node_execution for node_execution in workflow_node_executions + } + span_by_execution_id: dict[str, Span] = {} + emitting_execution_ids: set[str] = set() + workflow_span_error: Exception | str | None = trace_info.error try: - for node_execution in workflow_node_executions: + + def emit_node_span(node_execution: _NodeExecutionLike) -> Span: + execution_id = _get_node_execution_id(node_execution) + existing_span = span_by_execution_id.get(execution_id) + if existing_span is not None: + return existing_span + + graph_parent_execution_id = graph_parent_index.get(execution_id) + structured_parent_execution_id = _resolve_structured_parent_execution_id( + node_execution, execution_id_by_node_id + ) + + if execution_id not in emitting_execution_ids: + emitting_execution_ids.add(execution_id) + try: + for parent_execution_id in (graph_parent_execution_id, structured_parent_execution_id): + if parent_execution_id is None or parent_execution_id == execution_id: + continue + if parent_execution_id in span_by_execution_id: + continue + parent_node_execution = node_execution_by_execution_id.get(parent_execution_id) + if parent_node_execution is not None: + emit_node_span(parent_node_execution) + finally: + emitting_execution_ids.discard(execution_id) + tenant_id = trace_info.tenant_id # Use from trace_info instead app_id = trace_info.metadata.get("app_id") # Use from trace_info instead inputs_value = node_execution.inputs or {} outputs_value = node_execution.outputs or {} created_at = node_execution.created_at or datetime.now() - elapsed_time = node_execution.elapsed_time + elapsed_time = node_execution.elapsed_time or 0 finished_at = created_at + timedelta(seconds=elapsed_time) process_data = node_execution.process_data or {} @@ -324,9 +720,17 @@ class ArizePhoenixDataTrace(BaseTraceInstance): node_metadata["prompt_tokens"] = usage_data.get("prompt_tokens", 0) node_metadata["completion_tokens"] = usage_data.get("completion_tokens", 0) 
- workflow_span_context = set_span_in_context(workflow_span) + parent_span = _resolve_node_parent( + execution_id=execution_id, + predecessor_execution_id=None, + structured_parent_execution_id=structured_parent_execution_id, + span_by_execution_id=span_by_execution_id, + graph_parent_index=graph_parent_index, + workflow_span=workflow_span, + ) + workflow_span_context = set_span_in_context(parent_span) node_span = self.tracer.start_span( - name=node_execution.node_type, + name=_resolve_workflow_node_span_name(node_execution, node_title_by_id), attributes={ SpanAttributes.OPENINFERENCE_SPAN_KIND: span_kind.value, SpanAttributes.INPUT_VALUE: safe_json_dumps(inputs_value), @@ -334,13 +738,20 @@ class ArizePhoenixDataTrace(BaseTraceInstance): SpanAttributes.OUTPUT_VALUE: safe_json_dumps(outputs_value), SpanAttributes.OUTPUT_MIME_TYPE: OpenInferenceMimeTypeValues.JSON.value, SpanAttributes.METADATA: safe_json_dumps(node_metadata), - SpanAttributes.SESSION_ID: trace_info.conversation_id or "", + SpanAttributes.SESSION_ID: workflow_session_id or "", }, start_time=datetime_to_nanos(created_at), context=workflow_span_context, ) - + span_by_execution_id[execution_id] = node_span + node_span_error: Exception | str | None = None try: + if node_execution.node_type == "tool": + parent_span_carrier: dict[str, str] = {} + with use_span(node_span, end_on_exit=False): + self.propagator.inject(carrier=parent_span_carrier) + _publish_parent_span_context(execution_id, parent_span_carrier) + if node_execution.node_type == "llm": llm_attributes: dict[str, Any] = { SpanAttributes.INPUT_VALUE: json.dumps(process_data.get("prompts", []), ensure_ascii=False), @@ -362,17 +773,26 @@ class ArizePhoenixDataTrace(BaseTraceInstance): ) llm_attributes.update(self._construct_llm_attributes(process_data.get("prompts", []))) node_span.set_attributes(llm_attributes) + except Exception as e: + node_span_error = e + raise finally: - if node_execution.status == WorkflowNodeExecutionStatus.FAILED: + if 
node_span_error is not None: + set_span_status(node_span, node_span_error) + elif node_execution.status == WorkflowNodeExecutionStatus.FAILED: set_span_status(node_span, node_execution.error) else: set_span_status(node_span) node_span.end(end_time=datetime_to_nanos(finished_at)) + return node_span + + for node_execution in workflow_node_executions: + emit_node_span(node_execution) + except Exception as e: + workflow_span_error = e + raise finally: - if trace_info.error: - set_span_status(workflow_span, trace_info.error) - else: - set_span_status(workflow_span) + set_span_status(workflow_span, workflow_span_error) workflow_span.end(end_time=datetime_to_nanos(trace_info.end_time)) def message_trace(self, trace_info: MessageTraceInfo): @@ -735,22 +1155,39 @@ class ArizePhoenixDataTrace(BaseTraceInstance): finally: span.end(end_time=datetime_to_nanos(trace_info.end_time)) - def ensure_root_span(self, dify_trace_id: str | None): + def ensure_root_span( + self, + dify_trace_id: str | None, + *, + root_span_name: str | None = None, + root_span_attributes: Mapping[str, AttributeValue] | None = None, + ): """Ensure a unique root span exists for the given Dify trace ID.""" - if str(dify_trace_id) not in self.dify_trace_ids: - self.carrier: dict[str, str] = {} + trace_key = str(dify_trace_id) + if trace_key not in self.dify_trace_ids: + carrier: dict[str, str] = {} - root_span = self.tracer.start_span(name="Dify") - root_span.set_attribute(SpanAttributes.OPENINFERENCE_SPAN_KIND, OpenInferenceSpanKindValues.CHAIN.value) - root_span.set_attribute("dify_project_name", str(self.project)) - root_span.set_attribute("dify_trace_id", str(dify_trace_id)) + span_name = root_span_name.strip() if isinstance(root_span_name, str) and root_span_name.strip() else "Dify" + root_span_attributes_dict: dict[str, AttributeValue] = { + SpanAttributes.OPENINFERENCE_SPAN_KIND: OpenInferenceSpanKindValues.CHAIN.value, + "dify_project_name": str(self.project), + "dify_trace_id": trace_key, + } + if 
root_span_attributes: + root_span_attributes_dict.update(root_span_attributes) + + root_span = self.tracer.start_span(name=span_name, attributes=root_span_attributes_dict) with use_span(root_span, end_on_exit=False): - self.propagator.inject(carrier=self.carrier) + self.propagator.inject(carrier=carrier) set_span_status(root_span) root_span.end() - self.dify_trace_ids.add(str(dify_trace_id)) + self.dify_trace_ids.add(trace_key) + self.root_span_carriers[trace_key] = carrier + + self.carrier = self.root_span_carriers[trace_key] + return self.carrier def api_check(self): try: diff --git a/api/providers/trace/trace-arize-phoenix/tests/unit_tests/arize_phoenix_trace/test_arize_phoenix_trace.py b/api/providers/trace/trace-arize-phoenix/tests/unit_tests/arize_phoenix_trace/test_arize_phoenix_trace.py index e9ecc2e083..dd260aeee5 100644 --- a/api/providers/trace/trace-arize-phoenix/tests/unit_tests/arize_phoenix_trace/test_arize_phoenix_trace.py +++ b/api/providers/trace/trace-arize-phoenix/tests/unit_tests/arize_phoenix_trace/test_arize_phoenix_trace.py @@ -1,10 +1,21 @@ from datetime import UTC, datetime, timedelta -from typing import cast +from types import SimpleNamespace +from typing import Any, cast from unittest.mock import MagicMock, patch +import dify_trace_arize_phoenix.arize_phoenix_trace as arize_phoenix_trace_module import pytest from dify_trace_arize_phoenix.arize_phoenix_trace import ( + _NODE_TYPE_TO_SPAN_KIND, ArizePhoenixDataTrace, + _build_graph_parent_index, + _get_node_span_kind, + _phoenix_parent_span_redis_key, + _resolve_node_parent, + _resolve_published_parent_span_context, + _resolve_structured_parent_execution_id, + _resolve_workflow_parent_context, + _resolve_workflow_session_id, datetime_to_nanos, error_to_string, safe_json_dumps, @@ -13,6 +24,7 @@ from dify_trace_arize_phoenix.arize_phoenix_trace import ( wrap_span_metadata, ) from dify_trace_arize_phoenix.config import ArizeConfig, PhoenixConfig +from openinference.semconv.trace import 
OpenInferenceSpanKindValues, SpanAttributes from opentelemetry.sdk.trace import Tracer from opentelemetry.semconv.trace import SpanAttributes as OTELSpanAttributes from opentelemetry.trace import StatusCode @@ -24,8 +36,12 @@ from core.ops.entities.trace_entity import ( ModerationTraceInfo, SuggestedQuestionTraceInfo, ToolTraceInfo, + TraceTaskName, + WorkflowNodeTraceInfo, WorkflowTraceInfo, ) +from core.ops.exceptions import PendingTraceParentContextError +from graphon.enums import BUILT_IN_NODE_TYPES, BuiltinNodeTypes # --- Helpers --- @@ -73,6 +89,80 @@ def _make_message_info(**kwargs): return MessageTraceInfo(**defaults) +def _get_start_span_call(start_span_mock, *, span_name: str): + for call in start_span_mock.call_args_list: + if call.kwargs.get("name") == span_name: + return call + raise AssertionError(f"Could not find start_span call with name={span_name!r}") + + +def _make_node_execution(**kwargs): + defaults = { + "node_type": "tool", + "status": "succeeded", + "inputs": {}, + "outputs": {}, + "created_at": _dt(), + "elapsed_time": 1.0, + "process_data": {}, + "metadata": {}, + "title": "Node", + "id": "node-execution-1", + "node_execution_id": "node-execution-1", + "node_id": "node-1", + "predecessor_node_id": None, + "iteration_id": None, + "loop_id": None, + "error": None, + } + defaults.update(kwargs) + node_execution = MagicMock() + for key, value in defaults.items(): + setattr(node_execution, key, value) + return node_execution + + +def _make_workflow_trace_info(**kwargs) -> WorkflowTraceInfo: + defaults = { + "workflow_id": "workflow-1", + "tenant_id": "tenant-1", + "workflow_run_id": "workflow-run-1", + "workflow_run_elapsed_time": 1.0, + "workflow_run_status": "succeeded", + "workflow_run_inputs": {"input": "value"}, + "workflow_run_outputs": {"output": "value"}, + "workflow_run_version": "1.0", + "total_tokens": 10, + "file_list": ["file-1"], + "query": "hello", + "metadata": {"app_id": "app-1"}, + "start_time": _dt(), + "end_time": _dt() + 
timedelta(seconds=1), + } + defaults.update(kwargs) + return WorkflowTraceInfo(**defaults) + + +def _make_workflow_node_trace_info(**kwargs) -> WorkflowNodeTraceInfo: + defaults = { + "workflow_id": "workflow-1", + "workflow_run_id": "workflow-run-1", + "tenant_id": "tenant-1", + "node_execution_id": "node-execution-1", + "node_id": "node-1", + "node_type": "tool", + "title": "Node 1", + "status": "succeeded", + "elapsed_time": 1.0, + "index": 1, + "metadata": {"app_id": "app-1"}, + "start_time": _dt(), + "end_time": _dt() + timedelta(seconds=1), + } + defaults.update(kwargs) + return WorkflowNodeTraceInfo(**defaults) + + # --- Utility Function Tests --- @@ -143,6 +233,258 @@ def test_wrap_span_metadata(): assert res == {"a": 1, "b": 2, "created_from": "Dify"} +class TestGetNodeSpanKind: + def test_all_node_types_are_mapped_correctly(self): + special_mappings = { + BuiltinNodeTypes.LLM: OpenInferenceSpanKindValues.LLM, + BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL: OpenInferenceSpanKindValues.RETRIEVER, + BuiltinNodeTypes.TOOL: OpenInferenceSpanKindValues.TOOL, + BuiltinNodeTypes.AGENT: OpenInferenceSpanKindValues.AGENT, + } + + for node_type in BUILT_IN_NODE_TYPES: + expected_span_kind = special_mappings.get(node_type, OpenInferenceSpanKindValues.CHAIN) + actual_span_kind = _get_node_span_kind(node_type) + assert actual_span_kind == expected_span_kind, ( + f"Node type {node_type!r} was mapped to {actual_span_kind}, but {expected_span_kind} was expected." 
+ ) + + def test_unknown_string_defaults_to_chain(self): + assert _get_node_span_kind("some-future-node-type") == OpenInferenceSpanKindValues.CHAIN + + def test_stale_dataset_retrieval_not_in_mapping(self): + assert "dataset_retrieval" not in _NODE_TYPE_TO_SPAN_KIND + + +class TestWorkflowSessionResolution: + def test_prefers_conversation_id(self): + info = _make_workflow_trace_info(conversation_id="conversation-1") + + assert _resolve_workflow_session_id(info) == "conversation-1" + + def test_nested_workflow_keeps_own_conversation_id_when_parent_context_exists(self): + info = _make_workflow_trace_info( + conversation_id="conversation-1", + metadata={ + "app_id": "app-1", + "parent_trace_context": { + "parent_workflow_run_id": "outer-workflow-run-1", + "parent_node_execution_id": "outer-node-execution-1", + }, + }, + ) + + assert _resolve_workflow_session_id(info) == "conversation-1" + + def test_uses_parent_workflow_run_id_for_nested_parent_trace_context(self): + info = _make_workflow_trace_info( + conversation_id=None, + metadata={ + "app_id": "app-1", + "parent_trace_context": { + "parent_workflow_run_id": "outer-workflow-run-1", + "parent_node_execution_id": "outer-node-execution-1", + }, + }, + ) + + assert _resolve_workflow_session_id(info) == "outer-workflow-run-1" + + def test_falls_back_to_workflow_run_id(self): + info = _make_workflow_trace_info(conversation_id=None) + + assert _resolve_workflow_session_id(info) == "workflow-run-1" + + def test_parent_context_helper_delegates_to_resolved_parent_context(self): + info = MagicMock() + info.resolved_parent_context = ("outer-workflow-run-1", "outer-node-execution-1") + + assert _resolve_workflow_parent_context(info) == info.resolved_parent_context + + +class TestPhoenixParentSpanBridgeHelpers: + def test_parent_span_redis_key_is_stable(self): + assert _phoenix_parent_span_redis_key("outer-node-execution-1") == ( + "trace:phoenix:parent_span:outer-node-execution-1" + ) + + def 
test_pending_parent_exception_exposes_execution_id(self): + error = PendingTraceParentContextError("outer-node-execution-1") + + assert error.parent_node_execution_id == "outer-node-execution-1" + assert "outer-node-execution-1" in str(error) + + def test_resolve_parent_span_context_rejects_payload_without_traceparent(self, monkeypatch): + mock_redis = MagicMock() + mock_redis.get.return_value = '{"tracestate": "vendor=value"}' + monkeypatch.setattr(arize_phoenix_trace_module, "redis_client", mock_redis) + + with pytest.raises(ValueError, match="traceparent"): + _resolve_published_parent_span_context("outer-node-execution-1") + + @pytest.mark.parametrize( + "stored_payload", + [ + '{"traceparent": ""}', + '{"traceparent": "not-a-traceparent"}', + '{"traceparent": "00-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbb"}', + ], + ) + def test_resolve_parent_span_context_rejects_malformed_traceparent(self, monkeypatch, stored_payload): + mock_redis = MagicMock() + mock_redis.get.return_value = stored_payload + monkeypatch.setattr(arize_phoenix_trace_module, "redis_client", mock_redis) + + with pytest.raises(ValueError, match="traceparent"): + _resolve_published_parent_span_context("outer-node-execution-1") + + +class TestWorkflowHierarchyHelpers: + def test_build_graph_parent_index_uses_predecessor_nodes_without_order_heuristics(self): + later_node = _make_workflow_node_trace_info( + node_execution_id="node-execution-3", + node_id="node-3", + predecessor_node_id="node-2", + index=3, + ) + root_node = _make_workflow_node_trace_info( + node_execution_id="node-execution-1", + node_id="node-1", + predecessor_node_id=None, + index=1, + ) + middle_node = _make_workflow_node_trace_info( + node_execution_id="node-execution-2", + node_id="node-2", + predecessor_node_id="node-1", + index=2, + ) + + graph_parent_index = _build_graph_parent_index([later_node, root_node, middle_node]) + + assert graph_parent_index == { + "node-execution-2": "node-execution-1", + "node-execution-3": 
"node-execution-2", + } + + def test_build_graph_parent_index_drops_ambiguous_parallel_like_predecessors(self): + first_parallel_node = _make_workflow_node_trace_info( + node_execution_id="parallel-node-execution-1", + node_id="parallel-node-1", + predecessor_node_id=None, + index=1, + parallel_id="parallel-1", + ) + second_parallel_node = _make_workflow_node_trace_info( + node_execution_id="parallel-node-execution-2", + node_id="parallel-node-1", + predecessor_node_id=None, + index=2, + parallel_id="parallel-2", + ) + child_node = _make_workflow_node_trace_info( + node_execution_id="child-node-execution-1", + node_id="child-node-1", + predecessor_node_id="parallel-node-1", + index=3, + ) + + graph_parent_index = _build_graph_parent_index([child_node, first_parallel_node, second_parallel_node]) + + assert graph_parent_index == {} + + def test_resolve_node_parent_prefers_predecessor_span(self): + workflow_span = MagicMock(name="workflow-span") + predecessor_span = MagicMock(name="predecessor-span") + graph_parent_span = MagicMock(name="graph-parent-span") + + parent = _resolve_node_parent( + execution_id="node-execution-2", + predecessor_execution_id="node-execution-1", + structured_parent_execution_id=None, + span_by_execution_id={ + "node-execution-1": predecessor_span, + "node-execution-0": graph_parent_span, + }, + graph_parent_index={ + "node-execution-2": "node-execution-0", + }, + workflow_span=workflow_span, + ) + + assert parent is predecessor_span + + def test_resolve_node_parent_falls_back_to_graph_parent_span(self): + workflow_span = MagicMock(name="workflow-span") + graph_parent_span = MagicMock(name="graph-parent-span") + + parent = _resolve_node_parent( + execution_id="node-execution-2", + predecessor_execution_id="missing-predecessor", + structured_parent_execution_id=None, + span_by_execution_id={ + "node-execution-0": graph_parent_span, + }, + graph_parent_index={ + "node-execution-2": "node-execution-0", + }, + workflow_span=workflow_span, + ) + + 
assert parent is graph_parent_span + + def test_resolve_node_parent_falls_back_to_workflow_span(self): + workflow_span = MagicMock(name="workflow-span") + + parent = _resolve_node_parent( + execution_id="node-execution-2", + predecessor_execution_id=None, + structured_parent_execution_id=None, + span_by_execution_id={}, + graph_parent_index={}, + workflow_span=workflow_span, + ) + + assert parent is workflow_span + + def test_resolve_structured_parent_execution_id_allows_body_nodes_to_use_enclosing_structure(self): + body_node = _make_workflow_node_trace_info( + node_execution_id="body-execution-1", + node_id="body-node-1", + node_type="tool", + loop_id="loop-node-1", + ) + + structured_parent_execution_id = _resolve_structured_parent_execution_id( + body_node, + execution_id_by_node_id={ + "loop-node-1": "loop-execution-1", + }, + ) + + assert structured_parent_execution_id == "loop-execution-1" + + def test_resolve_structured_parent_execution_id_reads_execution_metadata_dict_for_models(self): + body_node = SimpleNamespace( + node_execution_id="body-execution-1", + node_id="body-node-1", + execution_metadata_dict={ + "iteration_id": "iteration-node-1", + "loop_id": "loop-node-1", + }, + ) + + structured_parent_execution_id = _resolve_structured_parent_execution_id( + body_node, + execution_id_by_node_id={ + "iteration-node-1": "iteration-execution-1", + "loop-node-1": "loop-execution-1", + }, + ) + + assert structured_parent_execution_id == "iteration-execution-1" + + @patch("dify_trace_arize_phoenix.arize_phoenix_trace.GrpcOTLPSpanExporter") @patch("dify_trace_arize_phoenix.arize_phoenix_trace.trace_sdk.TracerProvider") def test_setup_tracer_arize(mock_provider, mock_exporter): @@ -173,12 +515,17 @@ def test_setup_tracer_exception(): @pytest.fixture def trace_instance(): - with patch("dify_trace_arize_phoenix.arize_phoenix_trace.setup_tracer") as mock_setup: + with ( + patch("dify_trace_arize_phoenix.arize_phoenix_trace.setup_tracer") as mock_setup, + 
patch("dify_trace_arize_phoenix.arize_phoenix_trace.redis_client", new=MagicMock()) as mock_redis, + ): mock_tracer = MagicMock(spec=Tracer) mock_processor = MagicMock() mock_setup.return_value = (mock_tracer, mock_processor) config = ArizeConfig(endpoint="http://a.com", api_key="k", space_id="s", project="p") - return ArizePhoenixDataTrace(config) + instance = ArizePhoenixDataTrace(config) + cast(Any, instance)._mock_redis_client = mock_redis + yield instance def test_trace_dispatch(trace_instance): @@ -273,23 +620,821 @@ def test_workflow_trace_no_app_id(mock_db, trace_instance): @patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") -def test_message_trace_success(mock_db, trace_instance): +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_uses_canonical_root_context_for_top_level_workflow( + mock_sessionmaker, mock_repo_factory, mock_db, trace_instance +): + mock_db.engine = MagicMock() + info = _make_workflow_info(message_id="message-1", workflow_run_id="workflow-run-1") + repo = MagicMock() + repo.get_by_workflow_execution.return_value = [] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + + root_carrier = {} + root_context = object() + + with ( + patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()), + patch.object(trace_instance, "ensure_root_span", return_value=root_carrier) as mock_ensure_root_span, + patch.object(trace_instance.propagator, "extract", return_value=root_context) as mock_extract, + ): + trace_instance.workflow_trace(info) + + mock_ensure_root_span.assert_called_once_with( + info.resolved_trace_id, + root_span_name="workflow-run-1", + root_span_attributes={ + SpanAttributes.INPUT_VALUE: safe_json_dumps(info.workflow_run_inputs), + SpanAttributes.INPUT_MIME_TYPE: "application/json", + SpanAttributes.OUTPUT_VALUE: 
safe_json_dumps(info.workflow_run_outputs), + SpanAttributes.OUTPUT_MIME_TYPE: "application/json", + }, + ) + mock_extract.assert_called_once_with(carrier=root_carrier) + workflow_span_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="workflow_workflow-run-1") + assert workflow_span_call.kwargs["context"] is root_context + + +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_uses_workflow_run_id_for_root_span_and_populates_root_inputs_outputs( + mock_sessionmaker, + mock_repo_factory, + mock_db, + trace_instance, +): + mock_db.engine = MagicMock() + info = _make_workflow_info( + workflow_run_inputs={"prompt": "hello"}, + workflow_run_outputs={"result": "world"}, + metadata={ + "app_id": "app1", + "app_name": "Workflow Name", + }, + workflow_run_id="workflow-run-xyz", + ) + repo = MagicMock() + repo.get_by_workflow_execution.return_value = [] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + + with patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()): + trace_instance.workflow_trace(info) + + root_span_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="workflow-run-xyz") + assert root_span_call.kwargs["attributes"][SpanAttributes.INPUT_VALUE] == safe_json_dumps(info.workflow_run_inputs) + assert root_span_call.kwargs["attributes"][SpanAttributes.OUTPUT_VALUE] == safe_json_dumps( + info.workflow_run_outputs + ) + assert root_span_call.kwargs["attributes"][SpanAttributes.INPUT_MIME_TYPE] == "application/json" + assert root_span_call.kwargs["attributes"][SpanAttributes.OUTPUT_MIME_TYPE] == "application/json" + + +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") 
+@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_falls_back_to_dify_name_when_workflow_run_id_is_blank( + mock_sessionmaker, + mock_repo_factory, + mock_db, + trace_instance, +): + mock_db.engine = MagicMock() + info = _make_workflow_info( + metadata={ + "app_id": "app1", + "app_name": "", + }, + workflow_run_id="", + ) + repo = MagicMock() + repo.get_by_workflow_execution.return_value = [] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + + with patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()): + trace_instance.workflow_trace(info) + + root_span_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="Dify") + assert root_span_call.kwargs["attributes"]["dify_trace_id"] == "" + + +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_reuses_upstream_parent_workflow_context_when_no_parent_node_execution_id_is_available( + mock_sessionmaker, mock_repo_factory, mock_db, trace_instance +): + mock_db.engine = MagicMock() + info = _make_workflow_info( + message_id="message-1", + workflow_run_id="workflow-run-1", + metadata={ + "app_id": "app1", + "parent_trace_context": { + "parent_workflow_run_id": "outer-workflow-run-1", + }, + }, + ) + repo = MagicMock() + repo.get_by_workflow_execution.return_value = [] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + + parent_carrier = {} + parent_context = object() + + with ( + patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()), + patch.object(trace_instance, "ensure_root_span", return_value=parent_carrier) as mock_ensure_root_span, + patch.object(trace_instance.propagator, "extract", return_value=parent_context) as mock_extract, + ): + 
trace_instance.workflow_trace(info) + + mock_ensure_root_span.assert_called_once_with( + "outer-workflow-run-1", + root_span_name="workflow-run-1", + root_span_attributes={ + SpanAttributes.INPUT_VALUE: safe_json_dumps(info.workflow_run_inputs), + SpanAttributes.INPUT_MIME_TYPE: "application/json", + SpanAttributes.OUTPUT_VALUE: safe_json_dumps(info.workflow_run_outputs), + SpanAttributes.OUTPUT_MIME_TYPE: "application/json", + }, + ) + mock_extract.assert_called_once_with(carrier=parent_carrier) + workflow_span_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="workflow_workflow-run-1") + assert workflow_span_call.kwargs["context"] is parent_context + + +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_uses_published_parent_node_context_for_nested_workflow( + mock_sessionmaker, + mock_repo_factory, + mock_db, + trace_instance, +): + mock_db.engine = MagicMock() + info = _make_workflow_info( + message_id="message-1", + workflow_run_id="workflow-run-1", + metadata={ + "app_id": "app1", + "parent_trace_context": { + "parent_workflow_run_id": "outer-workflow-run-1", + "parent_node_execution_id": "outer-node-execution-1", + }, + }, + ) + repo = MagicMock() + repo.get_by_workflow_execution.return_value = [] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + stored_carrier = '{"traceparent":"00-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbb-01"}' + trace_instance._mock_redis_client.get.return_value = stored_carrier + parent_context = object() + + with ( + patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()), + patch.object(trace_instance, "ensure_root_span") as mock_ensure_root_span, + patch.object(trace_instance.propagator, "extract", return_value=parent_context) as mock_extract, + ): + 
trace_instance.workflow_trace(info) + + trace_instance._mock_redis_client.get.assert_called_once_with( + _phoenix_parent_span_redis_key("outer-node-execution-1") + ) + mock_ensure_root_span.assert_not_called() + mock_extract.assert_called_once_with( + carrier={"traceparent": "00-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbb-01"} + ) + workflow_span_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="workflow_workflow-run-1") + assert workflow_span_call.kwargs["context"] is parent_context + + +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_raises_pending_parent_error_when_parent_node_context_is_missing( + mock_sessionmaker, + mock_repo_factory, + mock_db, + trace_instance, +): + mock_db.engine = MagicMock() + info = _make_workflow_info( + message_id="message-1", + workflow_run_id="workflow-run-1", + metadata={ + "app_id": "app1", + "parent_trace_context": { + "parent_workflow_run_id": "outer-workflow-run-1", + "parent_node_execution_id": "outer-node-execution-1", + }, + }, + ) + repo = MagicMock() + repo.get_by_workflow_execution.return_value = [] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + trace_instance._mock_redis_client.get.return_value = None + + with ( + patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()), + patch.object(trace_instance, "ensure_root_span") as mock_ensure_root_span, + pytest.raises(PendingTraceParentContextError) as exc_info, + ): + trace_instance.workflow_trace(info) + + assert exc_info.value.parent_node_execution_id == "outer-node-execution-1" + trace_instance._mock_redis_client.get.assert_called_once_with( + _phoenix_parent_span_redis_key("outer-node-execution-1") + ) + mock_ensure_root_span.assert_not_called() + + 
+@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_uses_parent_workflow_run_id_for_workflow_and_nodes_when_nested_context_is_present( + mock_sessionmaker, mock_repo_factory, mock_db, trace_instance +): + mock_db.engine = MagicMock() + info = _make_workflow_info( + conversation_id=None, + metadata={ + "app_id": "app1", + "parent_trace_context": { + "parent_workflow_run_id": "outer-workflow-run-1", + }, + }, + ) + repo = MagicMock() + node_execution = MagicMock() + node_execution.node_type = "tool" + node_execution.status = "succeeded" + node_execution.inputs = {"tool_input": "value"} + node_execution.outputs = {"tool_output": "value"} + node_execution.created_at = _dt() + node_execution.elapsed_time = 1.0 + node_execution.process_data = {} + node_execution.metadata = {} + node_execution.title = "Tool node" + node_execution.id = "node-1" + node_execution.error = None + repo.get_by_workflow_execution.return_value = [node_execution] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + + with patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()): + trace_instance.workflow_trace(info) + + workflow_span_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="workflow_r1") + node_span_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="tool_Tool node") + + assert workflow_span_call.kwargs["attributes"][SpanAttributes.SESSION_ID] == "outer-workflow-run-1" + assert node_span_call.kwargs["attributes"][SpanAttributes.SESSION_ID] == "outer-workflow-run-1" + + +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def 
test_workflow_trace_falls_back_to_node_type_when_node_title_is_blank( + mock_sessionmaker, mock_repo_factory, mock_db, trace_instance +): + mock_db.engine = MagicMock() + info = _make_workflow_info() + repo = MagicMock() + node_execution = _make_node_execution( + id="node-execution-1", + node_execution_id="node-execution-1", + node_id="node-1", + node_type="tool", + title=" ", + ) + repo.get_by_workflow_execution.return_value = [node_execution] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + + with patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()): + trace_instance.workflow_trace(info) + + node_span_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="tool") + assert node_span_call.kwargs["attributes"][SpanAttributes.SESSION_ID] == "r1" + + +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_prefers_workflow_graph_node_title_over_execution_title( + mock_sessionmaker, mock_repo_factory, mock_db, trace_instance +): + mock_db.engine = MagicMock() + info = _make_workflow_info( + workflow_data={ + "graph": { + "nodes": [ + { + "id": "nested-tool-node", + "data": { + "type": "tool", + "title": "nested workflow tool", + }, + } + ] + } + } + ) + repo = MagicMock() + node_execution = _make_node_execution( + id="node-execution-1", + node_execution_id="node-execution-1", + node_id="nested-tool-node", + node_type="tool", + title="2", + ) + repo.get_by_workflow_execution.return_value = [node_execution] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + + with patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()): + trace_instance.workflow_trace(info) + + node_span_call = _get_start_span_call(trace_instance.tracer.start_span, 
span_name="tool_nested workflow tool") + assert node_span_call.kwargs["attributes"][SpanAttributes.SESSION_ID] == "r1" + + +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_keeps_nested_conversation_session_while_reusing_parent_root_context( + mock_sessionmaker, mock_repo_factory, mock_db, trace_instance +): + mock_db.engine = MagicMock() + info = _make_workflow_info( + conversation_id="conversation-1", + message_id="message-1", + workflow_run_id="workflow-run-1", + metadata={ + "app_id": "app1", + "parent_trace_context": { + "parent_workflow_run_id": "outer-workflow-run-1", + }, + }, + ) + repo = MagicMock() + node_execution = _make_node_execution( + id="node-execution-1", + node_execution_id="node-execution-1", + node_id="node-1", + node_type="tool", + ) + repo.get_by_workflow_execution.return_value = [node_execution] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + + parent_carrier = {} + parent_context = object() + + with ( + patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()), + patch.object(trace_instance, "ensure_root_span", return_value=parent_carrier) as mock_ensure_root_span, + patch.object(trace_instance.propagator, "extract", return_value=parent_context) as mock_extract, + ): + trace_instance.workflow_trace(info) + + mock_ensure_root_span.assert_called_once_with( + "outer-workflow-run-1", + root_span_name="workflow-run-1", + root_span_attributes={ + SpanAttributes.INPUT_VALUE: safe_json_dumps(info.workflow_run_inputs), + SpanAttributes.INPUT_MIME_TYPE: "application/json", + SpanAttributes.OUTPUT_VALUE: safe_json_dumps(info.workflow_run_outputs), + SpanAttributes.OUTPUT_MIME_TYPE: "application/json", + }, + ) + mock_extract.assert_called_once_with(carrier=parent_carrier) + workflow_span_call = 
_get_start_span_call(trace_instance.tracer.start_span, span_name="workflow_workflow-run-1") + node_span_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="tool_Node") + assert workflow_span_call.kwargs["context"] is parent_context + assert workflow_span_call.kwargs["attributes"][SpanAttributes.SESSION_ID] == "conversation-1" + assert node_span_call.kwargs["attributes"][SpanAttributes.SESSION_ID] == "conversation-1" + + +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_publishes_tool_node_parent_span_context_to_redis( + mock_sessionmaker, + mock_repo_factory, + mock_db, + trace_instance, +): + mock_db.engine = MagicMock() + info = _make_workflow_info() + repo = MagicMock() + node_execution = _make_node_execution( + id="tool-execution-1", + node_execution_id="tool-execution-1", + node_id="tool-node-1", + node_type="tool", + ) + repo.get_by_workflow_execution.return_value = [node_execution] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + + workflow_span = MagicMock(name="workflow-span") + workflow_span._context_label = "workflow" + tool_span = MagicMock(name="tool-span") + tool_span._context_label = "tool" + trace_instance.tracer.start_span.side_effect = [workflow_span, tool_span] + + def inject_side_effect(carrier): + carrier["traceparent"] = "00-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbb-01" + + with ( + patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()), + patch.object(trace_instance, "ensure_root_span", return_value={}), + patch.object(trace_instance.propagator, "extract", return_value="root-context"), + patch.object(trace_instance.propagator, "inject", side_effect=inject_side_effect) as mock_inject, + patch( + "dify_trace_arize_phoenix.arize_phoenix_trace.set_span_in_context", + 
side_effect=lambda span: f"context:{span._context_label}",
+        ),
+    ):
+        trace_instance.workflow_trace(info)
+
+    mock_inject.assert_called_once()
+    trace_instance._mock_redis_client.setex.assert_called_once_with(
+        _phoenix_parent_span_redis_key("tool-execution-1"),
+        300,
+        '{"traceparent": "00-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbb-01"}',
+    )
+
+
+@pytest.mark.parametrize(
+    ("failing_step", "expected_message"),
+    [
+        ("inject", "inject failed"),
+        ("publish", "publish failed"),
+    ],
+)
+@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db")
+@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory")
+@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker")
+def test_workflow_trace_cleans_up_tool_span_when_parent_context_publish_fails(
+    mock_sessionmaker,
+    mock_repo_factory,
+    mock_db,
+    trace_instance,
+    failing_step,
+    expected_message,
+):
+    mock_db.engine = MagicMock()
+    info = _make_workflow_info()
+    repo = MagicMock()
+    node_execution = _make_node_execution(
+        id="tool-execution-1",
+        node_execution_id="tool-execution-1",
+        node_id="tool-node-1",
+        node_type="tool",
+    )
+    repo.get_by_workflow_execution.return_value = [node_execution]
+    mock_repo_factory.create_workflow_node_execution_repository.return_value = repo
+
+    workflow_span = MagicMock(name="workflow-span")
+    workflow_span._context_label = "workflow"
+    tool_span = MagicMock(name="tool-span")
+    tool_span._context_label = "tool"
+    trace_instance.tracer.start_span.side_effect = [workflow_span, tool_span]
+
+    def inject_side_effect(carrier):
+        carrier["traceparent"] = "00-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-bbbbbbbbbbbbbbbb-01"
+
+    if failing_step == "inject":
+        # An exception instance as side_effect makes the mocked inject raise.
+        inject_side_effect = RuntimeError(expected_message)
+    else:
+        trace_instance._mock_redis_client.setex.side_effect = RuntimeError(expected_message)
+
+    with (
+        patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()),
+
patch.object(trace_instance, "ensure_root_span", return_value={}), + patch.object(trace_instance.propagator, "extract", return_value="root-context"), + patch.object(trace_instance.propagator, "inject", side_effect=inject_side_effect), + patch( + "dify_trace_arize_phoenix.arize_phoenix_trace.set_span_in_context", + side_effect=lambda span: f"context:{span._context_label}", + ), + pytest.raises(RuntimeError, match=expected_message), + ): + trace_instance.workflow_trace(info) + + tool_span.end.assert_called_once() + workflow_span.end.assert_called_once() + + +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_parents_serial_nodes_to_resolved_predecessor_span( + mock_sessionmaker, mock_repo_factory, mock_db, trace_instance +): + mock_db.engine = MagicMock() + info = _make_workflow_info() + repo = MagicMock() + second_node = _make_node_execution( + id="node-execution-2", + node_execution_id="node-execution-2", + node_id="node-2", + node_type="llm", + predecessor_node_id="node-1", + process_data={ + "prompts": [{"role": "user", "content": "hi"}], + "model_provider": "openai", + "model_name": "gpt-4", + }, + ) + first_node = _make_node_execution( + id="node-execution-1", + node_execution_id="node-execution-1", + node_id="node-1", + node_type="tool", + ) + repo.get_by_workflow_execution.return_value = [second_node, first_node] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + + workflow_span = MagicMock(name="workflow-span") + workflow_span._context_label = "workflow" + first_node_span = MagicMock(name="first-node-span") + first_node_span._context_label = "node-1" + second_node_span = MagicMock(name="second-node-span") + second_node_span._context_label = "node-2" + trace_instance.tracer.start_span.side_effect = [workflow_span, first_node_span, 
second_node_span] + + with ( + patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()), + patch.object(trace_instance, "ensure_root_span", return_value={}), + patch.object(trace_instance.propagator, "extract", return_value="root-context"), + patch( + "dify_trace_arize_phoenix.arize_phoenix_trace.set_span_in_context", + side_effect=lambda span: f"context:{span._context_label}", + ), + ): + trace_instance.workflow_trace(info) + + first_node_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="tool_Node") + second_node_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="llm_Node") + assert first_node_call.kwargs["context"] == "context:workflow" + assert second_node_call.kwargs["context"] == "context:node-1" + + +@pytest.mark.parametrize( + ("enclosing_node_type", "structured_field"), + [ + ("loop", "loop_id"), + ("iteration", "iteration_id"), + ], +) +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_parents_structured_start_nodes_to_enclosing_structure_span( + mock_sessionmaker, + mock_repo_factory, + mock_db, + trace_instance, + enclosing_node_type, + structured_field, +): + mock_db.engine = MagicMock() + info = _make_workflow_info() + repo = MagicMock() + enclosing_node = _make_node_execution( + id=f"{enclosing_node_type}-execution-1", + node_execution_id=f"{enclosing_node_type}-execution-1", + node_id=f"{enclosing_node_type}-node-1", + node_type=enclosing_node_type, + ) + structured_kwargs = {structured_field: f"{enclosing_node_type}-node-1"} + start_node = _make_node_execution( + id="start-execution-1", + node_execution_id="start-execution-1", + node_id="start-node-1", + node_type="start", + **structured_kwargs, + ) + repo.get_by_workflow_execution.return_value = [start_node, enclosing_node] + 
mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + + workflow_span = MagicMock(name="workflow-span") + workflow_span._context_label = "workflow" + enclosing_node_span = MagicMock(name="enclosing-node-span") + enclosing_node_span._context_label = enclosing_node_type + start_node_span = MagicMock(name="start-node-span") + start_node_span._context_label = "start" + trace_instance.tracer.start_span.side_effect = [workflow_span, enclosing_node_span, start_node_span] + + with ( + patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()), + patch.object(trace_instance, "ensure_root_span", return_value={}), + patch.object(trace_instance.propagator, "extract", return_value="root-context"), + patch( + "dify_trace_arize_phoenix.arize_phoenix_trace.set_span_in_context", + side_effect=lambda span: f"context:{span._context_label}", + ), + ): + trace_instance.workflow_trace(info) + + start_node_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="start_Node") + assert start_node_call.kwargs["context"] == f"context:{enclosing_node_type}" + + +@pytest.mark.parametrize( + ("enclosing_node_type", "structured_field"), + [ + ("loop", "loop_id"), + ("iteration", "iteration_id"), + ], +) +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_keeps_duplicate_body_node_children_under_enclosing_structure( + mock_sessionmaker, + mock_repo_factory, + mock_db, + trace_instance, + enclosing_node_type, + structured_field, +): + mock_db.engine = MagicMock() + info = _make_workflow_info() + repo = MagicMock() + enclosing_node = _make_node_execution( + id=f"{enclosing_node_type}-execution-1", + node_execution_id=f"{enclosing_node_type}-execution-1", + node_id=f"{enclosing_node_type}-node-1", + node_type=enclosing_node_type, + ) + 
structured_kwargs = {structured_field: f"{enclosing_node_type}-node-1"} + repeated_body_node_1 = _make_node_execution( + id="body-execution-1", + node_execution_id="body-execution-1", + node_id="body-node-1", + node_type="tool", + **structured_kwargs, + ) + repeated_body_node_2 = _make_node_execution( + id="body-execution-2", + node_execution_id="body-execution-2", + node_id="body-node-1", + node_type="tool", + **structured_kwargs, + ) + child_node = _make_node_execution( + id="child-execution-1", + node_execution_id="child-execution-1", + node_id="child-node-1", + node_type="llm", + predecessor_node_id="body-node-1", + process_data={ + "prompts": [{"role": "user", "content": "hi"}], + "model_provider": "openai", + "model_name": "gpt-4", + }, + **structured_kwargs, + ) + repo.get_by_workflow_execution.return_value = [ + child_node, + repeated_body_node_1, + repeated_body_node_2, + enclosing_node, + ] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + + workflow_span = MagicMock(name="workflow-span") + workflow_span._context_label = "workflow" + enclosing_node_span = MagicMock(name="enclosing-node-span") + enclosing_node_span._context_label = enclosing_node_type + child_node_span = MagicMock(name="child-node-span") + child_node_span._context_label = "child" + repeated_body_node_1_span = MagicMock(name="repeated-body-node-1-span") + repeated_body_node_1_span._context_label = "body-1" + repeated_body_node_2_span = MagicMock(name="repeated-body-node-2-span") + repeated_body_node_2_span._context_label = "body-2" + trace_instance.tracer.start_span.side_effect = [ + workflow_span, + enclosing_node_span, + child_node_span, + repeated_body_node_1_span, + repeated_body_node_2_span, + ] + + with ( + patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()), + patch.object(trace_instance, "ensure_root_span", return_value={}), + patch.object(trace_instance.propagator, "extract", return_value="root-context"), + 
patch( + "dify_trace_arize_phoenix.arize_phoenix_trace.set_span_in_context", + side_effect=lambda span: f"context:{span._context_label}", + ), + ): + trace_instance.workflow_trace(info) + + child_node_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="llm_Node") + assert child_node_call.kwargs["context"] == f"context:{enclosing_node_type}" + + +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.DifyCoreRepositoryFactory") +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.sessionmaker") +def test_workflow_trace_falls_back_to_workflow_span_for_parallel_like_ambiguous_predecessors( + mock_sessionmaker, mock_repo_factory, mock_db, trace_instance +): + mock_db.engine = MagicMock() + info = _make_workflow_info() + repo = MagicMock() + child_node = _make_node_execution( + id="child-execution-1", + node_execution_id="child-execution-1", + node_id="child-node-1", + node_type="llm", + predecessor_node_id="parallel-node-1", + process_data={ + "prompts": [{"role": "user", "content": "hi"}], + "model_provider": "openai", + "model_name": "gpt-4", + }, + ) + first_parallel_node = _make_node_execution( + id="parallel-execution-1", + node_execution_id="parallel-execution-1", + node_id="parallel-node-1", + node_type="tool", + parallel_id="parallel-1", + ) + second_parallel_node = _make_node_execution( + id="parallel-execution-2", + node_execution_id="parallel-execution-2", + node_id="parallel-node-1", + node_type="tool", + parallel_id="parallel-2", + ) + repo.get_by_workflow_execution.return_value = [child_node, first_parallel_node, second_parallel_node] + mock_repo_factory.create_workflow_node_execution_repository.return_value = repo + + workflow_span = MagicMock(name="workflow-span") + workflow_span._context_label = "workflow" + child_node_span = MagicMock(name="child-node-span") + child_node_span._context_label = "child" + first_parallel_node_span = MagicMock(name="first-parallel-node-span") + 
first_parallel_node_span._context_label = "parallel-1" + second_parallel_node_span = MagicMock(name="second-parallel-node-span") + second_parallel_node_span._context_label = "parallel-2" + trace_instance.tracer.start_span.side_effect = [ + workflow_span, + child_node_span, + first_parallel_node_span, + second_parallel_node_span, + ] + + with ( + patch.object(trace_instance, "get_service_account_with_tenant", return_value=MagicMock()), + patch.object(trace_instance, "ensure_root_span", return_value={}), + patch.object(trace_instance.propagator, "extract", return_value="root-context"), + patch( + "dify_trace_arize_phoenix.arize_phoenix_trace.set_span_in_context", + side_effect=lambda span: f"context:{span._context_label}", + ), + ): + trace_instance.workflow_trace(info) + + child_node_call = _get_start_span_call(trace_instance.tracer.start_span, span_name="llm_Node") + assert child_node_call.kwargs["context"] == "context:workflow" + + +@patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") +def test_message_trace_keeps_conversation_id_as_session(mock_db, trace_instance): mock_db.engine = MagicMock() info = _make_message_info() info.message_data = MagicMock() - info.message_data.from_account_id = "acc1" + info.message_data.conversation_id = "conversation-2" + info.message_data.from_account_id = "acc2" info.message_data.from_end_user_id = None - info.message_data.query = "q" - info.message_data.answer = "a" - info.message_data.status = "s" - info.message_data.model_id = "m" - info.message_data.model_provider = "p" + info.message_data.query = "q2" + info.message_data.answer = "a2" + info.message_data.status = "s2" + info.message_data.model_id = "m2" + info.message_data.model_provider = "p2" info.message_data.message_metadata = "{}" info.message_data.error = None info.error = None + root_span = MagicMock() + message_span = MagicMock() + llm_span = MagicMock() + trace_instance.tracer.start_span.side_effect = [root_span, message_span, llm_span] + 
trace_instance.message_trace(info) - assert trace_instance.tracer.start_span.call_count >= 1 + + message_span_call = _get_start_span_call( + trace_instance.tracer.start_span, span_name=TraceTaskName.MESSAGE_TRACE.value + ) + assert message_span_call.kwargs["attributes"][SpanAttributes.SESSION_ID] == "conversation-2" @patch("dify_trace_arize_phoenix.arize_phoenix_trace.db") @@ -397,3 +1542,30 @@ def test_api_check_success(trace_instance): def test_ensure_root_span_basic(trace_instance): trace_instance.ensure_root_span("tid") assert "tid" in trace_instance.dify_trace_ids + + +def test_ensure_root_span_uses_custom_name_and_attributes(trace_instance): + root_attributes = { + SpanAttributes.INPUT_VALUE: '{"input":"value"}', + SpanAttributes.OUTPUT_VALUE: '{"output":"value"}', + } + + trace_instance.ensure_root_span("tid", root_span_name="Workflow Name", root_span_attributes=root_attributes) + + trace_instance.tracer.start_span.assert_called_once_with( + name="Workflow Name", + attributes={ + SpanAttributes.OPENINFERENCE_SPAN_KIND: "CHAIN", + "dify_project_name": "p", + "dify_trace_id": "tid", + SpanAttributes.INPUT_VALUE: '{"input":"value"}', + SpanAttributes.OUTPUT_VALUE: '{"output":"value"}', + }, + ) + + +def test_ensure_root_span_falls_back_to_dify_name_when_custom_name_is_blank(trace_instance): + trace_instance.ensure_root_span("tid", root_span_name=" ") + + trace_instance.tracer.start_span.assert_called_once() + assert trace_instance.tracer.start_span.call_args.kwargs["name"] == "Dify" diff --git a/api/providers/trace/trace-arize-phoenix/tests/unit_tests/test_arize_phoenix_trace.py b/api/providers/trace/trace-arize-phoenix/tests/unit_tests/test_arize_phoenix_trace.py deleted file mode 100644 index a01c63ae61..0000000000 --- a/api/providers/trace/trace-arize-phoenix/tests/unit_tests/test_arize_phoenix_trace.py +++ /dev/null @@ -1,36 +0,0 @@ -from dify_trace_arize_phoenix.arize_phoenix_trace import _NODE_TYPE_TO_SPAN_KIND, _get_node_span_kind -from 
openinference.semconv.trace import OpenInferenceSpanKindValues - -from graphon.enums import BUILT_IN_NODE_TYPES, BuiltinNodeTypes - - -class TestGetNodeSpanKind: - """Tests for _get_node_span_kind helper.""" - - def test_all_node_types_are_mapped_correctly(self): - """Ensure every built-in node type is mapped to the correct span kind.""" - # Mappings for node types that have a specialised span kind. - special_mappings = { - BuiltinNodeTypes.LLM: OpenInferenceSpanKindValues.LLM, - BuiltinNodeTypes.KNOWLEDGE_RETRIEVAL: OpenInferenceSpanKindValues.RETRIEVER, - BuiltinNodeTypes.TOOL: OpenInferenceSpanKindValues.TOOL, - BuiltinNodeTypes.AGENT: OpenInferenceSpanKindValues.AGENT, - } - - # Test that every built-in node type is mapped to the correct span kind. - # Node types not in `special_mappings` should default to CHAIN. - for node_type in BUILT_IN_NODE_TYPES: - expected_span_kind = special_mappings.get(node_type, OpenInferenceSpanKindValues.CHAIN) - actual_span_kind = _get_node_span_kind(node_type) - assert actual_span_kind == expected_span_kind, ( - f"Node type {node_type!r} was mapped to {actual_span_kind}, but {expected_span_kind} was expected." 
- ) - - def test_unknown_string_defaults_to_chain(self): - """An unrecognised node type string should still return CHAIN.""" - assert _get_node_span_kind("some-future-node-type") == OpenInferenceSpanKindValues.CHAIN - - def test_stale_dataset_retrieval_not_in_mapping(self): - """The old 'dataset_retrieval' string was never a valid NodeType value; - make sure it is not present in the mapping dictionary.""" - assert "dataset_retrieval" not in _NODE_TYPE_TO_SPAN_KIND diff --git a/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py b/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py index 952f10c34f..95e27c791f 100644 --- a/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py +++ b/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py @@ -40,7 +40,7 @@ def langfuse_config(): @pytest.fixture -def trace_instance(langfuse_config, monkeypatch): +def trace_instance(langfuse_config, monkeypatch: pytest.MonkeyPatch): # Mock Langfuse client to avoid network calls mock_client = MagicMock() monkeypatch.setattr("dify_trace_langfuse.langfuse_trace.Langfuse", lambda **kwargs: mock_client) @@ -49,7 +49,7 @@ def trace_instance(langfuse_config, monkeypatch): return instance -def test_init(langfuse_config, monkeypatch): +def test_init(langfuse_config, monkeypatch: pytest.MonkeyPatch): mock_langfuse = MagicMock() monkeypatch.setattr("dify_trace_langfuse.langfuse_trace.Langfuse", mock_langfuse) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -64,7 +64,7 @@ def test_init(langfuse_config, monkeypatch): assert instance.file_base_url == "http://test.url" -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -114,7 +114,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): 
mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace_with_message_id(trace_instance, monkeypatch): +def test_workflow_trace_with_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Setup trace info trace_info = WorkflowTraceInfo( workflow_id="wf-1", @@ -218,7 +218,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch): assert other_span.level == LevelEnum.ERROR -def test_workflow_trace_no_message_id(trace_instance, monkeypatch): +def test_workflow_trace_no_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="wf-1", tenant_id="tenant-1", @@ -259,7 +259,7 @@ def test_workflow_trace_no_message_id(trace_instance, monkeypatch): assert trace_data.name == TraceTaskName.WORKFLOW_TRACE -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="wf-1", tenant_id="tenant-1", @@ -287,7 +287,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): trace_instance.workflow_trace(trace_info) -def test_message_trace_basic(trace_instance, monkeypatch): +def test_message_trace_basic(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ -331,7 +331,7 @@ def test_message_trace_basic(trace_instance, monkeypatch): assert gen_data.usage.total == 30 -def test_message_trace_with_end_user(trace_instance, monkeypatch): +def test_message_trace_with_end_user(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ -636,7 +636,7 @@ def test_langfuse_trace_entity_with_list_dict_input(): assert data.input[0]["content"] == "hello" -def test_workflow_trace_handles_usage_extraction_error(trace_instance, monkeypatch, caplog): +def 
test_workflow_trace_handles_usage_extraction_error(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): # Setup trace info to trigger LLM node usage extraction trace_info = WorkflowTraceInfo( workflow_id="wf-1", diff --git a/api/providers/trace/trace-langsmith/src/dify_trace_langsmith/entities/langsmith_trace_entity.py b/api/providers/trace/trace-langsmith/src/dify_trace_langsmith/entities/langsmith_trace_entity.py index f73ba01c8b..be9d64ae01 100644 --- a/api/providers/trace/trace-langsmith/src/dify_trace_langsmith/entities/langsmith_trace_entity.py +++ b/api/providers/trace/trace-langsmith/src/dify_trace_langsmith/entities/langsmith_trace_entity.py @@ -65,35 +65,18 @@ class LangSmithRunModel(LangSmithTokenUsage, LangSmithMultiModel): } file_list = values.get("file_list", []) if isinstance(v, str): - if field_name == "inputs": - return { - "messages": { - "role": "user", - "content": v, - "usage_metadata": usage_metadata, - "file_list": file_list, - }, - } - elif field_name == "outputs": - return { - "choices": { - "role": "ai", - "content": v, - "usage_metadata": usage_metadata, - "file_list": file_list, - }, - } - elif isinstance(v, list): - data = {} - if len(v) > 0 and isinstance(v[0], dict): - # rename text to content - v = replace_text_with_content(data=v) - if field_name == "inputs": - data = { - "messages": v, + match field_name: + case "inputs": + return { + "messages": { + "role": "user", + "content": v, + "usage_metadata": usage_metadata, + "file_list": file_list, + }, } - elif field_name == "outputs": - data = { + case "outputs": + return { "choices": { "role": "ai", "content": v, @@ -101,6 +84,29 @@ class LangSmithRunModel(LangSmithTokenUsage, LangSmithMultiModel): "file_list": file_list, }, } + case _: + pass + elif isinstance(v, list): + data = {} + if len(v) > 0 and isinstance(v[0], dict): + # rename text to content + v = replace_text_with_content(data=v) + match field_name: + case "inputs": + data = { + "messages": v, + } + case "outputs": + 
data = { + "choices": { + "role": "ai", + "content": v, + "usage_metadata": usage_metadata, + "file_list": file_list, + }, + } + case _: + pass return data else: return { diff --git a/api/providers/trace/trace-langsmith/src/dify_trace_langsmith/langsmith_trace.py b/api/providers/trace/trace-langsmith/src/dify_trace_langsmith/langsmith_trace.py index 145bd70dbc..045ec44e4e 100644 --- a/api/providers/trace/trace-langsmith/src/dify_trace_langsmith/langsmith_trace.py +++ b/api/providers/trace/trace-langsmith/src/dify_trace_langsmith/langsmith_trace.py @@ -64,7 +64,9 @@ class LangSmithDataTrace(BaseTraceInstance): self.generate_name_trace(trace_info) def workflow_trace(self, trace_info: WorkflowTraceInfo): - trace_id = trace_info.trace_id or trace_info.message_id or trace_info.workflow_run_id + # trace_id must equal the root run's run_id (LangSmith protocol); external trace_id + # cannot be used here as it would cause HTTP 400. + trace_id = trace_info.message_id or trace_info.workflow_run_id if trace_info.start_time is None: trace_info.start_time = datetime.now() message_dotted_order = ( @@ -77,6 +79,8 @@ class LangSmithDataTrace(BaseTraceInstance): ) metadata = trace_info.metadata metadata["workflow_app_log_id"] = trace_info.workflow_app_log_id + if trace_info.trace_id: + metadata["external_trace_id"] = trace_info.trace_id if trace_info.message_id: message_run = LangSmithRunModel( diff --git a/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py b/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py index 45e5894e4a..edc4aafd87 100644 --- a/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py +++ b/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py @@ -35,7 +35,7 @@ def langsmith_config(): @pytest.fixture -def trace_instance(langsmith_config, monkeypatch): +def trace_instance(langsmith_config, monkeypatch: 
pytest.MonkeyPatch): # Mock LangSmith client mock_client = MagicMock() monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.Client", lambda **kwargs: mock_client) @@ -44,7 +44,7 @@ def trace_instance(langsmith_config, monkeypatch): return instance -def test_init(langsmith_config, monkeypatch): +def test_init(langsmith_config, monkeypatch: pytest.MonkeyPatch): mock_client_class = MagicMock() monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.Client", mock_client_class) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -57,7 +57,7 @@ def test_init(langsmith_config, monkeypatch): assert instance.file_base_url == "http://test.url" -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -107,7 +107,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace(trace_instance, monkeypatch): +def test_workflow_trace(trace_instance, monkeypatch: pytest.MonkeyPatch): # Setup trace info workflow_data = MagicMock() workflow_data.created_at = _dt() @@ -208,13 +208,17 @@ def test_workflow_trace(trace_instance, monkeypatch): assert call_args[0].id == "msg-1" assert call_args[0].name == TraceTaskName.MESSAGE_TRACE + # trace_id must equal root run's id (message_id), not the external trace_id "trace-1" + assert call_args[0].trace_id == "msg-1" assert call_args[1].id == "run-1" assert call_args[1].name == TraceTaskName.WORKFLOW_TRACE assert call_args[1].parent_run_id == "msg-1" + assert call_args[1].trace_id == "msg-1" assert call_args[2].id == "node-llm" assert call_args[2].run_type == LangSmithRunType.llm + assert call_args[2].trace_id == "msg-1" assert call_args[3].id == "node-other" assert call_args[3].run_type == LangSmithRunType.tool @@ -223,7 +227,7 @@ def test_workflow_trace(trace_instance, monkeypatch): assert call_args[4].run_type == 
LangSmithRunType.retriever -def test_workflow_trace_no_start_time(trace_instance, monkeypatch): +def test_workflow_trace_no_start_time(trace_instance, monkeypatch: pytest.MonkeyPatch): workflow_data = MagicMock() workflow_data.created_at = _dt() workflow_data.finished_at = _dt() + timedelta(seconds=1) @@ -266,7 +270,7 @@ def test_workflow_trace_no_start_time(trace_instance, monkeypatch): assert trace_instance.add_run.called -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = MagicMock(spec=WorkflowTraceInfo) trace_info.trace_id = "trace-1" trace_info.message_id = None @@ -290,7 +294,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): trace_instance.workflow_trace(trace_info) -def test_message_trace(trace_instance, monkeypatch): +def test_message_trace(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ -516,7 +520,7 @@ def test_update_run_error(trace_instance): trace_instance.update_run(update_data) -def test_workflow_trace_usage_extraction_error(trace_instance, monkeypatch, caplog): +def test_workflow_trace_usage_extraction_error(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): workflow_data = MagicMock() workflow_data.created_at = _dt() workflow_data.finished_at = _dt() + timedelta(seconds=1) @@ -604,3 +608,83 @@ def test_get_project_url_error(trace_instance): trace_instance.langsmith_client.get_run_url.side_effect = Exception("error") with pytest.raises(ValueError, match="LangSmith get run url failed: error"): trace_instance.get_project_url() + + +def _make_workflow_trace_info( + *, message_id: str | None, workflow_run_id: str, trace_id: str | None +) -> WorkflowTraceInfo: + workflow_data = MagicMock() + workflow_data.created_at = _dt() + workflow_data.finished_at = _dt() + timedelta(seconds=1) + return WorkflowTraceInfo( 
+ tenant_id="tenant-1", + workflow_id="wf-1", + workflow_run_id=workflow_run_id, + workflow_run_inputs={}, + workflow_run_outputs={}, + workflow_run_status="succeeded", + workflow_run_version="1.0", + workflow_run_elapsed_time=1.0, + total_tokens=0, + file_list=[], + query="q", + message_id=message_id, + conversation_id="conv-1" if message_id else None, + start_time=_dt(), + end_time=_dt() + timedelta(seconds=1), + trace_id=trace_id, + metadata={"app_id": "app-1"}, + workflow_app_log_id=None, + error=None, + workflow_data=workflow_data, + ) + + +def _patch_workflow_trace_deps(monkeypatch, trace_instance): + monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.sessionmaker", lambda bind: lambda: MagicMock()) + monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.db", MagicMock(engine="engine")) + repo = MagicMock() + repo.get_by_workflow_execution.return_value = [] + factory = MagicMock() + factory.create_workflow_node_execution_repository.return_value = repo + monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.DifyCoreRepositoryFactory", factory) + monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) + trace_instance.add_run = MagicMock() + + +def test_workflow_trace_id_uses_message_id_not_external(trace_instance, monkeypatch): + """Chatflow with external trace_id: LangSmith trace_id must be message_id, not external.""" + trace_info = _make_workflow_trace_info( + message_id="msg-abc", + workflow_run_id="run-xyz", + trace_id="external-999", + ) + _patch_workflow_trace_deps(monkeypatch, trace_instance) + + trace_instance.workflow_trace(trace_info) + + calls = [c[0][0] for c in trace_instance.add_run.call_args_list] + # message run (root) and workflow run (child) must both use message_id as trace_id + assert calls[0].id == "msg-abc" + assert calls[0].trace_id == "msg-abc" + assert calls[1].id == "run-xyz" + assert calls[1].trace_id == "msg-abc" + # external_trace_id preserved in metadata + assert 
trace_info.metadata.get("external_trace_id") == "external-999" + + +def test_workflow_trace_id_pure_workflow_uses_run_id(trace_instance, monkeypatch): + """Pure workflow (no message_id) with external trace_id: trace_id must be workflow_run_id.""" + trace_info = _make_workflow_trace_info( + message_id=None, + workflow_run_id="run-xyz", + trace_id="external-999", + ) + _patch_workflow_trace_deps(monkeypatch, trace_instance) + + trace_instance.workflow_trace(trace_info) + + calls = [c[0][0] for c in trace_instance.add_run.call_args_list] + # workflow run is the root; trace_id must equal its run_id + assert calls[0].id == "run-xyz" + assert calls[0].trace_id == "run-xyz" diff --git a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py index 46c9750a5d..324f894b25 100644 --- a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py +++ b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py @@ -614,7 +614,7 @@ class TestMessageTrace: span.set_status.assert_called_once() span.add_event.assert_called_once() - def test_message_trace_with_file_data(self, trace_instance, mock_tracing, mock_db, monkeypatch): + def test_message_trace_with_file_data(self, trace_instance, mock_tracing, mock_db, monkeypatch: pytest.MonkeyPatch): span = MagicMock() mock_tracing["start"].return_value = span mock_tracing["set"].return_value = "token" diff --git a/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py b/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py index eefed3c78c..5daaa7132c 100644 --- a/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py +++ b/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py @@ -35,7 +35,7 @@ def opik_config(): @pytest.fixture -def trace_instance(opik_config, monkeypatch): +def 
trace_instance(opik_config, monkeypatch: pytest.MonkeyPatch): mock_client = MagicMock() monkeypatch.setattr("dify_trace_opik.opik_trace.Opik", lambda **kwargs: mock_client) @@ -65,7 +65,7 @@ def test_prepare_opik_uuid(): assert result is not None -def test_init(opik_config, monkeypatch): +def test_init(opik_config, monkeypatch: pytest.MonkeyPatch): mock_opik = MagicMock() monkeypatch.setattr("dify_trace_opik.opik_trace.Opik", mock_opik) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -82,7 +82,7 @@ def test_init(opik_config, monkeypatch): assert instance.project == opik_config.project -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -132,7 +132,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace_with_message_id(trace_instance, monkeypatch): +def test_workflow_trace_with_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability WORKFLOW_ID = "fb05c7cd-6cec-4add-8a84-df03a408b4ce" WORKFLOW_RUN_ID = "33c67568-7a8a-450e-8916-a5f135baeaef" @@ -221,7 +221,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch): assert trace_instance.add_span.call_count >= 1 -def test_workflow_trace_no_message_id(trace_instance, monkeypatch): +def test_workflow_trace_no_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability WORKFLOW_ID = "f0708b36-b1d7-42b3-a876-1d01b7d8f1a3" WORKFLOW_RUN_ID = "d42ec285-c2fd-4248-8866-5c9386b101ac" @@ -265,7 +265,7 @@ def test_workflow_trace_no_message_id(trace_instance, monkeypatch): trace_instance.add_trace.assert_called_once() -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( 
workflow_id="5745f1b8-f8e6-4859-8110-996acb6c8d6a", tenant_id="tenant-1", @@ -293,7 +293,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): trace_instance.workflow_trace(trace_info) -def test_message_trace_basic(trace_instance, monkeypatch): +def test_message_trace_basic(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability MESSAGE_DATA_ID = "e3a26712-8cac-4a25-94a4-a3bff21ee3ab" CONVERSATION_ID = "9d3f3751-7521-4c19-9307-20e3cf6789a3" @@ -340,7 +340,7 @@ def test_message_trace_basic(trace_instance, monkeypatch): trace_instance.add_span.assert_called_once() -def test_message_trace_with_end_user(trace_instance, monkeypatch): +def test_message_trace_with_end_user(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "85411059-79fb-4deb-a76c-c2e215f1b97e" message_data.from_account_id = "acc-1" @@ -614,7 +614,7 @@ def test_get_project_url_error(trace_instance): trace_instance.get_project_url() -def test_workflow_trace_usage_extraction_error_fixed(trace_instance, monkeypatch, caplog): +def test_workflow_trace_usage_extraction_error_fixed(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): trace_info = WorkflowTraceInfo( workflow_id="86a52565-4a6b-4a1b-9bfd-98e4595e70de", tenant_id="66e8e918-472e-4b69-8051-12502c34fc07", diff --git a/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py b/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py index 6028d0c550..30646815d8 100644 --- a/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py +++ b/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py @@ -267,14 +267,14 @@ class TestInit: with pytest.raises(ValueError, match="Weave login failed"): WeaveDataTrace(config) - def test_init_files_url_from_env(self, mock_wandb, mock_weave, monkeypatch): + def test_init_files_url_from_env(self, mock_wandb, mock_weave, 
monkeypatch: pytest.MonkeyPatch): """Test FILES_URL is read from environment.""" monkeypatch.setenv("FILES_URL", "http://files.example.com") config = _make_weave_config() instance = WeaveDataTrace(config) assert instance.file_base_url == "http://files.example.com" - def test_init_files_url_default(self, mock_wandb, mock_weave, monkeypatch): + def test_init_files_url_default(self, mock_wandb, mock_weave, monkeypatch: pytest.MonkeyPatch): """Test FILES_URL defaults to http://127.0.0.1:5001.""" monkeypatch.delenv("FILES_URL", raising=False) config = _make_weave_config() @@ -302,7 +302,7 @@ class TestGetProjectUrl: url = instance.get_project_url() assert url == "https://wandb.ai/my-project" - def test_get_project_url_exception_raises(self, trace_instance, monkeypatch): + def test_get_project_url_exception_raises(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Raises ValueError when exception occurs in get_project_url.""" monkeypatch.setattr(trace_instance, "entity", None) monkeypatch.setattr(trace_instance, "project_name", None) @@ -583,7 +583,7 @@ class TestFinishCall: class TestWorkflowTrace: - def _setup_repo(self, monkeypatch, nodes=None): + def _setup_repo(self, monkeypatch: pytest.MonkeyPatch, nodes=None): """Helper to patch session/repo dependencies.""" if nodes is None: nodes = [] @@ -599,7 +599,7 @@ class TestWorkflowTrace: monkeypatch.setattr("dify_trace_weave.weave_trace.db", MagicMock(engine="engine")) return repo - def test_workflow_trace_no_nodes_no_message_id(self, trace_instance, monkeypatch): + def test_workflow_trace_no_nodes_no_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace with no nodes and no message_id.""" self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -614,7 +614,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 1 assert trace_instance.finish_call.call_count == 1 - def 
test_workflow_trace_with_message_id(self, trace_instance, monkeypatch): + def test_workflow_trace_with_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace with message_id creates both message and workflow runs.""" self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -629,7 +629,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 2 assert trace_instance.finish_call.call_count == 2 - def test_workflow_trace_with_node_execution(self, trace_instance, monkeypatch): + def test_workflow_trace_with_node_execution(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace iterates node executions and creates node runs.""" node = _make_node( id="node-1", @@ -652,7 +652,7 @@ class TestWorkflowTrace: # workflow run + node run = 2 calls assert trace_instance.start_call.call_count == 2 - def test_workflow_trace_with_llm_node(self, trace_instance, monkeypatch): + def test_workflow_trace_with_llm_node(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """LLM node uses process_data prompts as inputs.""" node = _make_node( node_type=BuiltinNodeTypes.LLM, @@ -680,7 +680,7 @@ class TestWorkflowTrace: # The key "messages" should be present (validator transforms the list) assert "messages" in node_run.inputs - def test_workflow_trace_with_non_llm_node_uses_inputs(self, trace_instance, monkeypatch): + def test_workflow_trace_with_non_llm_node_uses_inputs(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Non-LLM node uses node_execution.inputs directly.""" node = _make_node( node_type=BuiltinNodeTypes.TOOL, @@ -701,7 +701,7 @@ class TestWorkflowTrace: node_run = node_call_args[0][0] assert node_run.inputs.get("tool_input") == "val" - def test_workflow_trace_missing_app_id_raises(self, trace_instance, monkeypatch): + def test_workflow_trace_missing_app_id_raises(self, trace_instance, monkeypatch: pytest.MonkeyPatch): 
"""Raises ValueError when app_id is missing from metadata.""" monkeypatch.setattr("dify_trace_weave.weave_trace.sessionmaker", lambda bind: MagicMock()) monkeypatch.setattr("dify_trace_weave.weave_trace.db", MagicMock(engine="engine")) @@ -714,7 +714,7 @@ class TestWorkflowTrace: with pytest.raises(ValueError, match="No app_id found in trace_info metadata"): trace_instance.workflow_trace(trace_info) - def test_workflow_trace_start_time_none_defaults_to_now(self, trace_instance, monkeypatch): + def test_workflow_trace_start_time_none_defaults_to_now(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """start_time defaults to datetime.now() when None.""" self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -727,7 +727,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 1 - def test_workflow_trace_node_created_at_none(self, trace_instance, monkeypatch): + def test_workflow_trace_node_created_at_none(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Node with created_at=None uses datetime.now().""" node = _make_node(created_at=None, elapsed_time=0.5) self._setup_repo(monkeypatch, nodes=[node]) @@ -740,7 +740,7 @@ class TestWorkflowTrace: trace_instance.workflow_trace(trace_info) assert trace_instance.start_call.call_count == 2 - def test_workflow_trace_chat_mode_llm_node_adds_provider(self, trace_instance, monkeypatch): + def test_workflow_trace_chat_mode_llm_node_adds_provider(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Chat mode LLM node adds ls_provider and ls_model_name to attributes.""" node = _make_node( node_type=BuiltinNodeTypes.LLM, @@ -765,7 +765,7 @@ class TestWorkflowTrace: assert node_run.attributes.get("ls_provider") == "openai" assert node_run.attributes.get("ls_model_name") == "gpt-4" - def test_workflow_trace_nodes_sorted_by_created_at(self, trace_instance, monkeypatch): + def 
test_workflow_trace_nodes_sorted_by_created_at(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Nodes are sorted by created_at before processing.""" node1 = _make_node(id="node-b", created_at=_dt() + timedelta(seconds=2)) node2 = _make_node(id="node-a", created_at=_dt()) @@ -799,7 +799,7 @@ class TestMessageTrace: trace_instance.message_trace(trace_info) trace_instance.start_call.assert_not_called() - def test_basic_message_trace(self, trace_instance, monkeypatch): + def test_basic_message_trace(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace creates message run and llm child run.""" monkeypatch.setattr( "dify_trace_weave.weave_trace.db.session.get", @@ -816,7 +816,7 @@ class TestMessageTrace: assert trace_instance.start_call.call_count == 2 assert trace_instance.finish_call.call_count == 2 - def test_message_trace_with_file_data(self, trace_instance, monkeypatch): + def test_message_trace_with_file_data(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace appends file URL to file_list.""" file_data = MagicMock() file_data.url = "path/to/file.png" @@ -839,7 +839,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert "http://files.test/path/to/file.png" in message_run.file_list - def test_message_trace_with_end_user(self, trace_instance, monkeypatch): + def test_message_trace_with_end_user(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace looks up end user and sets end_user_id attribute.""" end_user = MagicMock() end_user.session_id = "session-xyz" @@ -862,7 +862,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert message_run.attributes.get("end_user_id") == "session-xyz" - def test_message_trace_no_end_user(self, trace_instance, monkeypatch): + def test_message_trace_no_end_user(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace handles when from_end_user_id is None.""" 
mock_db = MagicMock() mock_db.session.get.return_value = None @@ -880,7 +880,7 @@ class TestMessageTrace: trace_instance.message_trace(trace_info) assert trace_instance.start_call.call_count == 2 - def test_message_trace_trace_id_fallback_to_message_id(self, trace_instance, monkeypatch): + def test_message_trace_trace_id_fallback_to_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """trace_id falls back to message_id when trace_id is None.""" mock_db = MagicMock() mock_db.session.get.return_value = None @@ -895,7 +895,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert message_run.id == "msg-1" - def test_message_trace_file_list_none(self, trace_instance, monkeypatch): + def test_message_trace_file_list_none(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace handles file_list=None gracefully.""" mock_db = MagicMock() mock_db.session.get.return_value = None diff --git a/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py b/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py index a907f918c3..37b2331f0f 100644 --- a/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py +++ b/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py @@ -20,7 +20,7 @@ def test_validate_distance_function_rejects_unsupported_values(): factory._validate_distance_function("dot_product") -def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch): +def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch: pytest.MonkeyPatch): factory = AlibabaCloudMySQLVectorFactory() dataset = SimpleNamespace( id="dataset-1", @@ -45,7 +45,7 @@ def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch assert vector_cls.call_args.kwargs["collection_name"] == "existing_collection" -def 
test_factory_init_vector_generates_collection_name_when_index_struct_is_missing(monkeypatch): +def test_factory_init_vector_generates_collection_name_when_index_struct_is_missing(monkeypatch: pytest.MonkeyPatch): factory = AlibabaCloudMySQLVectorFactory() dataset = SimpleNamespace( id="dataset-2", diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py index d1d471761d..2e8052b7dc 100644 --- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py @@ -83,7 +83,7 @@ def test_get_type_is_analyticdb(): assert vector.get_type() == "analyticdb" -def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch): +def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch: pytest.MonkeyPatch): factory = AnalyticdbVectorFactory() dataset = SimpleNamespace(id="dataset-1", index_struct_dict=None, index_struct=None) @@ -109,7 +109,7 @@ def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch): assert dataset.index_struct is not None -def test_factory_builds_sql_config_when_host_is_present(monkeypatch): +def test_factory_builds_sql_config_when_host_is_present(monkeypatch: pytest.MonkeyPatch): factory = AnalyticdbVectorFactory() dataset = SimpleNamespace( id="dataset-2", index_struct_dict={"vector_store": {"class_prefix": "EXISTING"}}, index_struct=None diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py index d2d735ae3e..26bd385333 100644 --- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py @@ -24,7 +24,7 @@ def _request_class(name: str): return _Request -def 
_install_openapi_stubs(monkeypatch): +def _install_openapi_stubs(monkeypatch: pytest.MonkeyPatch): gpdb_package = types.ModuleType("alibabacloud_gpdb20160503") gpdb_package.__path__ = [] gpdb_models = types.ModuleType("alibabacloud_gpdb20160503.models") @@ -130,7 +130,7 @@ def test_openapi_config_to_client_params(): assert params["read_timeout"] == 60000 -def test_init_creates_openapi_client_and_runs_initialize(monkeypatch): +def test_init_creates_openapi_client_and_runs_initialize(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) initialize_mock = MagicMock() monkeypatch.setattr(openapi_module.AnalyticdbVectorOpenAPI, "_initialize", initialize_mock) @@ -145,7 +145,7 @@ def test_init_creates_openapi_client_and_runs_initialize(monkeypatch): initialize_mock.assert_called_once_with() -def test_initialize_skips_when_cached(monkeypatch): +def test_initialize_skips_when_cached(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -164,7 +164,7 @@ def test_initialize_skips_when_cached(monkeypatch): vector._create_namespace_if_not_exists.assert_not_called() -def test_initialize_runs_when_cache_is_missing(monkeypatch): +def test_initialize_runs_when_cache_is_missing(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -184,7 +184,7 @@ def test_initialize_runs_when_cache_is_missing(monkeypatch): openapi_module.redis_client.set.assert_called_once() -def test_initialize_vector_database_calls_openapi_client(monkeypatch): +def test_initialize_vector_database_calls_openapi_client(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -199,7 +199,7 @@ def test_initialize_vector_database_calls_openapi_client(monkeypatch): assert request.manager_account_password == "password" -def 
test_create_namespace_creates_when_namespace_not_found(monkeypatch): +def test_create_namespace_creates_when_namespace_not_found(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -211,7 +211,7 @@ def test_create_namespace_creates_when_namespace_not_found(monkeypatch): vector._client.create_namespace.assert_called_once() -def test_create_namespace_raises_on_unexpected_api_error(monkeypatch): +def test_create_namespace_raises_on_unexpected_api_error(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -222,7 +222,7 @@ def test_create_namespace_raises_on_unexpected_api_error(monkeypatch): vector._create_namespace_if_not_exists() -def test_create_namespace_noop_when_namespace_exists(monkeypatch): +def test_create_namespace_noop_when_namespace_exists(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -234,7 +234,7 @@ def test_create_namespace_noop_when_namespace_exists(monkeypatch): vector._client.create_namespace.assert_not_called() -def test_create_collection_if_not_exists_creates_when_missing(monkeypatch): +def test_create_collection_if_not_exists_creates_when_missing(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) lock = MagicMock() lock.__enter__.return_value = None @@ -255,7 +255,7 @@ def test_create_collection_if_not_exists_creates_when_missing(monkeypatch): openapi_module.redis_client.set.assert_called_once() -def test_create_collection_if_not_exists_skips_when_cached(monkeypatch): +def test_create_collection_if_not_exists_skips_when_cached(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -274,7 +274,7 @@ 
def test_create_collection_if_not_exists_skips_when_cached(monkeypatch): vector._client.create_collection.assert_not_called() -def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch): +def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) lock = MagicMock() lock.__enter__.return_value = None @@ -293,7 +293,7 @@ def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch): vector.create_collection_if_not_exists(embedding_dimension=512) -def test_openapi_add_delete_and_search_methods(monkeypatch): +def test_openapi_add_delete_and_search_methods(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -348,7 +348,7 @@ def test_openapi_add_delete_and_search_methods(monkeypatch): assert docs_by_text[0].page_content == "high" -def test_text_exists_returns_false_when_matches_empty(monkeypatch): +def test_text_exists_returns_false_when_matches_empty(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -361,7 +361,7 @@ def test_text_exists_returns_false_when_matches_empty(monkeypatch): assert vector.text_exists("missing-id") is False -def test_openapi_delete_success(monkeypatch): +def test_openapi_delete_success(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -372,7 +372,7 @@ def test_openapi_delete_success(monkeypatch): vector._client.delete_collection.assert_called_once() -def test_openapi_delete_propagates_errors(monkeypatch): +def test_openapi_delete_propagates_errors(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = 
AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py index 49a2ae72d0..cd255b37cf 100644 --- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py @@ -53,7 +53,7 @@ def test_sql_config_rejects_min_connection_greater_than_max_connection(): AnalyticdbVectorBySqlConfig.model_validate(values) -def test_initialize_skips_when_cache_exists(monkeypatch): +def test_initialize_skips_when_cache_exists(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -70,7 +70,7 @@ def test_initialize_skips_when_cache_exists(monkeypatch): vector._initialize_vector_database.assert_not_called() -def test_initialize_runs_when_cache_is_missing(monkeypatch): +def test_initialize_runs_when_cache_is_missing(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -88,7 +88,7 @@ def test_initialize_runs_when_cache_is_missing(monkeypatch): sql_module.redis_client.set.assert_called_once() -def test_create_connection_pool_uses_psycopg2_pool(monkeypatch): +def test_create_connection_pool_uses_psycopg2_pool(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -119,7 +119,7 @@ def test_get_cursor_context_manager_handles_connection_lifecycle(): pool.putconn.assert_called_once_with(connection) -def test_add_texts_inserts_only_documents_with_metadata(monkeypatch): +def test_add_texts_inserts_only_documents_with_metadata(monkeypatch: pytest.MonkeyPatch): vector = 
AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.table_name = "dify.collection" @@ -273,7 +273,7 @@ def test_delete_drops_table(): cursor.execute.assert_called_once() -def test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypatch): +def test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypatch: pytest.MonkeyPatch): config = AnalyticdbVectorBySqlConfig(**_config_values()) created_pool = MagicMock() @@ -288,7 +288,7 @@ def test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypat assert vector.pool is created_pool -def test_initialize_vector_database_handles_existing_database_and_search_config(monkeypatch): +def test_initialize_vector_database_handles_existing_database_and_search_config(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -326,7 +326,7 @@ def test_initialize_vector_database_handles_existing_database_and_search_config( assert any("CREATE SCHEMA IF NOT EXISTS dify" in call.args[0] for call in worker_cursor.execute.call_args_list) -def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(monkeypatch): +def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -353,7 +353,7 @@ def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(mon worker_connection.rollback.assert_called_once() -def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeypatch): +def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = 
AnalyticdbVectorBySqlConfig(**_config_values()) vector._collection_name = "collection" @@ -381,7 +381,7 @@ def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeyp sql_module.redis_client.set.assert_called_once() -def test_create_collection_if_not_exists_raises_for_non_existing_error(monkeypatch): +def test_create_collection_if_not_exists_raises_for_non_existing_error(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector._collection_name = "collection" diff --git a/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py b/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py index 851c09f47a..f0dddee3b9 100644 --- a/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py +++ b/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py @@ -121,7 +121,7 @@ def _build_fake_pymochow_modules(): @pytest.fixture -def baidu_module(monkeypatch): +def baidu_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pymochow_modules().items(): monkeypatch.setitem(sys.modules, name, module) import dify_vdb_baidu.baidu_vector as module @@ -254,7 +254,7 @@ def test_search_methods_delegate_to_database_table(baidu_module): assert vector._get_search_res.call_count == 2 -def test_factory_initializes_collection_name_and_index_struct(baidu_module, monkeypatch): +def test_factory_initializes_collection_name_and_index_struct(baidu_module, monkeypatch: pytest.MonkeyPatch): factory = baidu_module.BaiduVectorFactory() dataset = SimpleNamespace(id="dataset-1", index_struct_dict=None, index_struct=None) monkeypatch.setattr(baidu_module.Dataset, "gen_collection_name_by_id", lambda _id: "AUTO_COLLECTION") @@ -279,7 +279,7 @@ def test_factory_initializes_collection_name_and_index_struct(baidu_module, monk assert dataset.index_struct is not None -def 
test_init_get_type_to_index_struct_and_create_delegate(baidu_module, monkeypatch): +def test_init_get_type_to_index_struct_and_create_delegate(baidu_module, monkeypatch: pytest.MonkeyPatch): init_client = MagicMock(return_value="client") init_database = MagicMock(return_value="database") monkeypatch.setattr(baidu_module.BaiduVector, "_init_client", init_client) @@ -372,7 +372,7 @@ def test_get_search_result_handles_invalid_metadata_json(baidu_module): assert "document_id" not in docs[0].metadata -def test_init_client_constructs_configuration_and_client(baidu_module, monkeypatch): +def test_init_client_constructs_configuration_and_client(baidu_module, monkeypatch: pytest.MonkeyPatch): credentials = MagicMock(return_value="credentials") configuration = MagicMock(return_value="configuration") client_cls = MagicMock(return_value="client") @@ -411,7 +411,7 @@ def test_init_database_raises_for_unknown_create_database_error(baidu_module): vector._init_database() -def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypatch): +def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._client_config = SimpleNamespace( @@ -460,7 +460,7 @@ def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypat vector._wait_for_index_ready.assert_called_once_with(table, 3600) -def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypatch): +def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._db = MagicMock() @@ -493,7 +493,7 @@ def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypat vector._create_table(3) -def 
test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, monkeypatch): +def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._client_config = SimpleNamespace( @@ -524,7 +524,9 @@ def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, vector._create_table(3) -def test_factory_uses_existing_collection_prefix_when_index_struct_exists(baidu_module, monkeypatch): +def test_factory_uses_existing_collection_prefix_when_index_struct_exists( + baidu_module, monkeypatch: pytest.MonkeyPatch +): factory = baidu_module.BaiduVectorFactory() dataset = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py b/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py index b209c9df96..f18f9a6561 100644 --- a/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py +++ b/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py @@ -44,7 +44,7 @@ def _build_fake_chroma_modules(): @pytest.fixture -def chroma_module(monkeypatch): +def chroma_module(monkeypatch: pytest.MonkeyPatch): fake_chroma = _build_fake_chroma_modules() monkeypatch.setitem(sys.modules, "chromadb", fake_chroma) import dify_vdb_chroma.chroma_vector as module @@ -73,7 +73,7 @@ def test_chroma_config_to_params_builds_expected_payload(chroma_module): assert params["settings"].chroma_client_auth_credentials == "credentials" -def test_create_collection_uses_redis_lock_and_cache(chroma_module, monkeypatch): +def test_create_collection_uses_redis_lock_and_cache(chroma_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -173,7 +173,7 @@ def test_search_by_full_text_returns_empty_list(chroma_module): assert 
vector.search_by_full_text("query") == [] -def test_factory_init_vector_uses_existing_or_generated_collection(chroma_module, monkeypatch): +def test_factory_init_vector_uses_existing_or_generated_collection(chroma_module, monkeypatch: pytest.MonkeyPatch): factory = chroma_module.ChromaVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", index_struct_dict={"vector_store": {"class_prefix": "EXISTING"}}, index_struct=None diff --git a/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py b/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py index a7473f1b91..4f8395e475 100644 --- a/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py +++ b/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py @@ -45,7 +45,7 @@ def _build_fake_clickzetta_module(): @pytest.fixture -def clickzetta_module(monkeypatch): +def clickzetta_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "clickzetta", _build_fake_clickzetta_module()) import dify_vdb_clickzetta.clickzetta_vector as module @@ -218,7 +218,7 @@ def test_search_by_like_returns_documents_with_default_score(clickzetta_module): assert docs[0].metadata["score"] == 0.5 -def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch): +def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch: pytest.MonkeyPatch): factory = clickzetta_module.ClickzettaVectorFactory() dataset = SimpleNamespace(id="dataset-1") @@ -243,7 +243,7 @@ def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch): assert vector_cls.call_args.kwargs["collection_name"] == "collection" -def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch): +def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch: pytest.MonkeyPatch): clickzetta_module.ClickzettaConnectionPool._instance = None 
monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) @@ -255,7 +255,7 @@ def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch assert "username:instance:service:workspace:cluster:dify" in key -def test_connection_pool_create_connection_retries_and_configures(clickzetta_module, monkeypatch): +def test_connection_pool_create_connection_retries_and_configures(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -274,7 +274,7 @@ def test_connection_pool_create_connection_retries_and_configures(clickzetta_mod pool._configure_connection.assert_called_once_with(connection) -def test_connection_pool_create_connection_raises_after_retries(clickzetta_module, monkeypatch): +def test_connection_pool_create_connection_raises_after_retries(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -318,7 +318,7 @@ def test_connection_pool_configure_connection_swallows_errors(clickzetta_module) monkeypatch.undo() -def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monkeypatch): +def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -360,7 +360,7 @@ def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monk assert pool._shutdown is True -def test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module, monkeypatch): +def 
test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = clickzetta_module.ClickzettaConnectionPool.__new__(clickzetta_module.ClickzettaConnectionPool) pool._shutdown = False pool._cleanup_expired_connections = MagicMock(side_effect=lambda: setattr(pool, "_shutdown", True)) @@ -384,7 +384,7 @@ def test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module pool._cleanup_expired_connections.assert_called_once() -def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypatch): +def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() pool.get_connection.return_value = "conn" monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "get_instance", MagicMock(return_value=pool)) @@ -405,7 +405,7 @@ def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypat assert vector._ensure_connection() == "conn" -def test_write_queue_initialization_worker_and_execute_write(clickzetta_module, monkeypatch): +def test_write_queue_initialization_worker_and_execute_write(clickzetta_module, monkeypatch: pytest.MonkeyPatch): class _Thread: def __init__(self, target, daemon): self.target = target @@ -579,7 +579,7 @@ def test_create_inverted_index_branches(clickzetta_module): vector._create_inverted_index(cursor) -def test_add_texts_batches_and_insert_batch_behaviors(clickzetta_module, monkeypatch): +def test_add_texts_batches_and_insert_batch_behaviors(clickzetta_module, monkeypatch: pytest.MonkeyPatch): vector = clickzetta_module.ClickzettaVector.__new__(clickzetta_module.ClickzettaVector) vector._config = _config(clickzetta_module) vector._config.batch_size = 2 @@ -811,7 +811,7 @@ def test_clickzetta_pool_cleanup_and_shutdown_edge_paths(clickzetta_module): assert pool._shutdown is True -def test_clickzetta_pool_cleanup_thread_and_worker_exception_paths(clickzetta_module, monkeypatch): +def 
test_clickzetta_pool_cleanup_thread_and_worker_exception_paths(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = clickzetta_module.ClickzettaConnectionPool.__new__(clickzetta_module.ClickzettaConnectionPool) pool._shutdown = False diff --git a/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py b/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py index 7e5c40b8f2..d474b566d3 100644 --- a/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py +++ b/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py @@ -150,7 +150,7 @@ def _build_fake_couchbase_modules(): @pytest.fixture -def couchbase_module(monkeypatch): +def couchbase_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_couchbase_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -194,7 +194,7 @@ def test_init_sets_cluster_handles(couchbase_module): vector._cluster.wait_until_ready.assert_called_once() -def test_create_and_create_collection_branches(couchbase_module, monkeypatch): +def test_create_and_create_collection_branches(couchbase_module, monkeypatch: pytest.MonkeyPatch): vector = couchbase_module.CouchbaseVector.__new__(couchbase_module.CouchbaseVector) vector._collection_name = "collection_1" vector._client_config = _config(couchbase_module) @@ -319,7 +319,7 @@ def test_search_methods_and_format_metadata(couchbase_module): assert vector._format_metadata({"metadata.a": 1, "plain": 2}) == {"a": 1, "plain": 2} -def test_delete_collection_and_factory(couchbase_module, monkeypatch): +def test_delete_collection_and_factory(couchbase_module, monkeypatch: pytest.MonkeyPatch): vector = couchbase_module.CouchbaseVector("collection_1", _config(couchbase_module)) scopes = [ SimpleNamespace(collections=[SimpleNamespace(name="other")]), diff --git a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py 
b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py index f81ed6beea..91cc2e0fdb 100644 --- a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py +++ b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py @@ -28,7 +28,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture -def elasticsearch_ja_module(monkeypatch): +def elasticsearch_ja_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -39,7 +39,7 @@ def elasticsearch_ja_module(monkeypatch): return importlib.reload(ja_module) -def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch): +def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -57,7 +57,7 @@ def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch): elasticsearch_ja_module.redis_client.set.assert_not_called() -def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monkeypatch): +def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -87,7 +87,7 @@ def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monk elasticsearch_ja_module.redis_client.set.assert_called_once() -def test_ja_factory_uses_existing_or_generated_collection(elasticsearch_ja_module, monkeypatch): +def test_ja_factory_uses_existing_or_generated_collection(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): factory = elasticsearch_ja_module.ElasticSearchJaVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py 
b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py index 48f1f6dc26..d54c105a0f 100644 --- a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py +++ b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py @@ -38,7 +38,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture -def elasticsearch_module(monkeypatch): +def elasticsearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -287,7 +287,7 @@ def test_search_by_vector_and_full_text(elasticsearch_module): assert "bool" in query -def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch): +def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -331,7 +331,7 @@ def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch): elasticsearch_module.redis_client.set.assert_called_once() -def test_elasticsearch_factory_branches(elasticsearch_module, monkeypatch): +def test_elasticsearch_factory_branches(elasticsearch_module, monkeypatch: pytest.MonkeyPatch): factory = elasticsearch_module.ElasticSearchVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py b/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py index f9a557ecce..8b197662e3 100644 --- a/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py +++ b/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py @@ -38,7 +38,7 @@ def _build_fake_hologres_modules(): @pytest.fixture -def hologres_module(monkeypatch): +def hologres_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_hologres_modules().items(): 
monkeypatch.setitem(sys.modules, name, module) @@ -266,7 +266,7 @@ def test_delete_handles_existing_and_missing_tables(hologres_module): vector._client.drop_table.assert_called_once_with(vector.table_name) -def test_create_collection_returns_early_when_cache_hits(hologres_module, monkeypatch): +def test_create_collection_returns_early_when_cache_hits(hologres_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -281,7 +281,7 @@ def test_create_collection_returns_early_when_cache_hits(hologres_module, monkey hologres_module.redis_client.set.assert_not_called() -def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatch): +def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -313,7 +313,7 @@ def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatc hologres_module.redis_client.set.assert_called_once() -def test_create_collection_raises_when_table_never_becomes_ready(hologres_module, monkeypatch): +def test_create_collection_raises_when_table_never_becomes_ready(hologres_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -331,7 +331,7 @@ def test_create_collection_raises_when_table_never_becomes_ready(hologres_module hologres_module.redis_client.set.assert_not_called() -def test_hologres_factory_uses_existing_or_generated_collection(hologres_module, monkeypatch): +def test_hologres_factory_uses_existing_or_generated_collection(hologres_module, monkeypatch: pytest.MonkeyPatch): factory = hologres_module.HologresVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py 
b/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py index ba3f14912b..a1617b6d43 100644 --- a/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py +++ b/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py @@ -29,7 +29,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture -def huawei_module(monkeypatch): +def huawei_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -155,7 +155,7 @@ def test_search_by_vector_and_full_text(huawei_module): assert docs[0].page_content == "text-hit" -def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch): +def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch: pytest.MonkeyPatch): class FakeDocument: def __init__(self, page_content, vector, metadata): self.page_content = page_content @@ -185,7 +185,7 @@ def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch assert docs == [] -def test_create_and_create_collection_paths(huawei_module, monkeypatch): +def test_create_and_create_collection_paths(huawei_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -218,7 +218,7 @@ def test_create_and_create_collection_paths(huawei_module, monkeypatch): huawei_module.redis_client.set.assert_called_once() -def test_huawei_factory_branches(huawei_module, monkeypatch): +def test_huawei_factory_branches(huawei_module, monkeypatch: pytest.MonkeyPatch): factory = huawei_module.HuaweiCloudVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py b/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py index 8c038e82b9..b4ea6ea6c1 100644 --- a/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py +++ 
b/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py @@ -23,7 +23,7 @@ def _build_fake_iris_module(): @pytest.fixture -def iris_module(monkeypatch): +def iris_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "iris", _build_fake_iris_module()) import dify_vdb_iris.iris_vector as module @@ -249,7 +249,7 @@ def test_iris_vector_init_get_cursor_and_create(iris_module): vector._create_collection.assert_called_once_with(2) -def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch): +def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch: pytest.MonkeyPatch): with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", _config(iris_module)) @@ -297,7 +297,7 @@ def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch): assert docs[0].metadata["score"] == pytest.approx(0.9) -def test_iris_vector_full_text_search_paths(iris_module, monkeypatch): +def test_iris_vector_full_text_search_paths(iris_module, monkeypatch: pytest.MonkeyPatch): cfg = _config(iris_module, IRIS_TEXT_INDEX=True) with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", cfg) @@ -344,7 +344,7 @@ def test_iris_vector_full_text_search_paths(iris_module, monkeypatch): assert vector_like.search_by_full_text("100%", top_k=1) == [] -def test_iris_vector_delete_create_collection_and_factory(iris_module, monkeypatch): +def test_iris_vector_delete_create_collection_and_factory(iris_module, monkeypatch: pytest.MonkeyPatch): with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", _config(iris_module, IRIS_TEXT_INDEX=True)) diff --git a/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py b/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py index 238145c1d6..4a408d1b10 100644 --- 
a/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py +++ b/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py @@ -47,7 +47,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def lindorm_module(monkeypatch): +def lindorm_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -100,7 +100,7 @@ def test_to_opensearch_params_and_init(lindorm_module): assert vector_ugc._routing == "route" -def test_create_refresh_and_add_texts_success(lindorm_module, monkeypatch): +def test_create_refresh_and_add_texts_success(lindorm_module, monkeypatch: pytest.MonkeyPatch): vector = lindorm_module.LindormVectorStore( "collection", _config(lindorm_module), using_ugc=True, routing_value="route" ) @@ -301,7 +301,7 @@ def test_search_by_full_text_success_and_error(lindorm_module): vector.search_by_full_text("hello") -def test_create_collection_paths(lindorm_module, monkeypatch): +def test_create_collection_paths(lindorm_module, monkeypatch: pytest.MonkeyPatch): vector = lindorm_module.LindormVectorStore("collection", _config(lindorm_module), using_ugc=False) with pytest.raises(ValueError, match="cannot be empty"): @@ -331,7 +331,7 @@ def test_create_collection_paths(lindorm_module, monkeypatch): vector._client.indices.create.assert_not_called() -def test_lindorm_factory_branches(lindorm_module, monkeypatch): +def test_lindorm_factory_branches(lindorm_module, monkeypatch: pytest.MonkeyPatch): factory = lindorm_module.LindormVectorStoreFactory() monkeypatch.setattr(lindorm_module.dify_config, "LINDORM_URL", "http://localhost:9200") diff --git a/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py b/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py index c22f4304e5..762ec330b2 100644 --- a/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py +++ 
b/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py @@ -32,7 +32,7 @@ def _build_fake_mo_vector_modules(): @pytest.fixture -def matrixone_module(monkeypatch): +def matrixone_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_mo_vector_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -70,7 +70,7 @@ def test_matrixone_config_validation(matrixone_module, field, value, message): matrixone_module.MatrixoneConfig.model_validate(values) -def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, monkeypatch): +def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -86,7 +86,7 @@ def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, matrixone_module.redis_client.set.assert_called_once() -def test_get_client_skips_index_creation_when_cache_hits(matrixone_module, monkeypatch): +def test_get_client_skips_index_creation_when_cache_hits(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -146,7 +146,7 @@ def test_get_type_and_create_delegate_to_add_texts(matrixone_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_get_client_handles_full_text_index_creation_error(matrixone_module, monkeypatch): +def test_get_client_handles_full_text_index_creation_error(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -165,7 +165,7 @@ def test_get_client_handles_full_text_index_creation_error(matrixone_module, mon matrixone_module.redis_client.set.assert_not_called() -def test_add_texts_generates_ids_and_inserts(matrixone_module, monkeypatch): +def test_add_texts_generates_ids_and_inserts(matrixone_module, monkeypatch: 
pytest.MonkeyPatch): vector = matrixone_module.MatrixoneVector("collection_1", _valid_config(matrixone_module)) vector.client = MagicMock() monkeypatch.setattr(matrixone_module.uuid, "uuid4", lambda: "generated-uuid") @@ -224,7 +224,7 @@ def test_search_by_vector_builds_documents(matrixone_module): assert vector.client.query.call_args.kwargs["filter"] == {"document_id": {"$in": ["d-1"]}} -def test_matrixone_factory_uses_existing_or_generated_collection(matrixone_module, monkeypatch): +def test_matrixone_factory_uses_existing_or_generated_collection(matrixone_module, monkeypatch: pytest.MonkeyPatch): factory = matrixone_module.MatrixoneVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py b/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py index 36c0ed8f6f..730ff9f296 100644 --- a/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py +++ b/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py @@ -99,7 +99,7 @@ def _build_fake_pymilvus_modules(): @pytest.fixture -def milvus_module(monkeypatch): +def milvus_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pymilvus_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -327,7 +327,7 @@ def test_process_search_results_and_search_methods(milvus_module): assert "document_id" in vector._client.search.call_args.kwargs["filter"] -def test_create_collection_cache_and_existing_collection(milvus_module, monkeypatch): +def test_create_collection_cache_and_existing_collection(milvus_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -351,7 +351,7 @@ def test_create_collection_cache_and_existing_collection(milvus_module, monkeypa milvus_module.redis_client.set.assert_called() -def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch): +def 
test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -385,7 +385,7 @@ def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch) assert call_kwargs["consistency_level"] == "Session" -def test_factory_initializes_milvus_vector(milvus_module, monkeypatch): +def test_factory_initializes_milvus_vector(milvus_module, monkeypatch: pytest.MonkeyPatch): factory = milvus_module.MilvusVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py b/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py index 228ea92639..900c75fdab 100644 --- a/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py +++ b/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py @@ -38,7 +38,7 @@ def _build_fake_clickhouse_connect_module(): @pytest.fixture -def myscale_module(monkeypatch): +def myscale_module(monkeypatch: pytest.MonkeyPatch): fake_module = _build_fake_clickhouse_connect_module() monkeypatch.setitem(sys.modules, "clickhouse_connect", fake_module) @@ -90,7 +90,7 @@ def test_delete_by_ids_short_circuits_on_empty_list(myscale_module): vector._client.command.assert_not_called() -def test_factory_initializes_lower_case_collection_name(myscale_module, monkeypatch): +def test_factory_initializes_lower_case_collection_name(myscale_module, monkeypatch: pytest.MonkeyPatch): factory = myscale_module.MyScaleVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", @@ -160,7 +160,7 @@ def test_create_collection_builds_expected_sql(myscale_module): assert "INDEX text_idx text TYPE fts('tokenizer=unicode')" in sql -def test_add_texts_inserts_rows_and_returns_ids(myscale_module, monkeypatch): +def test_add_texts_inserts_rows_and_returns_ids(myscale_module, monkeypatch: pytest.MonkeyPatch): vector 
= myscale_module.MyScaleVector("collection_1", _config(myscale_module)) monkeypatch.setattr(myscale_module.uuid, "uuid4", lambda: "generated-uuid") docs = [ diff --git a/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py b/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py index 31f9ff3e56..36393cc486 100644 --- a/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py +++ b/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py @@ -53,7 +53,7 @@ def _build_fake_pyobvector_module(): @pytest.fixture -def oceanbase_module(monkeypatch): +def oceanbase_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "pyobvector", _build_fake_pyobvector_module()) import dify_vdb_oceanbase.oceanbase_vector as module @@ -208,7 +208,7 @@ def test_create_delegates_to_collection_and_insert(oceanbase_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_module, monkeypatch): +def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -234,7 +234,7 @@ def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_mod vector.delete.assert_not_called() -def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, monkeypatch): +def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -271,7 +271,7 @@ def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, mo oceanbase_module.redis_client.set.assert_called_once() -def test_create_collection_error_paths(oceanbase_module, monkeypatch): +def test_create_collection_error_paths(oceanbase_module, monkeypatch: 
pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -308,7 +308,7 @@ def test_create_collection_error_paths(oceanbase_module, monkeypatch): vector._create_collection() -def test_create_collection_fulltext_and_metadata_index_exceptions(oceanbase_module, monkeypatch): +def test_create_collection_fulltext_and_metadata_index_exceptions(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -517,7 +517,7 @@ def test_delete_success_and_exception(oceanbase_module): vector.delete() -def test_oceanbase_factory_uses_existing_or_generated_collection(oceanbase_module, monkeypatch): +def test_oceanbase_factory_uses_existing_or_generated_collection(oceanbase_module, monkeypatch: pytest.MonkeyPatch): factory = oceanbase_module.OceanBaseVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py b/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py index 09abd625fc..57c9b14d9f 100644 --- a/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py +++ b/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py @@ -37,7 +37,7 @@ def _build_fake_psycopg2_modules(): @pytest.fixture -def opengauss_module(monkeypatch): +def opengauss_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_psycopg2_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -88,7 +88,7 @@ def test_opengauss_config_validation_rejects_min_greater_than_max(opengauss_modu opengauss_module.OpenGaussConfig.model_validate(values) -def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch): +def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", 
MagicMock(return_value=pool)) @@ -99,7 +99,7 @@ def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch): assert vector.pool is pool -def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch): +def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -126,7 +126,7 @@ def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_called_once() -def test_create_index_skips_index_sql_for_large_dimension(opengauss_module, monkeypatch): +def test_create_index_skips_index_sql_for_large_dimension(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -158,7 +158,7 @@ def test_search_by_vector_validates_top_k(opengauss_module): vector.search_by_vector([0.1, 0.2], top_k=0) -def test_delete_by_ids_short_circuits_with_empty_input(opengauss_module, monkeypatch): +def test_delete_by_ids_short_circuits_with_empty_input(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) vector = opengauss_module.OpenGauss("collection_1", _config(opengauss_module)) @@ -200,7 +200,7 @@ def test_create_calls_collection_insert_and_index(opengauss_module): vector._create_index.assert_called_once_with(2) -def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch): +def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -220,7 +220,7 @@ def test_create_index_returns_early_on_cache_hit(opengauss_module, 
monkeypatch): opengauss_module.redis_client.set.assert_not_called() -def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, monkeypatch): +def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -245,7 +245,7 @@ def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, m assert any("embedding_cosine_embedding_collection_1_idx" in query for query in sql) -def test_add_texts_uses_execute_values(opengauss_module, monkeypatch): +def test_add_texts_uses_execute_values(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) vector = opengauss_module.OpenGauss("collection_1", _config(opengauss_module)) @@ -342,7 +342,7 @@ def test_search_by_full_text_validates_top_k(opengauss_module): vector.search_by_full_text("query", top_k=0) -def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch): +def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) lock = MagicMock() @@ -370,7 +370,7 @@ def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_called_once() -def test_opengauss_factory_uses_existing_or_generated_collection(opengauss_module, monkeypatch): +def test_opengauss_factory_uses_existing_or_generated_collection(opengauss_module, monkeypatch: pytest.MonkeyPatch): factory = opengauss_module.OpenGaussFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-opensearch/src/dify_vdb_opensearch/opensearch_vector.py 
b/api/providers/vdb/vdb-opensearch/src/dify_vdb_opensearch/opensearch_vector.py index 843c495d82..d6998f6672 100644 --- a/api/providers/vdb/vdb-opensearch/src/dify_vdb_opensearch/opensearch_vector.py +++ b/api/providers/vdb/vdb-opensearch/src/dify_vdb_opensearch/opensearch_vector.py @@ -81,14 +81,15 @@ class OpenSearchConfig(BaseModel): pool_maxsize=20, ) - if self.auth_method == "basic": - logger.info("Using basic authentication for OpenSearch Vector DB") + match self.auth_method: + case AuthMethod.BASIC: + logger.info("Using basic authentication for OpenSearch Vector DB") - params["http_auth"] = (self.user, self.password) - elif self.auth_method == "aws_managed_iam": - logger.info("Using AWS managed IAM role for OpenSearch Vector DB") + params["http_auth"] = (self.user, self.password) + case AuthMethod.AWS_MANAGED_IAM: + logger.info("Using AWS managed IAM role for OpenSearch Vector DB") - params["http_auth"] = self.create_aws_managed_iam_auth() + params["http_auth"] = self.create_aws_managed_iam_auth() return params diff --git a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py index f2ed7cb6fb..b2b004a4de 100644 --- a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py +++ b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py @@ -59,7 +59,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def opensearch_module(monkeypatch): +def opensearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -95,7 +95,7 @@ class TestOpenSearchConfig: assert params["connection_class"].__name__ == "Urllib3HttpConnection" assert params["http_auth"] == ("admin", "password") - def test_to_opensearch_params_with_aws_managed_iam(self, opensearch_module, monkeypatch): + def test_to_opensearch_params_with_aws_managed_iam(self, opensearch_module, monkeypatch: 
pytest.MonkeyPatch): class _Session: def get_credentials(self): return "creds" diff --git a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py index 1c2921f85b..80bf20e820 100644 --- a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py +++ b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py @@ -58,7 +58,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def opensearch_module(monkeypatch): +def opensearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -116,7 +116,7 @@ def test_config_validation_for_aws_auth_and_https_fields(opensearch_module): opensearch_module.OpenSearchConfig.model_validate(values) -def test_create_aws_managed_iam_auth(opensearch_module, monkeypatch): +def test_create_aws_managed_iam_auth(opensearch_module, monkeypatch: pytest.MonkeyPatch): class _Session: def get_credentials(self): return "creds" @@ -167,7 +167,7 @@ def test_init_and_create_delegate_calls(opensearch_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_supports_regular_and_aoss_clients(opensearch_module, monkeypatch): +def test_add_texts_supports_regular_and_aoss_clients(opensearch_module, monkeypatch: pytest.MonkeyPatch): vector = opensearch_module.OpenSearchVector("Collection_1", _config(opensearch_module, aws_service="es")) docs = [ Document(page_content="a", metadata={"doc_id": "1"}), @@ -308,7 +308,7 @@ def test_search_by_full_text_and_filters(opensearch_module): assert query["query"]["bool"]["filter"] == [{"terms": {"metadata.document_id": ["d-1"]}}] -def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch): +def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() 
lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -331,7 +331,7 @@ def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch) opensearch_module.redis_client.set.assert_called() -def test_opensearch_factory_initializes_expected_collection_name(opensearch_module, monkeypatch): +def test_opensearch_factory_initializes_expected_collection_name(opensearch_module, monkeypatch: pytest.MonkeyPatch): factory = opensearch_module.OpenSearchVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py b/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py index 678cf876b0..46027c7e44 100644 --- a/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py +++ b/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py @@ -51,7 +51,7 @@ def _connection_with_cursor(cursor): @pytest.fixture -def oracle_module(monkeypatch): +def oracle_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_oracle_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -94,7 +94,7 @@ def test_oracle_config_validation_autonomous_requirements(oracle_module): ) -def test_init_and_get_type(oracle_module, monkeypatch): +def test_init_and_get_type(oracle_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(oracle_module.oracledb, "create_pool", MagicMock(return_value=pool)) vector = oracle_module.OracleVector("collection_1", _config(oracle_module)) @@ -139,7 +139,7 @@ def test_numpy_converters_and_type_handlers(oracle_module): assert out_float64.dtype == numpy.float64 -def test_get_connection_supports_standard_and_autonomous_paths(oracle_module, monkeypatch): +def test_get_connection_supports_standard_and_autonomous_paths(oracle_module, monkeypatch: pytest.MonkeyPatch): connect = MagicMock(return_value="connection") monkeypatch.setattr(oracle_module.oracledb, "connect", connect) @@ 
-173,7 +173,7 @@ def test_create_delegates_collection_and_insert(oracle_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_inserts_and_logs_on_failures(oracle_module, monkeypatch): +def test_add_texts_inserts_and_logs_on_failures(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" vector.input_type_handler = MagicMock() @@ -279,7 +279,7 @@ def _fake_nltk_module(*, missing_data=False): return nltk, nltk_corpus -def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatch): +def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" @@ -305,7 +305,7 @@ def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatc assert "doc_id_0" in en_params -def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, monkeypatch): +def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" vector._get_connection = MagicMock() @@ -320,7 +320,7 @@ def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, monkeyp vector.search_by_full_text("english query") -def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch): +def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -346,7 +346,9 @@ def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch): oracle_module.redis_client.set.assert_called_once() -def 
test_oracle_factory_init_vector_uses_existing_or_generated_collection(oracle_module, monkeypatch): +def test_oracle_factory_init_vector_uses_existing_or_generated_collection( + oracle_module, monkeypatch: pytest.MonkeyPatch +): factory = oracle_module.OracleVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py b/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py index c3291f7f12..1841e88139 100644 --- a/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py +++ b/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py @@ -79,7 +79,7 @@ def _patch_both(monkeypatch, module, calls, execute_results=None): @pytest.fixture -def pgvecto_module(monkeypatch): +def pgvecto_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pgvecto_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -126,7 +126,7 @@ def test_collection_base_has_expected_annotations(pgvecto_module): assert {"id", "text", "meta", "vector"} <= set(annotations) -def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch): +def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module session_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -145,7 +145,7 @@ def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch): +def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module session_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -169,7 +169,7 @@ def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch): 
module.redis_client.set.assert_called() -def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch): +def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module init_calls = [] runtime_calls = [] @@ -241,7 +241,7 @@ def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch): assert any("DROP TABLE IF EXISTS collection_1" in str(args[0]) for args, _ in runtime_calls) -def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch): +def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module init_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -313,7 +313,7 @@ def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch): assert vector.search_by_full_text("hello") == [] -def test_factory_uses_existing_or_generated_collection(pgvecto_module, monkeypatch): +def test_factory_uses_existing_or_generated_collection(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module factory = module.PGVectoRSFactory() dataset_with_index = SimpleNamespace( diff --git a/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py b/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py index 99a6e00c16..38e472df63 100644 --- a/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py +++ b/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py @@ -336,7 +336,7 @@ def test_create_delegates_collection_creation_and_insert(): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_uses_execute_values_and_returns_ids(monkeypatch): +def test_add_texts_uses_execute_values_and_returns_ids(monkeypatch: pytest.MonkeyPatch): vector = PGVector.__new__(PGVector) vector.table_name = "embedding_collection_1" @@ -387,7 +387,7 @@ def test_text_get_and_delete_methods(): assert any("DROP TABLE IF EXISTS 
embedding_collection_1" in sql for sql in executed_sql) -def test_delete_by_ids_handles_empty_undefined_table_and_generic_exception(monkeypatch): +def test_delete_by_ids_handles_empty_undefined_table_and_generic_exception(monkeypatch: pytest.MonkeyPatch): vector = PGVector.__new__(PGVector) vector.table_name = "embedding_collection_1" cursor = MagicMock() @@ -464,7 +464,7 @@ def test_search_by_full_text_branches_for_bigm_and_standard(): assert "bigm_similarity" in cursor.execute.call_args_list[1].args[0] -def test_pgvector_factory_initializes_expected_collection_name(monkeypatch): +def test_pgvector_factory_initializes_expected_collection_name(monkeypatch: pytest.MonkeyPatch): factory = pgvector_module.PGVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py b/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py index 0ed5491fbe..89ee0a47f1 100644 --- a/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py +++ b/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py @@ -121,7 +121,7 @@ def _build_fake_qdrant_modules(): @pytest.fixture -def qdrant_module(monkeypatch): +def qdrant_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_qdrant_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -170,7 +170,7 @@ def test_init_and_basic_behaviour(qdrant_module): vector.add_texts.assert_called_once() -def test_create_collection_and_add_texts(qdrant_module, monkeypatch): +def test_create_collection_and_add_texts(qdrant_module, monkeypatch: pytest.MonkeyPatch): vector = qdrant_module.QdrantVector("collection_1", "group-1", _config(qdrant_module)) lock = MagicMock() lock.__enter__.return_value = None @@ -288,7 +288,7 @@ def test_search_and_helper_methods(qdrant_module): assert doc.page_content == "doc" -def test_qdrant_factory_paths(qdrant_module, monkeypatch): +def 
test_qdrant_factory_paths(qdrant_module, monkeypatch: pytest.MonkeyPatch): factory = qdrant_module.QdrantVectorFactory() dataset = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py b/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py index f97ad1400a..c5f3a9f847 100644 --- a/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py +++ b/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py @@ -59,7 +59,7 @@ def _patch_both(monkeypatch, module, session): @pytest.fixture -def relyt_module(monkeypatch): +def relyt_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_relyt_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -97,7 +97,7 @@ def test_relyt_config_validation(relyt_module, field, value, message): relyt_module.RelytConfig.model_validate(values) -def test_init_get_type_and_create_delegate(relyt_module, monkeypatch): +def test_init_get_type_and_create_delegate(relyt_module, monkeypatch: pytest.MonkeyPatch): engine = MagicMock() monkeypatch.setattr(relyt_module, "create_engine", MagicMock(return_value=engine)) vector = relyt_module.RelytVector("collection_1", _config(relyt_module), group_id="group-1") @@ -114,7 +114,7 @@ def test_init_get_type_and_create_delegate(relyt_module, monkeypatch): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch): +def test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -142,7 +142,7 @@ def test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch): relyt_module.redis_client.set.assert_called_once() -def test_add_texts_and_metadata_queries(relyt_module, monkeypatch): +def test_add_texts_and_metadata_queries(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = 
relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector._group_id = "group-1" @@ -212,7 +212,7 @@ def test_delete_by_metadata_field_calls_delete_by_uuids(relyt_module): # 3. delete_by_ids translates to uuids -def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch): +def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -225,7 +225,7 @@ def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch): # 4. text_exists True -def test_text_exists_true(relyt_module, monkeypatch): +def test_text_exists_true(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -236,7 +236,7 @@ def test_text_exists_true(relyt_module, monkeypatch): # 5. text_exists False -def test_text_exists_false(relyt_module, monkeypatch): +def test_text_exists_false(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -284,7 +284,7 @@ def test_search_by_vector_filters_by_score_and_ids(relyt_module): # 8. 
delete commits session -def test_delete_drops_table(relyt_module, monkeypatch): +def test_delete_drops_table(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -295,7 +295,7 @@ def test_delete_drops_table(relyt_module, monkeypatch): session.execute.assert_called_once() -def test_relyt_factory_existing_and_generated_collection(relyt_module, monkeypatch): +def test_relyt_factory_existing_and_generated_collection(relyt_module, monkeypatch: pytest.MonkeyPatch): factory = relyt_module.RelytVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py b/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py index 62a11e0445..49d4b160cf 100644 --- a/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py +++ b/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py @@ -77,7 +77,7 @@ def _build_fake_tablestore_module(): @pytest.fixture -def tablestore_module(monkeypatch): +def tablestore_module(monkeypatch: pytest.MonkeyPatch): fake_module = _build_fake_tablestore_module() monkeypatch.setitem(sys.modules, "tablestore", fake_module) @@ -177,7 +177,7 @@ def test_get_by_ids_text_exists_delete_and_wrappers(tablestore_module): vector._delete_table_if_exist.assert_called_once() -def test_create_collection_and_table_index_lifecycle(tablestore_module, monkeypatch): +def test_create_collection_and_table_index_lifecycle(tablestore_module, monkeypatch: pytest.MonkeyPatch): vector = tablestore_module.TableStoreVector("collection_1", _config(tablestore_module)) lock = MagicMock() lock.__enter__.return_value = None @@ -289,7 +289,7 @@ def test_write_row_and_search_helpers(tablestore_module): assert "score" not in docs[0].metadata -def 
test_tablestore_factory_uses_existing_or_generated_collection(tablestore_module, monkeypatch): +def test_tablestore_factory_uses_existing_or_generated_collection(tablestore_module, monkeypatch: pytest.MonkeyPatch): factory = tablestore_module.TableStoreVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py b/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py index 299e40ee1e..e1fe227a29 100644 --- a/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py +++ b/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py @@ -136,7 +136,7 @@ def _build_fake_tencent_modules(): @pytest.fixture -def tencent_module(monkeypatch): +def tencent_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_tencent_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -187,7 +187,7 @@ def test_config_and_init_paths(tencent_module): assert vector._enable_hybrid_search is False -def test_create_collection_branches(tencent_module, monkeypatch): +def test_create_collection_branches(tencent_module, monkeypatch: pytest.MonkeyPatch): vector = tencent_module.TencentVector("collection_1", _config(tencent_module)) lock = MagicMock() @@ -279,7 +279,7 @@ def test_create_add_delete_and_search_behaviour(tencent_module): vector._client.drop_collection.assert_called_once() -def test_tencent_factory_existing_and_generated_collection(tencent_module, monkeypatch): +def test_tencent_factory_existing_and_generated_collection(tencent_module, monkeypatch: pytest.MonkeyPatch): factory = tencent_module.TencentVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tidb-on-qdrant/src/dify_vdb_tidb_on_qdrant/tidb_service.py b/api/providers/vdb/vdb-tidb-on-qdrant/src/dify_vdb_tidb_on_qdrant/tidb_service.py index ece061db67..6283dbb986 100644 --- 
a/api/providers/vdb/vdb-tidb-on-qdrant/src/dify_vdb_tidb_on_qdrant/tidb_service.py +++ b/api/providers/vdb/vdb-tidb-on-qdrant/src/dify_vdb_tidb_on_qdrant/tidb_service.py @@ -246,8 +246,18 @@ class TidbService: userPrefix = item["userPrefix"] if state == "ACTIVE" and len(userPrefix) > 0: cluster_info = tidb_serverless_list_map[item["clusterId"]] - cluster_info.status = TidbAuthBindingStatus.ACTIVE cluster_info.account = f"{userPrefix}.root" + if not cluster_info.qdrant_endpoint: + cluster_info.qdrant_endpoint = TidbService.extract_qdrant_endpoint( + item + ) or TidbService.fetch_qdrant_endpoint(api_url, public_key, private_key, item["clusterId"]) + if cluster_info.qdrant_endpoint: + cluster_info.status = TidbAuthBindingStatus.ACTIVE + else: + logger.warning( + "Cluster %s is ACTIVE but qdrant endpoint is not ready; will retry later", + item["clusterId"], + ) db.session.add(cluster_info) db.session.commit() else: diff --git a/api/providers/vdb/vdb-tidb-on-qdrant/tests/unit_tests/test_tidb_service.py b/api/providers/vdb/vdb-tidb-on-qdrant/tests/unit_tests/test_tidb_service.py index c1ffbacbbc..20a42f6cc3 100644 --- a/api/providers/vdb/vdb-tidb-on-qdrant/tests/unit_tests/test_tidb_service.py +++ b/api/providers/vdb/vdb-tidb-on-qdrant/tests/unit_tests/test_tidb_service.py @@ -1,8 +1,11 @@ +from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest from dify_vdb_tidb_on_qdrant.tidb_service import TidbService +from models.enums import TidbAuthBindingStatus + class TestExtractQdrantEndpoint: """Unit tests for TidbService.extract_qdrant_endpoint.""" @@ -216,3 +219,86 @@ class TestBatchCreateEdgeCases: private_key="priv", region="us-east-1", ) + + +class TestBatchUpdateTidbServerlessClusterStatus: + """Verify that status updates only expose clusters after qdrant endpoint is ready.""" + + @patch("dify_vdb_tidb_on_qdrant.tidb_service.db") + @patch("dify_vdb_tidb_on_qdrant.tidb_service._tidb_http_client") + def 
test_sets_active_when_batch_response_contains_endpoint(self, mock_http, mock_db): + binding = SimpleNamespace( + cluster_id="c-1", + status=TidbAuthBindingStatus.CREATING, + account="root", + qdrant_endpoint=None, + ) + mock_http.get.return_value = MagicMock( + status_code=200, + json=lambda: { + "clusters": [ + { + "clusterId": "c-1", + "state": "ACTIVE", + "userPrefix": "pfx", + "endpoints": {"public": {"host": "gw.tidbcloud.com"}}, + } + ] + }, + ) + + TidbService.batch_update_tidb_serverless_cluster_status([binding], "proj", "url", "iam", "pub", "priv") + + assert binding.account == "pfx.root" + assert binding.qdrant_endpoint == "https://qdrant-gw.tidbcloud.com" + assert binding.status == TidbAuthBindingStatus.ACTIVE + mock_db.session.add.assert_called_once_with(binding) + mock_db.session.commit.assert_called_once() + + @patch.object(TidbService, "fetch_qdrant_endpoint", return_value="https://qdrant-gw.tidbcloud.com") + @patch("dify_vdb_tidb_on_qdrant.tidb_service.db") + @patch("dify_vdb_tidb_on_qdrant.tidb_service._tidb_http_client") + def test_fetches_endpoint_when_batch_response_omits_it(self, mock_http, mock_db, mock_fetch_endpoint): + binding = SimpleNamespace( + cluster_id="c-1", + status=TidbAuthBindingStatus.CREATING, + account="root", + qdrant_endpoint=None, + ) + mock_http.get.return_value = MagicMock( + status_code=200, + json=lambda: {"clusters": [{"clusterId": "c-1", "state": "ACTIVE", "userPrefix": "pfx", "endpoints": {}}]}, + ) + + TidbService.batch_update_tidb_serverless_cluster_status([binding], "proj", "url", "iam", "pub", "priv") + + assert binding.account == "pfx.root" + assert binding.qdrant_endpoint == "https://qdrant-gw.tidbcloud.com" + assert binding.status == TidbAuthBindingStatus.ACTIVE + mock_fetch_endpoint.assert_called_once_with("url", "pub", "priv", "c-1") + mock_db.session.add.assert_called_once_with(binding) + mock_db.session.commit.assert_called_once() + + @patch.object(TidbService, "fetch_qdrant_endpoint", return_value=None) + 
@patch("dify_vdb_tidb_on_qdrant.tidb_service.db") + @patch("dify_vdb_tidb_on_qdrant.tidb_service._tidb_http_client") + def test_keeps_creating_when_endpoint_is_not_ready(self, mock_http, mock_db, mock_fetch_endpoint): + binding = SimpleNamespace( + cluster_id="c-1", + status=TidbAuthBindingStatus.CREATING, + account="root", + qdrant_endpoint=None, + ) + mock_http.get.return_value = MagicMock( + status_code=200, + json=lambda: {"clusters": [{"clusterId": "c-1", "state": "ACTIVE", "userPrefix": "pfx", "endpoints": {}}]}, + ) + + TidbService.batch_update_tidb_serverless_cluster_status([binding], "proj", "url", "iam", "pub", "priv") + + assert binding.account == "pfx.root" + assert binding.qdrant_endpoint is None + assert binding.status == TidbAuthBindingStatus.CREATING + mock_fetch_endpoint.assert_called_once_with("url", "pub", "priv", "c-1") + mock_db.session.add.assert_called_once_with(binding) + mock_db.session.commit.assert_called_once() diff --git a/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py b/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py index bdbed2f740..ed03cbee88 100644 --- a/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py +++ b/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py @@ -46,7 +46,7 @@ def test_tidb_config_validation(tidb_module, field, value, message): tidb_module.TiDBVectorConfig.model_validate(values) -def test_init_get_type_and_distance_func(tidb_module, monkeypatch): +def test_init_get_type_and_distance_func(tidb_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(tidb_module, "create_engine", MagicMock(return_value="engine")) vector = tidb_module.TiDBVector("collection_1", _config(tidb_module), distance_func="L2") @@ -63,7 +63,7 @@ def test_init_get_type_and_distance_func(tidb_module, monkeypatch): assert vector._get_distance_func() == "VEC_COSINE_DISTANCE" -def test_table_builds_columns_with_tidb_vector_type(tidb_module, monkeypatch): 
+def test_table_builds_columns_with_tidb_vector_type(tidb_module, monkeypatch: pytest.MonkeyPatch): fake_tidb_vector = types.ModuleType("tidb_vector") fake_tidb_sqlalchemy = types.ModuleType("tidb_vector.sqlalchemy") @@ -107,7 +107,7 @@ def test_create_calls_collection_and_add_texts(tidb_module): assert vector._dimension == 2 -def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch): +def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -127,7 +127,7 @@ def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch): tidb_module.redis_client.set.assert_not_called() -def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monkeypatch): +def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -160,7 +160,7 @@ def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monke tidb_module.redis_client.set.assert_called_once() -def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch): +def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch: pytest.MonkeyPatch): class _InsertStmt: def __init__(self, table): self.table = table @@ -198,7 +198,7 @@ def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch): @pytest.fixture -def tidb_vector_with_session(tidb_module, monkeypatch): +def tidb_vector_with_session(tidb_module, monkeypatch: pytest.MonkeyPatch): vector = tidb_module.TiDBVector.__new__(tidb_module.TiDBVector) vector._collection_name = "collection_1" vector._engine = MagicMock() @@ -354,7 +354,7 @@ def test_delete_by_metadata_field_does_nothing_when_no_ids(tidb_module): # Test search_by_vector filters and scores -def test_search_by_vector_filters_and_scores(tidb_module, 
monkeypatch): +def test_search_by_vector_filters_and_scores(tidb_module, monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.execute.return_value = [ ('{"doc_id":"id-1","document_id":"d-1"}', "text-1", 0.2), @@ -392,7 +392,7 @@ def test_search_by_vector_filters_and_scores(tidb_module, monkeypatch): # Test delete drops table -def test_delete_drops_table(tidb_module, monkeypatch): +def test_delete_drops_table(tidb_module, monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.execute.return_value = None @@ -413,7 +413,7 @@ def test_delete_drops_table(tidb_module, monkeypatch): assert "DROP TABLE IF EXISTS collection_1" in drop_sql -def test_tidb_factory_uses_existing_or_generated_collection(tidb_module, monkeypatch): +def test_tidb_factory_uses_existing_or_generated_collection(tidb_module, monkeypatch: pytest.MonkeyPatch): factory = tidb_module.TiDBVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py b/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py index a884275c89..55d27ad264 100644 --- a/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py +++ b/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py @@ -36,7 +36,7 @@ def _build_fake_upstash_module(): @pytest.fixture -def upstash_module(monkeypatch): +def upstash_module(monkeypatch: pytest.MonkeyPatch): # Remove patched modules if present for modname in ["upstash_vector", "dify_vdb_upstash.upstash_vector"]: if modname in sys.modules: @@ -65,7 +65,7 @@ def test_upstash_config_validation(upstash_module, field, value, message): upstash_module.UpstashVectorConfig.model_validate(values) -def test_init_get_type_and_dimension(upstash_module, monkeypatch): +def test_init_get_type_and_dimension(upstash_module, monkeypatch: pytest.MonkeyPatch): vector = upstash_module.UpstashVector("collection_1", _config(upstash_module)) assert vector.get_type() == 
upstash_module.VectorType.UPSTASH @@ -162,7 +162,7 @@ def test_search_by_vector_filter_threshold_and_delete(upstash_module): vector.index.reset.assert_called_once() -def test_upstash_factory_uses_existing_or_generated_collection(upstash_module, monkeypatch): +def test_upstash_factory_uses_existing_or_generated_collection(upstash_module, monkeypatch: pytest.MonkeyPatch): factory = upstash_module.UpstashVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py b/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py index 4dfb956c00..32f47c67ed 100644 --- a/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py +++ b/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py @@ -37,7 +37,7 @@ def _build_fake_psycopg2_modules(): @pytest.fixture -def vastbase_module(monkeypatch): +def vastbase_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_psycopg2_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -93,7 +93,7 @@ def test_vastbase_config_rejects_invalid_connection_window(vastbase_module): ) -def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch): +def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(vastbase_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -114,7 +114,7 @@ def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch): pool.putconn.assert_called_once_with(conn) -def test_create_and_add_texts(vastbase_module, monkeypatch): +def test_create_and_add_texts(vastbase_module, monkeypatch: pytest.MonkeyPatch): vector = vastbase_module.VastbaseVector.__new__(vastbase_module.VastbaseVector) vector.table_name = "embedding_collection_1" vector._create_collection = MagicMock() @@ -205,7 +205,7 @@ def 
test_search_by_vector_and_full_text(vastbase_module): assert full_docs[0].page_content == "full-text" -def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeypatch): +def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -240,7 +240,7 @@ def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeyp vastbase_module.redis_client.set.assert_called() -def test_vastbase_factory_uses_existing_or_generated_collection(vastbase_module, monkeypatch): +def test_vastbase_factory_uses_existing_or_generated_collection(vastbase_module, monkeypatch: pytest.MonkeyPatch): factory = vastbase_module.VastbaseVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py b/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py index 544b8163be..6559ad97d2 100644 --- a/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py +++ b/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py @@ -79,7 +79,7 @@ def _build_fake_vikingdb_modules(): @pytest.fixture -def vikingdb_module(monkeypatch): +def vikingdb_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_vikingdb_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -117,7 +117,7 @@ def test_init_get_type_and_has_checks(vikingdb_module): assert vector._has_index() is False -def test_create_collection_cache_and_creation_paths(vikingdb_module, monkeypatch): +def test_create_collection_cache_and_creation_paths(vikingdb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -253,7 +253,7 @@ def test_delete_drops_index_and_collection_when_present(vikingdb_module): 
vector._client.drop_collection.assert_not_called() -def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, monkeypatch): +def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, monkeypatch: pytest.MonkeyPatch): factory = vikingdb_module.VikingDBVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", @@ -293,7 +293,9 @@ def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, mo ("VIKINGDB_SCHEME", "VIKINGDB_SCHEME should not be None"), ], ) -def test_vikingdb_factory_raises_when_required_config_missing(vikingdb_module, monkeypatch, field, message): +def test_vikingdb_factory_raises_when_required_config_missing( + vikingdb_module, monkeypatch: pytest.MonkeyPatch, field, message +): factory = vikingdb_module.VikingDBVectorFactory() dataset = SimpleNamespace( id="dataset-1", index_struct_dict={"vector_store": {"class_prefix": "existing"}}, index_struct=None diff --git a/api/pyproject.toml b/api/pyproject.toml index 2587d9e0bf..604d01594e 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -1,12 +1,12 @@ [project] name = "dify-api" -version = "1.13.3" +version = "1.14.0" requires-python = "~=3.12.0" dependencies = [ # Legacy: mature and widely deployed "bleach>=6.3.0", - "boto3>=1.42.96", + "boto3>=1.43.6", "celery>=5.6.3", "croniter>=6.2.2", "flask>=3.1.3,<4.0.0", @@ -14,8 +14,8 @@ dependencies = [ "gevent>=26.4.0", "gevent-websocket>=0.10.1", "gmpy2>=2.3.0", - "google-api-python-client>=2.194.0", - "gunicorn>=25.3.0", + "google-api-python-client>=2.196.0", + "gunicorn>=26.0.0", "psycogreen>=1.0.2", "psycopg2-binary>=2.9.12", "python-socketio>=5.13.0", @@ -31,7 +31,7 @@ dependencies = [ "flask-migrate>=4.1.0,<5.0.0", "flask-orjson>=2.0.0,<3.0.0", "flask-restx>=1.3.2,<2.0.0", - "google-cloud-aiplatform>=1.148.1,<2.0.0", + "google-cloud-aiplatform>=1.151.0,<2.0.0", "httpx[socks]>=0.28.1,<1.0.0", "opentelemetry-distro>=0.62b1,<1.0.0", 
"opentelemetry-instrumentation-celery>=0.62b0,<1.0.0", @@ -45,7 +45,7 @@ dependencies = [ # Emerging: newer and fast-moving, use compatible pins "fastopenapi[flask]~=0.7.0", - "graphon~=0.2.2", + "graphon~=0.3.1", "httpx-sse~=0.4.0", "json-repair~=0.59.4", ] @@ -103,6 +103,7 @@ dify-trace-weave = { workspace = true } default-groups = ["storage", "tools", "vdb-all", "trace-all"] package = false override-dependencies = [ + "litellm>=1.83.7", "pyarrow>=18.0.0", ] @@ -127,7 +128,7 @@ dev = [ "testcontainers>=4.14.2", "types-aiofiles>=25.1.0", "types-beautifulsoup4>=4.12.0", - "types-cachetools>=6.2.0", + "types-cachetools>=7.0.0.20260503", "types-colorama>=0.4.15", "types-defusedxml>=0.7.0", "types-deprecated>=1.3.1", @@ -135,7 +136,7 @@ dev = [ "types-flask-cors>=6.0.0", "types-flask-migrate>=4.1.0", "types-gevent>=26.4.0", - "types-greenlet>=3.4.0", + "types-greenlet>=3.5.0.20260428", "types-html5lib>=1.1.11", "types-markdown>=3.10.2", "types-oauthlib>=3.3.0", @@ -143,7 +144,7 @@ dev = [ "types-olefile>=0.47.0", "types-openpyxl>=3.1.5", "types-pexpect>=4.9.0", - "types-protobuf>=7.34.1", + "types-protobuf>=7.34.1.20260503", "types-psutil>=7.2.2", "types-psycopg2>=2.9.21.20260422", "types-pygments>=2.20.0", @@ -158,11 +159,11 @@ dev = [ "types-tensorflow>=2.18.0.20260408", "types-tqdm>=4.67.3.20260408", "types-ujson>=5.10.0", - "boto3-stubs>=1.42.96", + "boto3-stubs>=1.43.2", "types-jmespath>=1.1.0.20260408", - "hypothesis>=6.152.3", + "hypothesis>=6.152.4", "types_pyOpenSSL>=24.1.0", - "types_cffi>=2.0.0.20260408", + "types_cffi>=2.0.0.20260429", "types_setuptools>=82.0.0.20260408", "pandas-stubs>=3.0.0", "scipy-stubs>=1.17.1.4", @@ -174,7 +175,7 @@ dev = [ # "locust>=2.40.4", # Temporarily removed due to compatibility issues. Uncomment when resolved. 
"pytest-timeout>=2.4.0", "pytest-xdist>=3.8.0", - "pyrefly>=0.62.0", + "pyrefly>=0.64.0", "xinference-client>=2.7.0", ] @@ -184,13 +185,13 @@ dev = [ ############################################################ storage = [ "azure-storage-blob>=12.28.0", - "bce-python-sdk>=0.9.70", + "bce-python-sdk>=0.9.71", "cos-python-sdk-v5>=1.9.42", "esdk-obs-python>=3.22.2", "google-cloud-storage>=3.10.1", "opendal>=0.46.0", "oss2>=2.19.1", - "supabase>=2.29.0", + "supabase>=2.30.0", "tos>=2.9.0", ] diff --git a/api/services/annotation_service.py b/api/services/annotation_service.py index 0229a1f43a..aa6b8ffc6e 100644 --- a/api/services/annotation_service.py +++ b/api/services/annotation_service.py @@ -425,7 +425,7 @@ class AppAnnotationService: return {"deleted_count": deleted_count} @classmethod - def batch_import_app_annotations(cls, app_id, file: FileStorage): + def batch_import_app_annotations(cls, app_id: str, file: FileStorage): """ Batch import annotations from CSV file with enhanced security checks. 
diff --git a/api/services/app_service.py b/api/services/app_service.py index a046b909b3..6716833f6c 100644 --- a/api/services/app_service.py +++ b/api/services/app_service.py @@ -1,9 +1,10 @@ import json import logging -from typing import Any, TypedDict, cast +from typing import Any, Literal, TypedDict, cast import sqlalchemy as sa from flask_sqlalchemy.pagination import Pagination +from pydantic import BaseModel, Field from sqlalchemy import select from configs import dify_config @@ -31,39 +32,59 @@ from tasks.remove_app_and_related_data_task import remove_app_and_related_data_t logger = logging.getLogger(__name__) +class AppListParams(BaseModel): + page: int = Field(default=1, ge=1) + limit: int = Field(default=20, ge=1, le=100) + mode: Literal["completion", "chat", "advanced-chat", "workflow", "agent-chat", "channel", "all"] = "all" + name: str | None = None + tag_ids: list[str] | None = None + is_created_by_me: bool | None = None + + +class CreateAppParams(BaseModel): + name: str = Field(min_length=1) + description: str | None = None + mode: Literal["chat", "agent-chat", "advanced-chat", "workflow", "completion"] + icon_type: str | None = None + icon: str | None = None + icon_background: str | None = None + api_rph: int = 0 + api_rpm: int = 0 + max_active_requests: int | None = None + + class AppService: - def get_paginate_apps(self, user_id: str, tenant_id: str, args: dict[str, Any]) -> Pagination | None: + def get_paginate_apps(self, user_id: str, tenant_id: str, params: AppListParams) -> Pagination | None: """ Get app list with pagination :param user_id: user id :param tenant_id: tenant id - :param args: request args + :param params: query parameters :return: """ filters = [App.tenant_id == tenant_id, App.is_universal == False] - if args["mode"] == "workflow": + if params.mode == "workflow": filters.append(App.mode == AppMode.WORKFLOW) - elif args["mode"] == "completion": + elif params.mode == "completion": filters.append(App.mode == AppMode.COMPLETION) - 
elif args["mode"] == "chat": + elif params.mode == "chat": filters.append(App.mode == AppMode.CHAT) - elif args["mode"] == "advanced-chat": + elif params.mode == "advanced-chat": filters.append(App.mode == AppMode.ADVANCED_CHAT) - elif args["mode"] == "agent-chat": + elif params.mode == "agent-chat": filters.append(App.mode == AppMode.AGENT_CHAT) - if args.get("is_created_by_me", False): + if params.is_created_by_me: filters.append(App.created_by == user_id) - if args.get("name"): + if params.name: from libs.helper import escape_like_pattern - name = args["name"][:30] + name = params.name[:30] escaped_name = escape_like_pattern(name) filters.append(App.name.ilike(f"%{escaped_name}%", escape="\\")) - # Check if tag_ids is not empty to avoid WHERE false condition - if args.get("tag_ids") and len(args["tag_ids"]) > 0: - target_ids = TagService.get_target_ids_by_tag_ids("app", tenant_id, args["tag_ids"]) + if params.tag_ids and len(params.tag_ids) > 0: + target_ids = TagService.get_target_ids_by_tag_ids("app", tenant_id, params.tag_ids) if target_ids and len(target_ids) > 0: filters.append(App.id.in_(target_ids)) else: @@ -71,21 +92,21 @@ class AppService: app_models = db.paginate( sa.select(App).where(*filters).order_by(App.created_at.desc()), - page=args["page"], - per_page=args["limit"], + page=params.page, + per_page=params.limit, error_out=False, ) return app_models - def create_app(self, tenant_id: str, args: dict[str, Any], account: Account) -> App: + def create_app(self, tenant_id: str, params: CreateAppParams, account: Account) -> App: """ Create app :param tenant_id: tenant id - :param args: request args + :param params: app creation parameters :param account: Account instance """ - app_mode = AppMode.value_of(args["mode"]) + app_mode = AppMode.value_of(params.mode) app_template = default_app_templates[app_mode] # get model config @@ -143,15 +164,16 @@ class AppService: default_model_config["model"] = json.dumps(default_model_dict) app = 
App(**app_template["app"]) - app.name = args["name"] - app.description = args.get("description", "") - app.mode = args["mode"] - app.icon_type = args.get("icon_type", "emoji") - app.icon = args["icon"] - app.icon_background = args["icon_background"] + app.name = params.name + app.description = params.description or "" + app.mode = app_mode + app.icon_type = IconType(params.icon_type) if params.icon_type else IconType.EMOJI + app.icon = params.icon + app.icon_background = params.icon_background app.tenant_id = tenant_id - app.api_rph = args.get("api_rph", 0) - app.api_rpm = args.get("api_rpm", 0) + app.api_rph = params.api_rph + app.api_rpm = params.api_rpm + app.max_active_requests = params.max_active_requests app.created_by = account.id app.updated_by = account.id diff --git a/api/services/audio_service.py b/api/services/audio_service.py index 60948e652b..c80b2f43fd 100644 --- a/api/services/audio_service.py +++ b/api/services/audio_service.py @@ -54,7 +54,7 @@ class AudioService: if extension not in [f"audio/{ext}" for ext in AUDIO_EXTENSIONS]: raise UnsupportedAudioTypeServiceError() - file_content = file.read() + file_content = file.stream.read() file_size = len(file_content) if file_size > FILE_SIZE_LIMIT: diff --git a/api/services/credit_pool_service.py b/api/services/credit_pool_service.py index 2d210db121..1f419d7a5b 100644 --- a/api/services/credit_pool_service.py +++ b/api/services/credit_pool_service.py @@ -1,7 +1,7 @@ import logging -from sqlalchemy import select, update -from sqlalchemy.orm import sessionmaker +from sqlalchemy import select +from sqlalchemy.orm import Session, sessionmaker from configs import dify_config from core.errors.error import QuotaExceededError @@ -13,6 +13,18 @@ logger = logging.getLogger(__name__) class CreditPoolService: + @staticmethod + def _get_locked_pool(session: Session, tenant_id: str, pool_type: str) -> TenantCreditPool | None: + return session.scalar( + select(TenantCreditPool) + .where( + TenantCreditPool.tenant_id 
== tenant_id, + TenantCreditPool.pool_type == pool_type, + ) + .limit(1) + .with_for_update() + ) + @classmethod def create_default_pool(cls, tenant_id: str) -> TenantCreditPool: """create default credit pool for new tenant""" @@ -59,31 +71,57 @@ class CreditPoolService: credits_required: int, pool_type: str = "trial", ) -> int: - """check and deduct credits, returns actual credits deducted""" - - pool = cls.get_pool(tenant_id, pool_type) - if not pool: - raise QuotaExceededError("Credit pool not found") - - if pool.remaining_credits <= 0: - raise QuotaExceededError("No credits remaining") - - # deduct all remaining credits if less than required - actual_credits = min(credits_required, pool.remaining_credits) + """Deduct exactly the requested credits or raise without mutating the pool.""" + if credits_required <= 0: + return 0 try: - with sessionmaker(db.engine).begin() as session: - stmt = ( - update(TenantCreditPool) - .where( - TenantCreditPool.tenant_id == tenant_id, - TenantCreditPool.pool_type == pool_type, - ) - .values(quota_used=TenantCreditPool.quota_used + actual_credits) - ) - session.execute(stmt) + with sessionmaker(db.engine, expire_on_commit=False).begin() as session: + pool = cls._get_locked_pool(session=session, tenant_id=tenant_id, pool_type=pool_type) + if not pool: + raise QuotaExceededError("Credit pool not found") + + remaining_credits = pool.remaining_credits + if remaining_credits <= 0: + raise QuotaExceededError("No credits remaining") + if remaining_credits < credits_required: + raise QuotaExceededError("Insufficient credits remaining") + + pool.quota_used += credits_required + except QuotaExceededError: + raise except Exception: logger.exception("Failed to deduct credits for tenant %s", tenant_id) raise QuotaExceededError("Failed to deduct credits") - return actual_credits + return credits_required + + @classmethod + def deduct_credits_capped( + cls, + tenant_id: str, + credits_required: int, + pool_type: str = "trial", + ) -> int: + 
"""Deduct up to the available balance and return the actual deducted credits.""" + if credits_required <= 0: + return 0 + + try: + with sessionmaker(db.engine, expire_on_commit=False).begin() as session: + pool = cls._get_locked_pool(session=session, tenant_id=tenant_id, pool_type=pool_type) + if not pool: + logger.warning("Credit pool not found, tenant_id=%s, pool_type=%s", tenant_id, pool_type) + return 0 + + deducted_credits = min(credits_required, pool.remaining_credits) + if deducted_credits <= 0: + return 0 + + pool.quota_used += deducted_credits + return deducted_credits + except QuotaExceededError: + raise + except Exception: + logger.exception("Failed to deduct capped credits for tenant %s", tenant_id) + raise QuotaExceededError("Failed to deduct credits") diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index eef38f1ce2..383474f4f6 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -108,7 +108,7 @@ logger = logging.getLogger(__name__) class ProcessRulesDict(TypedDict): - mode: str + mode: ProcessRuleMode rules: dict[str, Any] @@ -204,7 +204,7 @@ class DatasetService: mode = dataset_process_rule.mode rules = dataset_process_rule.rules_dict or {} else: - mode = str(DocumentService.DEFAULT_RULES["mode"]) + mode = ProcessRuleMode(DocumentService.DEFAULT_RULES["mode"]) rules = dict(DocumentService.DEFAULT_RULES.get("rules") or {}) return {"mode": mode, "rules": rules} @@ -1984,7 +1984,7 @@ class DocumentService: if process_rule.rules: dataset_process_rule = DatasetProcessRule( dataset_id=dataset.id, - mode=process_rule.mode, + mode=ProcessRuleMode(process_rule.mode), rules=process_rule.rules.model_dump_json() if process_rule.rules else None, created_by=account.id, ) @@ -1995,7 +1995,7 @@ class DocumentService: elif process_rule.mode == ProcessRuleMode.AUTOMATIC: dataset_process_rule = DatasetProcessRule( dataset_id=dataset.id, - mode=process_rule.mode, + mode=ProcessRuleMode.AUTOMATIC, 
rules=json.dumps(DatasetProcessRule.AUTOMATIC_RULES), created_by=account.id, ) @@ -2572,14 +2572,14 @@ class DocumentService: if process_rule.mode in {ProcessRuleMode.CUSTOM, ProcessRuleMode.HIERARCHICAL}: dataset_process_rule = DatasetProcessRule( dataset_id=dataset.id, - mode=process_rule.mode, + mode=ProcessRuleMode(process_rule.mode), rules=process_rule.rules.model_dump_json() if process_rule.rules else None, created_by=account.id, ) elif process_rule.mode == ProcessRuleMode.AUTOMATIC: dataset_process_rule = DatasetProcessRule( dataset_id=dataset.id, - mode=process_rule.mode, + mode=ProcessRuleMode.AUTOMATIC, rules=json.dumps(DatasetProcessRule.AUTOMATIC_RULES), created_by=account.id, ) diff --git a/api/services/entities/knowledge_entities/knowledge_entities.py b/api/services/entities/knowledge_entities/knowledge_entities.py index b1fe352861..910f54bebc 100644 --- a/api/services/entities/knowledge_entities/knowledge_entities.py +++ b/api/services/entities/knowledge_entities/knowledge_entities.py @@ -3,6 +3,7 @@ from typing import Any, Literal from pydantic import BaseModel, field_validator from core.rag.entities import Rule +from core.rag.entities.metadata_entities import MetadataFilteringCondition from core.rag.index_processor.constant.index_type import IndexStructureType from core.rag.retrieval.retrieval_methods import RetrievalMethod @@ -83,6 +84,7 @@ class RetrievalModel(BaseModel): score_threshold_enabled: bool score_threshold: float | None = None weights: WeightModel | None = None + metadata_filtering_conditions: MetadataFilteringCondition | None = None class MetaDataConfig(BaseModel): diff --git a/api/services/feature_service.py b/api/services/feature_service.py index 16d2597963..a88614b251 100644 --- a/api/services/feature_service.py +++ b/api/services/feature_service.py @@ -166,7 +166,7 @@ class SystemFeatureModel(BaseModel): enable_email_code_login: bool = False enable_email_password_login: bool = True enable_social_oauth_login: bool = False - 
enable_collaboration_mode: bool = False + enable_collaboration_mode: bool = True is_allow_register: bool = False is_allow_create_workspace: bool = False is_email_setup: bool = False diff --git a/api/services/file_service.py b/api/services/file_service.py index f60afe2f19..b683a2f3d4 100644 --- a/api/services/file_service.py +++ b/api/services/file_service.py @@ -107,15 +107,14 @@ class FileService: hash=hashlib.sha3_256(content).hexdigest(), source_url=source_url, ) - # The `UploadFile` ID is generated within its constructor, so flushing to retrieve the ID is unnecessary. - # We can directly generate the `source_url` here before committing. - if not upload_file.source_url: - upload_file.source_url = file_helpers.get_signed_file_url(upload_file_id=upload_file.id) with self._session_maker(expire_on_commit=False) as session: session.add(upload_file) session.commit() + if not upload_file.source_url: + upload_file.source_url = file_helpers.get_signed_file_url(upload_file_id=upload_file.id) + return upload_file @staticmethod diff --git a/api/services/recommend_app/category_order.py b/api/services/recommend_app/category_order.py new file mode 100644 index 0000000000..be6b112aa4 --- /dev/null +++ b/api/services/recommend_app/category_order.py @@ -0,0 +1,49 @@ +"""Apply Redis-backed category ordering for DB-backed Explore apps.""" + +import json +import logging +from collections.abc import Collection +from typing import Any + +from extensions.ext_redis import redis_client + +logger = logging.getLogger(__name__) + +EXPLORE_APP_CATEGORY_ORDER_KEY_PREFIX = "explore:apps:category_order" + + +def _category_order_key(language: str) -> str: + return f"{EXPLORE_APP_CATEGORY_ORDER_KEY_PREFIX}:{language}" + + +def get_explore_app_category_order(language: str) -> list[str]: + try: + raw_categories = redis_client.get(_category_order_key(language)) + except Exception: + logger.exception("Failed to read explore app category order from Redis.") + return [] + + if not raw_categories: + 
return [] + + if isinstance(raw_categories, bytes): + raw_categories = raw_categories.decode("utf-8") + + try: + categories: Any = json.loads(raw_categories) + except (TypeError, json.JSONDecodeError): + logger.warning("Invalid explore app category order payload for language %s.", language) + return [] + + if not isinstance(categories, list): + return [] + + return [category for category in categories if isinstance(category, str)] + + +def order_categories(categories: Collection[str], language: str) -> list[str]: + configured_order = get_explore_app_category_order(language) + if configured_order: + return configured_order + + return sorted(categories) diff --git a/api/services/recommend_app/database/database_retrieval.py b/api/services/recommend_app/database/database_retrieval.py index 1df5fd13b6..ac870f0700 100644 --- a/api/services/recommend_app/database/database_retrieval.py +++ b/api/services/recommend_app/database/database_retrieval.py @@ -6,6 +6,7 @@ from constants.languages import languages from extensions.ext_database import db from models.model import App, RecommendedApp from services.app_dsl_service import AppDslService +from services.recommend_app.category_order import order_categories from services.recommend_app.recommend_app_base import RecommendAppRetrievalBase from services.recommend_app.recommend_app_type import RecommendAppType @@ -18,7 +19,7 @@ class RecommendedAppItemDict(TypedDict): copyright: Any privacy_policy: Any custom_disclaimer: str - category: str + categories: list[str] position: int is_listed: bool @@ -80,6 +81,7 @@ class DatabaseRecommendAppRetrieval(RecommendAppRetrievalBase): if not site: continue + app_categories = recommended_app.categories or [] recommended_app_result: RecommendedAppItemDict = { "id": recommended_app.id, "app": recommended_app.app, @@ -88,15 +90,18 @@ class DatabaseRecommendAppRetrieval(RecommendAppRetrievalBase): "copyright": site.copyright, "privacy_policy": site.privacy_policy, "custom_disclaimer": 
site.custom_disclaimer, - "category": recommended_app.category, + "categories": app_categories, "position": recommended_app.position, "is_listed": recommended_app.is_listed, } recommended_apps_result.append(recommended_app_result) - categories.add(recommended_app.category) + categories.update(app_categories) - return RecommendedAppsResultDict(recommended_apps=recommended_apps_result, categories=sorted(categories)) + return RecommendedAppsResultDict( + recommended_apps=recommended_apps_result, + categories=order_categories(categories, language), + ) @classmethod def fetch_recommended_app_detail_from_db(cls, app_id: str) -> RecommendedAppDetailDict | None: diff --git a/api/services/tag_service.py b/api/services/tag_service.py index 1882c855ea..8043a99be1 100644 --- a/api/services/tag_service.py +++ b/api/services/tag_service.py @@ -1,9 +1,11 @@ import uuid +from typing import cast import sqlalchemy as sa from flask_login import current_user from pydantic import BaseModel, Field -from sqlalchemy import func, select +from sqlalchemy import delete, func, select +from sqlalchemy.engine import CursorResult from werkzeug.exceptions import NotFound from extensions.ext_database import db @@ -29,7 +31,7 @@ class TagBindingCreatePayload(BaseModel): class TagBindingDeletePayload(BaseModel): - tag_id: str + tag_ids: list[str] = Field(min_length=1) target_id: str type: TagType @@ -178,13 +180,18 @@ class TagService: @staticmethod def delete_tag_binding(payload: TagBindingDeletePayload): TagService.check_target_exists(payload.type, payload.target_id) - tag_binding = db.session.scalar( - select(TagBinding) - .where(TagBinding.target_id == payload.target_id, TagBinding.tag_id == payload.tag_id) - .limit(1) + result = cast( + CursorResult, + db.session.execute( + delete(TagBinding).where( + TagBinding.target_id == payload.target_id, + TagBinding.tag_id.in_(payload.tag_ids), + TagBinding.tenant_id == current_user.current_tenant_id, + ) + ), ) - if tag_binding: - 
db.session.delete(tag_binding) + + if result.rowcount: db.session.commit() @staticmethod diff --git a/api/services/tools/builtin_tools_manage_service.py b/api/services/tools/builtin_tools_manage_service.py index b8242ab3a5..20de1f4058 100644 --- a/api/services/tools/builtin_tools_manage_service.py +++ b/api/services/tools/builtin_tools_manage_service.py @@ -408,7 +408,7 @@ class BuiltinToolManageService: return {"result": "success"} @staticmethod - def set_default_provider(tenant_id: str, user_id: str, provider: str, id: str): + def set_default_provider(tenant_id: str, provider: str, id: str): """ set default provider """ @@ -422,12 +422,11 @@ class BuiltinToolManageService: if target_provider is None: raise ValueError("provider not found") - # clear default provider + # clear default provider (tenant-scoped: only one default per provider per workspace) session.execute( update(BuiltinToolProvider) .where( BuiltinToolProvider.tenant_id == tenant_id, - BuiltinToolProvider.user_id == user_id, BuiltinToolProvider.provider == provider, BuiltinToolProvider.is_default.is_(True), ) diff --git a/api/services/trigger/trigger_subscription_builder_service.py b/api/services/trigger/trigger_subscription_builder_service.py index 889717df72..cff735b39d 100644 --- a/api/services/trigger/trigger_subscription_builder_service.py +++ b/api/services/trigger/trigger_subscription_builder_service.py @@ -121,9 +121,7 @@ class TriggerSubscriptionBuilderService: if not subscription_builder.name: raise ValueError("Subscription builder name is required") - credential_type = CredentialType.of( - subscription_builder.credential_type or CredentialType.UNAUTHORIZED.value - ) + credential_type = CredentialType.of(subscription_builder.credential_type or CredentialType.UNAUTHORIZED) if credential_type == CredentialType.UNAUTHORIZED: # manually create TriggerProviderService.add_trigger_subscription( @@ -321,9 +319,7 @@ class TriggerSubscriptionBuilderService: raise ValueError("Subscription builder name 
is required") # Build - credential_type = CredentialType.of( - subscription_builder.credential_type or CredentialType.UNAUTHORIZED.value - ) + credential_type = CredentialType.of(subscription_builder.credential_type or CredentialType.UNAUTHORIZED) if credential_type == CredentialType.UNAUTHORIZED: # manually create TriggerProviderService.add_trigger_subscription( diff --git a/api/services/trigger/webhook_service.py b/api/services/trigger/webhook_service.py index 5d99900a04..592f678421 100644 --- a/api/services/trigger/webhook_service.py +++ b/api/services/trigger/webhook_service.py @@ -402,7 +402,7 @@ class WebhookService: for name, file in files.items(): if file and file.filename: try: - file_content = file.read() + file_content = file.stream.read() mimetype = file.content_type or mimetypes.guess_type(file.filename)[0] or "application/octet-stream" file_obj = cls._create_file_from_binary(file_content, mimetype, webhook_trigger) processed_files[name] = file_obj.to_dict() diff --git a/api/services/variable_truncator.py b/api/services/variable_truncator.py index 1529c2b98f..5dd5f6873f 100644 --- a/api/services/variable_truncator.py +++ b/api/services/variable_truncator.py @@ -194,14 +194,15 @@ class VariableTruncator(BaseTruncator): result: _PartResult[Any] # Apply type-specific truncation with target size - if isinstance(segment, ArraySegment): - result = self._truncate_array(segment.value, target_size) - elif isinstance(segment, StringSegment): - result = self._truncate_string(segment.value, target_size) - elif isinstance(segment, ObjectSegment): - result = self._truncate_object(segment.value, target_size) - else: - raise AssertionError("this should be unreachable.") + match segment: + case ArraySegment(): + result = self._truncate_array(segment.value, target_size) + case StringSegment(): + result = self._truncate_string(segment.value, target_size) + case ObjectSegment(): + result = self._truncate_object(segment.value, target_size) + case _: + raise 
AssertionError("this should be unreachable.") return _PartResult( value=segment.model_copy(update={"value": result.value}), @@ -219,40 +220,41 @@ class VariableTruncator(BaseTruncator): return VariableTruncator.calculate_json_size(value.model_dump(), depth=depth + 1) if depth > _MAX_DEPTH: raise MaxDepthExceededError() - if isinstance(value, str): - # Ideally, the size of strings should be calculated based on their utf-8 encoded length. - # However, this adds complexity as we would need to compute encoded sizes consistently - # throughout the code. Therefore, we approximate the size using the string's length. - # Rough estimate: number of characters, plus 2 for quotes - return len(value) + 2 - elif isinstance(value, (int, float)): - return len(str(value)) - elif isinstance(value, bool): - return 4 if value else 5 # "true" or "false" - elif value is None: - return 4 # "null" - elif isinstance(value, list): - # Size = sum of elements + separators + brackets - total = 2 # "[]" - for i, item in enumerate(value): - if i > 0: - total += 1 # "," - total += VariableTruncator.calculate_json_size(item, depth=depth + 1) - return total - elif isinstance(value, dict): - # Size = sum of keys + values + separators + brackets - total = 2 # "{}" - for index, key in enumerate(value.keys()): - if index > 0: - total += 1 # "," - total += VariableTruncator.calculate_json_size(str(key), depth=depth + 1) # Key as string - total += 1 # ":" - total += VariableTruncator.calculate_json_size(value[key], depth=depth + 1) - return total - elif isinstance(value, File): - return VariableTruncator.calculate_json_size(value.model_dump(), depth=depth + 1) - else: - raise UnknownTypeError(f"got unknown type {type(value)}") + match value: + case str(): + # Ideally, the size of strings should be calculated based on their utf-8 encoded length. + # However, this adds complexity as we would need to compute encoded sizes consistently + # throughout the code. 
Therefore, we approximate the size using the string's length. + # Rough estimate: number of characters, plus 2 for quotes + return len(value) + 2 + case bool(): + return 4 if value else 5 # "true" or "false" + case int() | float(): + return len(str(value)) + case None: + return 4 # "null" + case list(): + # Size = sum of elements + separators + brackets + total = 2 # "[]" + for i, item in enumerate(value): + if i > 0: + total += 1 # "," + total += VariableTruncator.calculate_json_size(item, depth=depth + 1) + return total + case dict(): + # Size = sum of keys + values + separators + brackets + total = 2 # "{}" + for index, key in enumerate(value.keys()): + if index > 0: + total += 1 # "," + total += VariableTruncator.calculate_json_size(str(key), depth=depth + 1) # Key as string + total += 1 # ":" + total += VariableTruncator.calculate_json_size(value[key], depth=depth + 1) + return total + case File(): + return VariableTruncator.calculate_json_size(value.model_dump(), depth=depth + 1) + case _: + raise UnknownTypeError(f"got unknown type {type(value)}") def _truncate_string(self, value: str, target_size: int) -> _PartResult[str]: if (size := self.calculate_json_size(value)) < target_size: @@ -419,22 +421,23 @@ class VariableTruncator(BaseTruncator): target_size: int, ) -> _PartResult[Any]: """Truncate a value within an object to fit within budget.""" - if isinstance(val, UpdatedVariable): - # TODO(Workflow): push UpdatedVariable normalization closer to its producer. 
- return self._truncate_object(val.model_dump(), target_size) - elif isinstance(val, str): - return self._truncate_string(val, target_size) - elif isinstance(val, list): - return self._truncate_array(val, target_size) - elif isinstance(val, dict): - return self._truncate_object(val, target_size) - elif isinstance(val, File): - # File objects should not be truncated, return as-is - return _PartResult(val, self.calculate_json_size(val), False) - elif val is None or isinstance(val, (bool, int, float)): - return _PartResult(val, self.calculate_json_size(val), False) - else: - raise AssertionError("this statement should be unreachable.") + match val: + case UpdatedVariable(): + # TODO(Workflow): push UpdatedVariable normalization closer to its producer. + return self._truncate_object(val.model_dump(), target_size) + case str(): + return self._truncate_string(val, target_size) + case list(): + return self._truncate_array(val, target_size) + case dict(): + return self._truncate_object(val, target_size) + case File(): + # File objects should not be truncated, return as-is + return _PartResult(val, self.calculate_json_size(val), False) + case None | bool() | int() | float(): + return _PartResult(val, self.calculate_json_size(val), False) + case _: + raise AssertionError("this statement should be unreachable.") class DummyVariableTruncator(BaseTruncator): diff --git a/api/services/workflow_draft_variable_service.py b/api/services/workflow_draft_variable_service.py index 96f936ff9b..59db147576 100644 --- a/api/services/workflow_draft_variable_service.py +++ b/api/services/workflow_draft_variable_service.py @@ -157,8 +157,8 @@ class DraftVarLoader(VariableLoader): # This approach reduces loading time by querying external systems concurrently. 
with ThreadPoolExecutor(max_workers=10) as executor: offloaded_variables = executor.map(self._load_offloaded_variable, offloaded_draft_vars) - for selector, variable in offloaded_variables: - variable_by_selector[selector] = variable + for selector, offloaded_variable in offloaded_variables: + variable_by_selector[selector] = offloaded_variable return list(variable_by_selector.values()) @@ -1083,10 +1083,9 @@ class DraftVariableSaver: mimetype=content_type, user=self._user, ) - + assert self._user.current_tenant_id # Create WorkflowDraftVariableFile record variable_file = WorkflowDraftVariableFile( - id=uuidv7(), upload_file_id=upload_file.id, size=original_size, length=original_length, @@ -1095,6 +1094,7 @@ class DraftVariableSaver: tenant_id=self._user.current_tenant_id, user_id=self._user.id, ) + variable_file.id = str(uuidv7()) engine = bind = self._session.get_bind() assert isinstance(engine, Engine) with sessionmaker(bind=engine, expire_on_commit=False).begin() as session: diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index f97b85dc2b..eb78e0a68b 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -1066,8 +1066,13 @@ class WorkflowService: ) rendered_content = node.render_form_content_before_submission() + selected_action = next( + (user_action for user_action in node_data.user_actions if user_action.id == action), + None, + ) outputs: dict[str, Any] = dict(form_inputs) outputs["__action_id"] = action + outputs["__action_value"] = selected_action.title if selected_action else "" outputs["__rendered_content"] = node.render_form_content_with_outputs( rendered_content, outputs, node_data.outputs_field_names() ) @@ -1251,7 +1256,7 @@ class WorkflowService: node_data = HumanInputNode.validate_node_data(adapt_human_input_node_data_for_graph(node_config["data"])) node = HumanInputNode( node_id=node_config["id"], - config=node_data, + data=node_data, graph_init_params=graph_init_params, 
graph_runtime_state=graph_runtime_state, runtime=DifyHumanInputNodeRuntime(run_context), diff --git a/api/tasks/ops_trace_task.py b/api/tasks/ops_trace_task.py index c95b8db078..49fe68ad7e 100644 --- a/api/tasks/ops_trace_task.py +++ b/api/tasks/ops_trace_task.py @@ -1,11 +1,31 @@ +""" +Celery task for asynchronous ops trace dispatch. + +Trace providers may report explicitly retryable dispatch failures through the +core retryable exception contract. The task preserves the payload file only +when Celery accepts the retry request; successful dispatches and terminal +failures clean up the stored payload. + +One concrete producer today is Phoenix nested workflow tracing. The outer +workflow tool span publishes a restorable parent span context asynchronously, +while the nested workflow trace may be picked up by Celery first. In that +ordering window, the provider raises a retryable core exception instead of +dropping the trace or emitting it under the wrong parent. The task intentionally +does not know that the provider is Phoenix; it only honors the core retryable +dispatch contract. 
+""" + import json import logging from celery import shared_task +from celery.exceptions import Retry from flask import current_app +from configs import dify_config from core.ops.entities.config_entity import OPS_FILE_PATH, OPS_TRACE_FAILED_KEY from core.ops.entities.trace_entity import trace_info_info_map +from core.ops.exceptions import RetryableTraceDispatchError from core.rag.models.document import Document from extensions.ext_redis import redis_client from extensions.ext_storage import storage @@ -14,9 +34,17 @@ from models.workflow import WorkflowRun logger = logging.getLogger(__name__) +_RETRYABLE_TRACE_DISPATCH_LIMIT = dify_config.OPS_TRACE_RETRYABLE_DISPATCH_MAX_RETRIES +_RETRYABLE_TRACE_DISPATCH_DELAY_SECONDS = dify_config.OPS_TRACE_RETRYABLE_DISPATCH_DELAY_SECONDS -@shared_task(queue="ops_trace") -def process_trace_tasks(file_info): + +@shared_task( + queue="ops_trace", + bind=True, + max_retries=_RETRYABLE_TRACE_DISPATCH_LIMIT, + default_retry_delay=_RETRYABLE_TRACE_DISPATCH_DELAY_SECONDS, +) +def process_trace_tasks(self, file_info): """ Async process trace tasks Usage: process_trace_tasks.delay(tasks_data) @@ -29,6 +57,7 @@ def process_trace_tasks(file_info): file_data = json.loads(storage.load(file_path)) trace_info = file_data.get("trace_info") trace_info_type = file_data.get("trace_info_type") + enterprise_trace_dispatched = bool(file_data.get("_enterprise_trace_dispatched")) trace_instance = OpsTraceManager.get_ops_trace_instance(app_id) if trace_info.get("message_data"): @@ -38,6 +67,8 @@ def process_trace_tasks(file_info): if trace_info.get("documents"): trace_info["documents"] = [Document.model_validate(doc) for doc in trace_info["documents"]] + should_delete_file = True + try: trace_type = trace_info_info_map.get(trace_info_type) if trace_type: @@ -45,30 +76,66 @@ def process_trace_tasks(file_info): from extensions.ext_enterprise_telemetry import is_enabled as is_ee_telemetry_enabled - if is_ee_telemetry_enabled(): + if 
is_ee_telemetry_enabled() and not enterprise_trace_dispatched: from enterprise.telemetry.enterprise_trace import EnterpriseOtelTrace try: EnterpriseOtelTrace().trace(trace_info) except Exception: logger.exception("Enterprise trace failed for app_id: %s", app_id) + else: + file_data["_enterprise_trace_dispatched"] = True + enterprise_trace_dispatched = True if trace_instance: with current_app.app_context(): trace_instance.trace(trace_info) logger.info("Processing trace tasks success, app_id: %s", app_id) + except RetryableTraceDispatchError as e: + # Retryable dispatch failures represent a transient provider-side + # ordering gap, not corrupt payload data. Keep the payload only after + # Celery accepts the retry request; otherwise this attempt becomes a + # terminal failure and the stored file is cleaned up in `finally`. + # + # Enterprise telemetry runs before provider dispatch. If it already ran + # and provider dispatch asks for a retry, persist that private flag so + # the next attempt does not emit the same enterprise trace twice. 
+ if self.request.retries >= _RETRYABLE_TRACE_DISPATCH_LIMIT: + logger.exception("Retryable trace dispatch budget exhausted, app_id: %s", app_id) + failed_key = f"{OPS_TRACE_FAILED_KEY}_{app_id}" + redis_client.incr(failed_key) + else: + logger.warning( + "Retryable trace dispatch failure, scheduling retry %s/%s for app_id %s: %s", + self.request.retries + 1, + _RETRYABLE_TRACE_DISPATCH_LIMIT, + app_id, + e, + ) + try: + if enterprise_trace_dispatched: + storage.save(file_path, json.dumps(file_data).encode("utf-8")) + raise self.retry(exc=e, countdown=_RETRYABLE_TRACE_DISPATCH_DELAY_SECONDS) + except Retry: + should_delete_file = False + raise + except Exception: + logger.exception("Failed to schedule trace dispatch retry, app_id: %s", app_id) + failed_key = f"{OPS_TRACE_FAILED_KEY}_{app_id}" + redis_client.incr(failed_key) except Exception as e: logger.exception("Processing trace tasks failed, app_id: %s", app_id) failed_key = f"{OPS_TRACE_FAILED_KEY}_{app_id}" redis_client.incr(failed_key) finally: - try: - storage.delete(file_path) - except Exception as e: - logger.warning( - "Failed to delete trace file %s for app_id %s: %s", - file_path, - app_id, - e, - ) + if should_delete_file: + try: + storage.delete(file_path) + except Exception as e: + logger.warning( + "Failed to delete trace file %s for app_id %s: %s", + file_path, + app_id, + e, + ) diff --git a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py index 3b5e822b90..90131fe98d 100644 --- a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py @@ -13,7 +13,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole -from 
models.enums import ConversationFromSource +from models.enums import AppStatus, ConversationFromSource from models.model import AppMode from services.app_generate_service import AppGenerateService @@ -28,7 +28,7 @@ class TestChatMessageApiPermissions: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL return app @pytest.fixture @@ -78,7 +78,7 @@ class TestChatMessageApiPermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, @@ -130,7 +130,7 @@ class TestChatMessageApiPermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, diff --git a/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py b/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py index 309a0b015a..c4db0d5111 100644 --- a/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py +++ b/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py @@ -14,7 +14,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole -from models.enums import FeedbackFromSource, FeedbackRating +from models.enums import AppStatus, FeedbackFromSource, FeedbackRating from models.model import AppMode, MessageFeedback from services.feedback_service import FeedbackService @@ -29,7 +29,7 @@ class TestFeedbackExportApi: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL app.name = "Test App" return app @@ -135,7 +135,7 @@ class TestFeedbackExportApi: self, test_client: FlaskClient, auth_header, - monkeypatch, + 
monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, @@ -167,7 +167,13 @@ class TestFeedbackExportApi: mock_export_feedbacks.assert_called_once() def test_feedback_export_csv_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account, sample_feedback_data + self, + test_client: FlaskClient, + auth_header, + monkeypatch: pytest.MonkeyPatch, + mock_app_model, + mock_account, + sample_feedback_data, ): """Test feedback export in CSV format.""" @@ -202,7 +208,13 @@ class TestFeedbackExportApi: assert "text/csv" in response.content_type def test_feedback_export_json_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account, sample_feedback_data + self, + test_client: FlaskClient, + auth_header, + monkeypatch: pytest.MonkeyPatch, + mock_app_model, + mock_account, + sample_feedback_data, ): """Test feedback export in JSON format.""" @@ -246,7 +258,7 @@ class TestFeedbackExportApi: assert "application/json" in response.content_type def test_feedback_export_with_filters( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with various filters.""" @@ -287,7 +299,7 @@ class TestFeedbackExportApi: ) def test_feedback_export_invalid_date_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with invalid date format.""" @@ -312,7 +324,7 @@ class TestFeedbackExportApi: assert "Parameter validation error" in response_json["error"] def test_feedback_export_server_error( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: 
pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with server error.""" diff --git a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py index 04945e57a0..ab08c7a6d8 100644 --- a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py @@ -11,6 +11,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole +from models.enums import AppStatus from models.model import AppMode from services.app_model_config_service import AppModelConfigService @@ -25,7 +26,7 @@ class TestModelConfigResourcePermissions: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL app.app_model_config_id = str(uuid.uuid4()) return app @@ -73,7 +74,7 @@ class TestModelConfigResourcePermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, diff --git a/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py b/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py index a876b0c4aa..7d0b575262 100644 --- a/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py +++ b/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py @@ -1,5 +1,7 @@ from collections.abc import Generator +from pytest_mock import MockerFixture + from core.datasource.datasource_manager import DatasourceManager from core.datasource.entities.datasource_entities import DatasourceMessage from graphon.node_events import StreamCompletedEvent @@ 
-19,7 +21,7 @@ def _gen_var_stream() -> Generator[DatasourceMessage, None, None]: ) -def test_stream_node_events_accumulates_variables(mocker): +def test_stream_node_events_accumulates_variables(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_var_stream()) events = list( DatasourceManager.stream_node_events( diff --git a/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py b/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py index 2392084c36..b9f09ccadd 100644 --- a/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py +++ b/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py @@ -1,3 +1,5 @@ +from pytest_mock import MockerFixture + from core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY from core.workflow.nodes.datasource.datasource_node import DatasourceNode from core.workflow.nodes.datasource.entities import DatasourceNodeData @@ -44,7 +46,7 @@ class _GP: call_depth = 0 -def test_node_integration_minimal_stream(mocker): +def test_node_integration_minimal_stream(mocker: MockerFixture): sys_d = { "sys": { "datasource_type": "online_document", @@ -71,7 +73,7 @@ def test_node_integration_minimal_stream(mocker): node = DatasourceNode( node_id="n", - config=DatasourceNodeData( + data=DatasourceNodeData( type="datasource", version="1", title="Datasource", diff --git a/api/tests/integration_tests/workflow/nodes/__mock/model.py b/api/tests/integration_tests/workflow/nodes/__mock/model.py index a9a2617bae..a77fe5970a 100644 --- a/api/tests/integration_tests/workflow/nodes/__mock/model.py +++ b/api/tests/integration_tests/workflow/nodes/__mock/model.py @@ -4,7 +4,7 @@ from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEnti from core.entities.provider_configuration import ProviderConfiguration, 
ProviderModelBundle from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration from core.model_manager import ModelInstance -from core.plugin.impl.model_runtime_factory import create_plugin_model_provider_factory +from core.plugin.impl.model_runtime_factory import create_plugin_model_assembly from graphon.model_runtime.entities.model_entities import ModelType from models.provider import ProviderType @@ -15,8 +15,9 @@ def get_mocked_fetch_model_config( mode: str, credentials: dict, ): - model_provider_factory = create_plugin_model_provider_factory(tenant_id="9d2074fc-6f86-45a9-b09d-6ecc63b9056b") - model_type_instance = model_provider_factory.get_model_type_instance(provider, ModelType.LLM) + model_assembly = create_plugin_model_assembly(tenant_id="9d2074fc-6f86-45a9-b09d-6ecc63b9056b") + model_provider_factory = model_assembly.model_provider_factory + model_type_instance = model_assembly.create_model_type_instance(provider=provider, model_type=ModelType.LLM) provider_model_bundle = ProviderModelBundle( configuration=ProviderConfiguration( tenant_id="1", diff --git a/api/tests/integration_tests/workflow/nodes/test_code.py b/api/tests/integration_tests/workflow/nodes/test_code.py index aaa6092993..9345113aa3 100644 --- a/api/tests/integration_tests/workflow/nodes/test_code.py +++ b/api/tests/integration_tests/workflow/nodes/test_code.py @@ -45,7 +45,7 @@ def init_code_node(code_config: dict): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -66,7 +66,7 @@ def init_code_node(code_config: dict): node = CodeNode( node_id=str(uuid.uuid4()), - config=CodeNodeData.model_validate(code_config["data"]), + data=CodeNodeData.model_validate(code_config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, 
code_executor=node_factory._code_executor, diff --git a/api/tests/integration_tests/workflow/nodes/test_http.py b/api/tests/integration_tests/workflow/nodes/test_http.py index b9f7b9575b..7cd7f50b77 100644 --- a/api/tests/integration_tests/workflow/nodes/test_http.py +++ b/api/tests/integration_tests/workflow/nodes/test_http.py @@ -55,7 +55,7 @@ def init_http_node(config: dict): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -76,7 +76,7 @@ def init_http_node(config: dict): node = HttpRequestNode( node_id=str(uuid.uuid4()), - config=HttpRequestNodeData.model_validate(config["data"]), + data=HttpRequestNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, http_request_config=HTTP_REQUEST_CONFIG, @@ -204,7 +204,7 @@ def test_custom_auth_with_empty_api_key_raises_error(setup_http_mock): from graphon.runtime import VariablePool # Create variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="test", files=[]), user_inputs={}, environment_variables=[], @@ -702,7 +702,7 @@ def test_nested_object_variable_selector(setup_http_mock): ) # Create independent variable pool for this test only - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -724,7 +724,7 @@ def test_nested_object_variable_selector(setup_http_mock): node = HttpRequestNode( node_id=str(uuid.uuid4()), - config=HttpRequestNodeData.model_validate(graph_config["nodes"][1]["data"]), + data=HttpRequestNodeData.model_validate(graph_config["nodes"][1]["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, http_request_config=HTTP_REQUEST_CONFIG, 
diff --git a/api/tests/integration_tests/workflow/nodes/test_llm.py b/api/tests/integration_tests/workflow/nodes/test_llm.py index 3eead70163..5b7790f6f4 100644 --- a/api/tests/integration_tests/workflow/nodes/test_llm.py +++ b/api/tests/integration_tests/workflow/nodes/test_llm.py @@ -53,7 +53,7 @@ def init_llm_node(config: dict) -> LLMNode: ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="aaa", app_id=app_id, @@ -77,7 +77,7 @@ def init_llm_node(config: dict) -> LLMNode: node = LLMNode( node_id=str(uuid.uuid4()), - config=LLMNodeData.model_validate(config["data"]), + data=LLMNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, credentials_provider=MagicMock(spec=CredentialsProvider), @@ -91,7 +91,11 @@ def init_llm_node(config: dict) -> LLMNode: return node -def test_execute_llm(): +def _mock_db_session_close(monkeypatch) -> None: + monkeypatch.setattr(db.session, "close", MagicMock()) + + +def test_execute_llm(monkeypatch): node = init_llm_node( config={ "id": "llm", @@ -118,7 +122,7 @@ def test_execute_llm(): }, ) - db.session.close = MagicMock() + _mock_db_session_close(monkeypatch) def build_mock_model_instance() -> MagicMock: from decimal import Decimal @@ -195,7 +199,7 @@ def test_execute_llm(): assert item.node_run_result.outputs.get("usage", {})["total_tokens"] > 0 -def test_execute_llm_with_jinja2(): +def test_execute_llm_with_jinja2(monkeypatch): """ Test execute LLM node with jinja2 """ @@ -233,8 +237,7 @@ def test_execute_llm_with_jinja2(): }, ) - # Mock db.session.close() - db.session.close = MagicMock() + _mock_db_session_close(monkeypatch) def build_mock_model_instance() -> MagicMock: from decimal import Decimal diff --git a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py index 
f2eabb86c3..fc230a2a68 100644 --- a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py +++ b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py @@ -56,7 +56,7 @@ def init_parameter_extractor_node(config: dict, memory=None): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="aaa", files=[], query="what's the weather in SF", conversation_id="abababa" ), @@ -71,7 +71,7 @@ def init_parameter_extractor_node(config: dict, memory=None): node = ParameterExtractorNode( node_id=str(uuid.uuid4()), - config=ParameterExtractorNodeData.model_validate(config["data"]), + data=ParameterExtractorNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, credentials_provider=MagicMock(spec=CredentialsProvider), @@ -83,7 +83,11 @@ def init_parameter_extractor_node(config: dict, memory=None): return node -def test_function_calling_parameter_extractor(setup_model_mock): +def _mock_db_session_close(monkeypatch) -> None: + monkeypatch.setattr(db.session, "close", MagicMock()) + + +def test_function_calling_parameter_extractor(setup_model_mock, monkeypatch): """ Test function calling for parameter extractor. """ @@ -114,7 +118,7 @@ def test_function_calling_parameter_extractor(setup_model_mock): mode="chat", credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}, )() - db.session.close = MagicMock() + _mock_db_session_close(monkeypatch) result = node._run() @@ -124,7 +128,7 @@ def test_function_calling_parameter_extractor(setup_model_mock): assert result.outputs.get("__reason") == None -def test_instructions(setup_model_mock): +def test_instructions(setup_model_mock, monkeypatch): """ Test chat parameter extractor. 
""" @@ -155,7 +159,7 @@ def test_instructions(setup_model_mock): mode="chat", credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}, )() - db.session.close = MagicMock() + _mock_db_session_close(monkeypatch) result = node._run() @@ -174,7 +178,7 @@ def test_instructions(setup_model_mock): assert "what's the weather in SF" in prompt.get("text") -def test_chat_parameter_extractor(setup_model_mock): +def test_chat_parameter_extractor(setup_model_mock, monkeypatch): """ Test chat parameter extractor. """ @@ -205,7 +209,7 @@ def test_chat_parameter_extractor(setup_model_mock): mode="chat", credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}, )() - db.session.close = MagicMock() + _mock_db_session_close(monkeypatch) result = node._run() @@ -225,7 +229,7 @@ def test_chat_parameter_extractor(setup_model_mock): assert '\n{"type": "object"' in prompt.get("text") -def test_completion_parameter_extractor(setup_model_mock): +def test_completion_parameter_extractor(setup_model_mock, monkeypatch): """ Test completion parameter extractor. """ @@ -256,7 +260,7 @@ def test_completion_parameter_extractor(setup_model_mock): mode="completion", credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}, )() - db.session.close = MagicMock() + _mock_db_session_close(monkeypatch) result = node._run() @@ -350,7 +354,7 @@ def test_extract_json_from_tool_call(): assert result["location"] == "kawaii" -def test_chat_parameter_extractor_with_memory(setup_model_mock): +def test_chat_parameter_extractor_with_memory(setup_model_mock, monkeypatch): """ Test chat parameter extractor with memory. 
""" @@ -382,7 +386,7 @@ def test_chat_parameter_extractor_with_memory(setup_model_mock): mode="chat", credentials={"openai_api_key": os.environ.get("OPENAI_API_KEY")}, )() - db.session.close = MagicMock() + _mock_db_session_close(monkeypatch) result = node._run() diff --git a/api/tests/integration_tests/workflow/nodes/test_template_transform.py b/api/tests/integration_tests/workflow/nodes/test_template_transform.py index e2e0723fb8..80489e6809 100644 --- a/api/tests/integration_tests/workflow/nodes/test_template_transform.py +++ b/api/tests/integration_tests/workflow/nodes/test_template_transform.py @@ -66,7 +66,7 @@ def test_execute_template_transform(): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -88,7 +88,7 @@ def test_execute_template_transform(): node = TemplateTransformNode( node_id=str(uuid.uuid4()), - config=TemplateTransformNodeData.model_validate(config["data"]), + data=TemplateTransformNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, jinja2_template_renderer=_SimpleJinja2Renderer(), diff --git a/api/tests/integration_tests/workflow/nodes/test_tool.py b/api/tests/integration_tests/workflow/nodes/test_tool.py index a8e9422c1e..78c12e7ea5 100644 --- a/api/tests/integration_tests/workflow/nodes/test_tool.py +++ b/api/tests/integration_tests/workflow/nodes/test_tool.py @@ -2,6 +2,8 @@ import time import uuid from unittest.mock import MagicMock, patch +import pytest + from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.tools.utils.configuration import ToolParameterConfigurationManager from core.workflow.node_factory import DifyNodeFactory @@ -41,7 +43,7 @@ def init_tool_node(config: dict): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( 
system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -62,7 +64,7 @@ def init_tool_node(config: dict): node = ToolNode( node_id=str(uuid.uuid4()), - config=ToolNodeData.model_validate(config["data"]), + data=ToolNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, tool_file_manager_factory=tool_file_manager_factory, @@ -71,7 +73,7 @@ def init_tool_node(config: dict): return node -def test_tool_variable_invoke(monkeypatch): +def test_tool_variable_invoke(monkeypatch: pytest.MonkeyPatch): node = init_tool_node( config={ "id": "1", @@ -106,7 +108,7 @@ def test_tool_variable_invoke(monkeypatch): assert item.node_run_result.outputs.get("text") is not None -def test_tool_mixed_invoke(monkeypatch): +def test_tool_mixed_invoke(monkeypatch: pytest.MonkeyPatch): node = init_tool_node( config={ "id": "1", diff --git a/api/tests/test_containers_integration_tests/conftest.py b/api/tests/test_containers_integration_tests/conftest.py index 66a25e5daf..b4482674da 100644 --- a/api/tests/test_containers_integration_tests/conftest.py +++ b/api/tests/test_containers_integration_tests/conftest.py @@ -433,7 +433,7 @@ def flask_app_with_containers(set_up_containers_and_env) -> Flask: @pytest.fixture -def flask_req_ctx_with_containers(flask_app_with_containers) -> Generator[None, None, None]: +def flask_req_ctx_with_containers(flask_app_with_containers: Flask) -> Generator[None, None, None]: """ Request context fixture for containerized Flask application. @@ -454,7 +454,7 @@ def flask_req_ctx_with_containers(flask_app_with_containers) -> Generator[None, @pytest.fixture -def test_client_with_containers(flask_app_with_containers) -> Generator[FlaskClient, None, None]: +def test_client_with_containers(flask_app_with_containers: Flask) -> Generator[FlaskClient, None, None]: """ Test client fixture for containerized Flask application. 
@@ -475,7 +475,7 @@ def test_client_with_containers(flask_app_with_containers) -> Generator[FlaskCli @pytest.fixture -def db_session_with_containers(flask_app_with_containers) -> Generator[Session, None, None]: +def db_session_with_containers(flask_app_with_containers: Flask) -> Generator[Session, None, None]: """ Database session fixture for containerized testing. diff --git a/api/tests/test_containers_integration_tests/controllers/console/app/test_app_apis.py b/api/tests/test_containers_integration_tests/controllers/console/app/test_app_apis.py index 18755ef012..bb737754a1 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/app/test_app_apis.py +++ b/api/tests/test_containers_integration_tests/controllers/console/app/test_app_apis.py @@ -7,6 +7,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest +from flask import Flask from pydantic import ValidationError from werkzeug.exceptions import BadRequest, NotFound @@ -69,7 +70,7 @@ def _unwrap(func): class TestCompletionEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def test_completion_create_payload(self): @@ -86,7 +87,7 @@ class TestCompletionEndpoints: ) assert payload.query == "hi" - def test_completion_api_success(self, app, monkeypatch): + def test_completion_api_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = completion_module.CompletionMessageApi() method = _unwrap(api.post) @@ -116,7 +117,7 @@ class TestCompletionEndpoints: assert resp == {"result": {"text": "ok"}} - def test_completion_api_conversation_not_exists(self, app, monkeypatch): + def test_completion_api_conversation_not_exists(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = completion_module.CompletionMessageApi() method = _unwrap(api.post) @@ -142,7 +143,7 @@ class TestCompletionEndpoints: with pytest.raises(NotFound): 
method(app_model=MagicMock(id="app-1")) - def test_completion_api_provider_not_initialized(self, app, monkeypatch): + def test_completion_api_provider_not_initialized(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = completion_module.CompletionMessageApi() method = _unwrap(api.post) @@ -166,7 +167,7 @@ class TestCompletionEndpoints: with pytest.raises(completion_module.ProviderNotInitializeError): method(app_model=MagicMock(id="app-1")) - def test_completion_api_quota_exceeded(self, app, monkeypatch): + def test_completion_api_quota_exceeded(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = completion_module.CompletionMessageApi() method = _unwrap(api.post) @@ -193,10 +194,10 @@ class TestCompletionEndpoints: class TestAppEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_app_put_should_preserve_icon_type_when_payload_omits_it(self, app, monkeypatch): + def test_app_put_should_preserve_icon_type_when_payload_omits_it(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = app_module.AppApi() method = _unwrap(api.put) payload = { @@ -234,7 +235,7 @@ class TestAppEndpoints: } ) - def test_app_icon_post_should_forward_icon_type(self, app, monkeypatch): + def test_app_icon_post_should_forward_icon_type(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = app_module.AppIconApi() method = _unwrap(api.post) payload = { @@ -266,7 +267,7 @@ class TestAppEndpoints: class TestOpsTraceEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def test_ops_trace_query_basic(self): @@ -277,7 +278,7 @@ class TestOpsTraceEndpoints: payload = TraceConfigPayload(tracing_provider="langfuse", tracing_config={"api_key": "k"}) assert payload.tracing_config["api_key"] == "k" - def test_trace_app_config_get_empty(self, app, monkeypatch): + def 
test_trace_app_config_get_empty(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = ops_trace_module.TraceAppConfigApi() method = _unwrap(api.get) @@ -292,7 +293,7 @@ class TestOpsTraceEndpoints: assert result == {"has_not_configured": True} - def test_trace_app_config_post_invalid(self, app, monkeypatch): + def test_trace_app_config_post_invalid(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = ops_trace_module.TraceAppConfigApi() method = _unwrap(api.post) @@ -309,7 +310,7 @@ class TestOpsTraceEndpoints: with pytest.raises(BadRequest): method(app_id="app-1") - def test_trace_app_config_delete_not_found(self, app, monkeypatch): + def test_trace_app_config_delete_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = ops_trace_module.TraceAppConfigApi() method = _unwrap(api.delete) @@ -326,7 +327,7 @@ class TestOpsTraceEndpoints: class TestSiteEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def test_site_response_structure(self): @@ -337,7 +338,7 @@ class TestSiteEndpoints: payload = AppSiteUpdatePayload(default_language="en-US") assert payload.default_language == "en-US" - def test_app_site_update_post(self, app, monkeypatch): + def test_app_site_update_post(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = site_module.AppSite() method = _unwrap(api.post) @@ -375,7 +376,7 @@ class TestSiteEndpoints: assert isinstance(result, dict) assert result["title"] == "My Site" - def test_app_site_access_token_reset(self, app, monkeypatch): + def test_app_site_access_token_reset(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = site_module.AppSiteAccessTokenReset() method = _unwrap(api.post) @@ -427,7 +428,7 @@ class TestWorkflowEndpoints: class TestWorkflowAppLogEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def 
test_workflow_app_log_query(self): @@ -438,7 +439,7 @@ class TestWorkflowAppLogEndpoints: query = WorkflowAppLogQuery(detail="true") assert query.detail is True - def test_workflow_app_log_api_get(self, app, monkeypatch): + def test_workflow_app_log_api_get(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = workflow_app_log_module.WorkflowAppLogApi() method = _unwrap(api.get) @@ -477,14 +478,14 @@ class TestWorkflowAppLogEndpoints: class TestWorkflowDraftVariableEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def test_workflow_variable_creation(self): payload = WorkflowDraftVariableUpdatePayload(name="var1", value="test") assert payload.name == "var1" - def test_workflow_variable_collection_get(self, app, monkeypatch): + def test_workflow_variable_collection_get(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = workflow_draft_variable_module.WorkflowVariableCollectionApi() method = _unwrap(api.get) @@ -529,7 +530,7 @@ class TestWorkflowDraftVariableEndpoints: class TestWorkflowStatisticEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def test_workflow_statistic_time_range(self): @@ -541,7 +542,7 @@ class TestWorkflowStatisticEndpoints: assert query.start is None assert query.end is None - def test_workflow_daily_runs_statistic(self, app, monkeypatch): + def test_workflow_daily_runs_statistic(self, app: Flask, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(workflow_statistic_module, "db", SimpleNamespace(engine=MagicMock())) monkeypatch.setattr( workflow_statistic_module.DifyAPIRepositoryFactory, @@ -567,7 +568,7 @@ class TestWorkflowStatisticEndpoints: assert response.get_json() == {"data": [{"date": "2024-01-01"}]} - def test_workflow_daily_terminals_statistic(self, app, monkeypatch): + def 
test_workflow_daily_terminals_statistic(self, app: Flask, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(workflow_statistic_module, "db", SimpleNamespace(engine=MagicMock())) monkeypatch.setattr( workflow_statistic_module.DifyAPIRepositoryFactory, @@ -598,7 +599,7 @@ class TestWorkflowStatisticEndpoints: class TestWorkflowTriggerEndpoints: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def test_webhook_trigger_payload(self): @@ -608,7 +609,7 @@ class TestWorkflowTriggerEndpoints: enable_payload = ParserEnable(trigger_id="trigger-1", enable_trigger=True) assert enable_payload.enable_trigger is True - def test_webhook_trigger_api_get(self, app, monkeypatch): + def test_webhook_trigger_api_get(self, app: Flask, monkeypatch: pytest.MonkeyPatch): api = workflow_trigger_module.WebhookTriggerApi() method = _unwrap(api.get) diff --git a/api/tests/test_containers_integration_tests/controllers/console/app/test_app_import_api.py b/api/tests/test_containers_integration_tests/controllers/console/app/test_app_import_api.py index 25d19cf35a..bcb6e41ef7 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/app/test_app_import_api.py +++ b/api/tests/test_containers_integration_tests/controllers/console/app/test_app_import_api.py @@ -6,6 +6,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from flask import Flask from controllers.console.app import app_import as app_import_module from services.app_dsl_service import ImportStatus @@ -36,10 +37,10 @@ def _install_features(monkeypatch: pytest.MonkeyPatch, enabled: bool) -> None: class TestAppImportApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_import_post_returns_failed_status(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def 
test_import_post_returns_failed_status(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportApi() method = _unwrap(api.post) @@ -57,7 +58,7 @@ class TestAppImportApi: assert status == 400 assert response["status"] == ImportStatus.FAILED - def test_import_post_returns_pending_status(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_import_post_returns_pending_status(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportApi() method = _unwrap(api.post) @@ -75,7 +76,7 @@ class TestAppImportApi: assert status == 202 assert response["status"] == ImportStatus.PENDING - def test_import_post_updates_webapp_auth_when_enabled(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_import_post_updates_webapp_auth_when_enabled(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportApi() method = _unwrap(api.post) @@ -96,7 +97,7 @@ class TestAppImportApi: assert status == 200 assert response["status"] == ImportStatus.COMPLETED - def test_import_post_commits_session_on_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_import_post_commits_session_on_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportApi() method = _unwrap(api.post) @@ -121,7 +122,7 @@ class TestAppImportApi: assert status == 200 assert response["status"] == ImportStatus.COMPLETED - def test_import_post_rolls_back_session_on_failure(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_import_post_rolls_back_session_on_failure(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportApi() method = _unwrap(api.post) @@ -149,10 +150,10 @@ class TestAppImportApi: class TestAppImportConfirmApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def 
test_import_confirm_returns_failed_status(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_import_confirm_returns_failed_status(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportConfirmApi() method = _unwrap(api.post) @@ -172,10 +173,10 @@ class TestAppImportConfirmApi: class TestAppImportCheckDependenciesApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_import_check_dependencies_returns_result(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_import_check_dependencies_returns_result(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: api = app_import_module.AppImportCheckDependenciesApi() method = _unwrap(api.get) diff --git a/api/tests/test_containers_integration_tests/controllers/console/app/test_workflow_draft_variable.py b/api/tests/test_containers_integration_tests/controllers/console/app/test_workflow_draft_variable.py index 290be87697..a071d22ee9 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/app/test_workflow_draft_variable.py +++ b/api/tests/test_containers_integration_tests/controllers/console/app/test_workflow_draft_variable.py @@ -168,6 +168,7 @@ def test_node_variable_collection_get_success( account, tenant = create_console_account_and_tenant(db_session_with_containers) app = create_console_app(db_session_with_containers, tenant.id, account.id, AppMode.WORKFLOW) node_variable = _create_node_variable(db_session_with_containers, app.id, account.id, node_id="node_123") + node_variable_id = node_variable.id _create_node_variable(db_session_with_containers, app.id, account.id, node_id="node_456", name="other") response = test_client_with_containers.get( @@ -178,7 +179,7 @@ def test_node_variable_collection_get_success( assert response.status_code == 200 payload = response.get_json() assert payload is not None - assert [item["id"] for item 
in payload["items"]] == [node_variable.id] + assert [item["id"] for item in payload["items"]] == [node_variable_id] def test_node_variable_collection_get_invalid_node_id( @@ -377,6 +378,7 @@ def test_system_variable_collection_get( account, tenant = create_console_account_and_tenant(db_session_with_containers) app = create_console_app(db_session_with_containers, tenant.id, account.id, AppMode.WORKFLOW) variable = _create_system_variable(db_session_with_containers, app.id, account.id) + variable_id = variable.id response = test_client_with_containers.get( f"/console/api/apps/{app.id}/workflows/draft/system-variables", @@ -386,7 +388,7 @@ def test_system_variable_collection_get( assert response.status_code == 200 payload = response.get_json() assert payload is not None - assert [item["id"] for item in payload["items"]] == [variable.id] + assert [item["id"] for item in payload["items"]] == [variable_id] def test_environment_variable_collection_get( diff --git a/api/tests/test_containers_integration_tests/controllers/console/auth/test_data_source_oauth.py b/api/tests/test_containers_integration_tests/controllers/console/auth/test_data_source_oauth.py index 81b5423261..f2c45f76da 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/auth/test_data_source_oauth.py +++ b/api/tests/test_containers_integration_tests/controllers/console/auth/test_data_source_oauth.py @@ -17,6 +17,8 @@ def test_get_oauth_url_successful( test_client_with_containers: FlaskClient, ) -> None: account, tenant = create_console_account_and_tenant(db_session_with_containers) + tenant_id = tenant.id + current_tenant_id = account.current_tenant_id provider = MagicMock() provider.get_authorization_url.return_value = "http://oauth.provider/auth" @@ -29,7 +31,7 @@ def test_get_oauth_url_successful( headers=authenticate_console_client(test_client_with_containers, account), ) - assert tenant.id == account.current_tenant_id + assert tenant_id == current_tenant_id assert 
response.status_code == 200 assert response.get_json() == {"data": "http://oauth.provider/auth"} provider.get_authorization_url.assert_called_once() diff --git a/api/tests/test_containers_integration_tests/controllers/console/auth/test_email_register.py b/api/tests/test_containers_integration_tests/controllers/console/auth/test_email_register.py index 320da85b60..1fcce9ca44 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/auth/test_email_register.py +++ b/api/tests/test_containers_integration_tests/controllers/console/auth/test_email_register.py @@ -6,6 +6,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.console.auth.email_register import ( EmailRegisterCheckApi, @@ -16,7 +17,7 @@ from services.account_service import AccountService @pytest.fixture -def app(flask_app_with_containers): +def app(flask_app_with_containers: Flask): return flask_app_with_containers @@ -33,7 +34,7 @@ class TestEmailRegisterSendEmailApi: mock_is_freeze, mock_send_mail, mock_get_account, - app, + app: Flask, ): mock_send_mail.return_value = "token-123" mock_is_freeze.return_value = False @@ -75,7 +76,7 @@ class TestEmailRegisterCheckApi: mock_revoke, mock_generate_token, mock_reset_rate, - app, + app: Flask, ): mock_rate_limit_check.return_value = False mock_get_data.return_value = {"email": "User@Example.com", "code": "4321"} @@ -120,7 +121,7 @@ class TestEmailRegisterResetApi: mock_create_account, mock_login, mock_reset_login_rate, - app, + app: Flask, ): mock_get_data.return_value = {"phase": "register", "email": "Invitee@Example.com"} mock_create_account.return_value = MagicMock() diff --git a/api/tests/test_containers_integration_tests/controllers/console/auth/test_forgot_password.py b/api/tests/test_containers_integration_tests/controllers/console/auth/test_forgot_password.py index d2703ed5cc..014c1588fe 100644 --- 
a/api/tests/test_containers_integration_tests/controllers/console/auth/test_forgot_password.py +++ b/api/tests/test_containers_integration_tests/controllers/console/auth/test_forgot_password.py @@ -6,6 +6,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.console.auth.forgot_password import ( ForgotPasswordCheckApi, @@ -16,7 +17,7 @@ from services.account_service import AccountService @pytest.fixture -def app(flask_app_with_containers): +def app(flask_app_with_containers: Flask): return flask_app_with_containers @@ -31,7 +32,7 @@ class TestForgotPasswordSendEmailApi: mock_is_ip_limit, mock_send_email, mock_get_account, - app, + app: Flask, ): mock_account = MagicMock() mock_get_account.return_value = mock_account @@ -80,7 +81,7 @@ class TestForgotPasswordCheckApi: mock_revoke_token, mock_generate_token, mock_reset_rate, - app, + app: Flask, ): mock_rate_limit_check.return_value = False mock_get_data.return_value = {"email": "Admin@Example.com", "code": "4321"} @@ -123,7 +124,7 @@ class TestForgotPasswordResetApi: mock_db, mock_get_account, mock_update_account, - app, + app: Flask, ): mock_get_reset_data.return_value = {"phase": "reset", "email": "User@Example.com"} mock_account = MagicMock() diff --git a/api/tests/test_containers_integration_tests/controllers/console/auth/test_oauth.py b/api/tests/test_containers_integration_tests/controllers/console/auth/test_oauth.py index 1eabb45422..55b6a919d8 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/auth/test_oauth.py +++ b/api/tests/test_containers_integration_tests/controllers/console/auth/test_oauth.py @@ -5,6 +5,7 @@ from __future__ import annotations from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.console.auth.oauth import ( OAuthCallback, @@ -21,7 +22,7 @@ from services.errors.account import AccountRegisterError class TestGetOAuthProviders: 
@pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @pytest.mark.parametrize( @@ -65,7 +66,7 @@ class TestOAuthLogin: return OAuthLogin() @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @pytest.fixture @@ -89,7 +90,7 @@ class TestOAuthLogin: mock_redirect, mock_get_providers, resource, - app, + app: Flask, mock_oauth_provider, invite_token, expected_token, @@ -130,7 +131,7 @@ class TestOAuthCallback: return OAuthCallback() @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @pytest.fixture @@ -164,7 +165,7 @@ class TestOAuthCallback: mock_get_providers, mock_config, resource, - app, + app: Flask, oauth_setup, ): mock_config.CONSOLE_WEB_URL = "http://localhost:3000" @@ -217,7 +218,7 @@ class TestOAuthCallback: mock_get_providers, mock_config, resource, - app, + app: Flask, oauth_setup, ): mock_config.CONSOLE_WEB_URL = "http://localhost:3000" @@ -261,7 +262,7 @@ class TestOAuthCallback: mock_tenant_service, mock_account_service, resource, - app, + app: Flask, oauth_setup, account_status, expected_redirect, @@ -300,7 +301,7 @@ class TestOAuthCallback: mock_get_providers, mock_config, resource, - app, + app: Flask, oauth_setup, ): mock_get_providers.return_value = {"github": oauth_setup["provider"]} @@ -336,7 +337,7 @@ class TestOAuthCallback: mock_get_providers, mock_config, resource, - app, + app: Flask, oauth_setup, ): """Defensive test for CLOSED account status handling in OAuth callback. 
@@ -394,7 +395,7 @@ class TestOAuthCallback: class TestAccountGeneration: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @pytest.fixture @@ -465,7 +466,7 @@ class TestAccountGeneration: mock_register_service, mock_feature_service, mock_get_account, - app, + app: Flask, user_info, mock_account, allow_register, @@ -504,7 +505,7 @@ class TestAccountGeneration: mock_register_service, mock_feature_service, mock_get_account, - app, + app: Flask, ): user_info = OAuthUserInfo(id="123", name="Test User", email="Upper@Example.com") mock_feature_service.get_system_features.return_value.is_allow_register = True @@ -529,7 +530,7 @@ class TestAccountGeneration: mock_feature_service, mock_tenant_service, mock_get_account, - app, + app: Flask, user_info, mock_account, ): diff --git a/api/tests/test_containers_integration_tests/controllers/console/auth/test_password_reset.py b/api/tests/test_containers_integration_tests/controllers/console/auth/test_password_reset.py index 50249bcd74..5fc3b3084a 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/auth/test_password_reset.py +++ b/api/tests/test_containers_integration_tests/controllers/console/auth/test_password_reset.py @@ -5,6 +5,8 @@ from __future__ import annotations from unittest.mock import MagicMock, patch import pytest +from flask import Flask +from sqlalchemy.orm import Session from controllers.console.auth.error import ( EmailCodeError, @@ -19,13 +21,15 @@ from controllers.console.auth.forgot_password import ( ForgotPasswordSendEmailApi, ) from controllers.console.error import AccountNotFound, EmailSendIpLimitError +from tests.test_containers_integration_tests.controllers.console.helpers import ensure_dify_setup class TestForgotPasswordSendEmailApi: """Test cases for sending password reset emails.""" @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: 
Flask, db_session_with_containers: Session): + ensure_dify_setup(db_session_with_containers) return flask_app_with_containers @pytest.fixture @@ -46,7 +50,7 @@ class TestForgotPasswordSendEmailApi: mock_send_email, mock_get_account, mock_is_ip_limit, - app, + app: Flask, mock_account, ): # Arrange @@ -68,7 +72,7 @@ class TestForgotPasswordSendEmailApi: mock_send_email.assert_called_once() @patch("controllers.console.auth.forgot_password.AccountService.is_email_send_ip_limit") - def test_send_reset_email_ip_rate_limited(self, mock_is_ip_limit, app): + def test_send_reset_email_ip_rate_limited(self, mock_is_ip_limit, app: Flask): """ Test password reset email blocked by IP rate limit. @@ -104,7 +108,7 @@ class TestForgotPasswordSendEmailApi: mock_send_email, mock_get_account, mock_is_ip_limit, - app, + app: Flask, mock_account, language_input, expected_language, @@ -138,7 +142,8 @@ class TestForgotPasswordCheckApi: """Test cases for verifying password reset codes.""" @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask, db_session_with_containers: Session): + ensure_dify_setup(db_session_with_containers) return flask_app_with_containers @patch("controllers.console.auth.forgot_password.AccountService.is_forgot_password_error_rate_limit") @@ -153,7 +158,7 @@ class TestForgotPasswordCheckApi: mock_revoke_token, mock_get_data, mock_is_rate_limit, - app, + app: Flask, ): """ Test successful verification code validation. 
@@ -200,7 +205,7 @@ class TestForgotPasswordCheckApi: mock_revoke_token, mock_get_data, mock_is_rate_limit, - app, + app: Flask, ): mock_is_rate_limit.return_value = False mock_get_data.return_value = {"email": "User@Example.com", "code": "999888"} @@ -221,7 +226,7 @@ class TestForgotPasswordCheckApi: mock_reset_rate_limit.assert_called_once_with("user@example.com") @patch("controllers.console.auth.forgot_password.AccountService.is_forgot_password_error_rate_limit") - def test_verify_code_rate_limited(self, mock_is_rate_limit, app): + def test_verify_code_rate_limited(self, mock_is_rate_limit, app: Flask): """ Test code verification blocked by rate limit. @@ -244,7 +249,7 @@ class TestForgotPasswordCheckApi: @patch("controllers.console.auth.forgot_password.AccountService.is_forgot_password_error_rate_limit") @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") - def test_verify_code_invalid_token(self, mock_get_data, mock_is_rate_limit, app): + def test_verify_code_invalid_token(self, mock_get_data, mock_is_rate_limit, app: Flask): """ Test code verification with invalid token. @@ -267,7 +272,7 @@ class TestForgotPasswordCheckApi: @patch("controllers.console.auth.forgot_password.AccountService.is_forgot_password_error_rate_limit") @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") - def test_verify_code_email_mismatch(self, mock_get_data, mock_is_rate_limit, app): + def test_verify_code_email_mismatch(self, mock_get_data, mock_is_rate_limit, app: Flask): """ Test code verification with mismatched email. 
@@ -292,7 +297,7 @@ class TestForgotPasswordCheckApi: @patch("controllers.console.auth.forgot_password.AccountService.is_forgot_password_error_rate_limit") @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") @patch("controllers.console.auth.forgot_password.AccountService.add_forgot_password_error_rate_limit") - def test_verify_code_wrong_code(self, mock_add_rate_limit, mock_get_data, mock_is_rate_limit, app): + def test_verify_code_wrong_code(self, mock_add_rate_limit, mock_get_data, mock_is_rate_limit, app: Flask): """ Test code verification with incorrect code. @@ -321,7 +326,8 @@ class TestForgotPasswordResetApi: """Test cases for resetting password with verified token.""" @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask, db_session_with_containers: Session): + ensure_dify_setup(db_session_with_containers) return flask_app_with_containers @pytest.fixture @@ -344,7 +350,7 @@ class TestForgotPasswordResetApi: mock_get_account, mock_revoke_token, mock_get_data, - app, + app: Flask, mock_account, ): """ @@ -375,7 +381,7 @@ class TestForgotPasswordResetApi: mock_revoke_token.assert_called_once_with("valid_token") @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") - def test_reset_password_mismatch(self, mock_get_data, app): + def test_reset_password_mismatch(self, mock_get_data, app: Flask): """ Test password reset with mismatched passwords. @@ -397,7 +403,7 @@ class TestForgotPasswordResetApi: api.post() @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") - def test_reset_password_invalid_token(self, mock_get_data, app): + def test_reset_password_invalid_token(self, mock_get_data, app: Flask): """ Test password reset with invalid token. 
@@ -418,7 +424,7 @@ class TestForgotPasswordResetApi: api.post() @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") - def test_reset_password_wrong_phase(self, mock_get_data, app): + def test_reset_password_wrong_phase(self, mock_get_data, app: Flask): """ Test password reset with token not in reset phase. @@ -442,7 +448,7 @@ class TestForgotPasswordResetApi: @patch("controllers.console.auth.forgot_password.AccountService.get_reset_password_data") @patch("controllers.console.auth.forgot_password.AccountService.revoke_reset_password_token") @patch("controllers.console.auth.forgot_password.AccountService.get_account_by_email_with_case_fallback") - def test_reset_password_account_not_found(self, mock_get_account, mock_revoke_token, mock_get_data, app): + def test_reset_password_account_not_found(self, mock_get_account, mock_revoke_token, mock_get_data, app: Flask): """ Test password reset for non-existent account. diff --git a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline.py b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline.py index d5ae95dfb7..7aa4aff1cc 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline.py +++ b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline.py @@ -6,6 +6,7 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from flask import Flask from sqlalchemy.orm import Session from controllers.console import console_ns @@ -26,10 +27,10 @@ def unwrap(func): class TestPipelineTemplateListApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = PipelineTemplateListApi() method = 
unwrap(api.get) @@ -50,10 +51,10 @@ class TestPipelineTemplateListApi: class TestPipelineTemplateDetailApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = PipelineTemplateDetailApi() method = unwrap(api.get) @@ -74,7 +75,7 @@ class TestPipelineTemplateDetailApi: assert status == 200 assert response == template - def test_get_returns_404_when_template_not_found(self, app): + def test_get_returns_404_when_template_not_found(self, app: Flask): api = PipelineTemplateDetailApi() method = unwrap(api.get) @@ -93,7 +94,7 @@ class TestPipelineTemplateDetailApi: assert status == 404 assert "error" in response - def test_get_returns_404_for_customized_type_not_found(self, app): + def test_get_returns_404_for_customized_type_not_found(self, app: Flask): api = PipelineTemplateDetailApi() method = unwrap(api.get) @@ -115,10 +116,10 @@ class TestPipelineTemplateDetailApi: class TestCustomizedPipelineTemplateApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_patch_success(self, app): + def test_patch_success(self, app: Flask): api = CustomizedPipelineTemplateApi() method = unwrap(api.patch) @@ -140,7 +141,7 @@ class TestCustomizedPipelineTemplateApi: update_mock.assert_called_once() assert response == 200 - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = CustomizedPipelineTemplateApi() method = unwrap(api.delete) @@ -155,7 +156,7 @@ class TestCustomizedPipelineTemplateApi: delete_mock.assert_called_once_with("tpl-1") assert response == 200 - def test_post_success(self, app, db_session_with_containers: Session): + def test_post_success(self, app: Flask, db_session_with_containers: Session): api = CustomizedPipelineTemplateApi() method = unwrap(api.post) @@ 
-182,7 +183,7 @@ class TestCustomizedPipelineTemplateApi: assert status == 200 assert response == {"data": "yaml-data"} - def test_post_template_not_found(self, app): + def test_post_template_not_found(self, app: Flask): api = CustomizedPipelineTemplateApi() method = unwrap(api.post) @@ -193,10 +194,10 @@ class TestCustomizedPipelineTemplateApi: class TestPublishCustomizedPipelineTemplateApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = PublishCustomizedPipelineTemplateApi() method = unwrap(api.post) diff --git a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_datasets.py b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_datasets.py index 64e3de2ca3..7624c1150f 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_datasets.py +++ b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_datasets.py @@ -5,6 +5,7 @@ from __future__ import annotations from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import Forbidden import services @@ -24,13 +25,13 @@ def unwrap(func): class TestCreateRagPipelineDatasetApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def _valid_payload(self): return {"yaml_content": "name: test"} - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = CreateRagPipelineDatasetApi() method = unwrap(api.post) @@ -58,7 +59,7 @@ class TestCreateRagPipelineDatasetApi: assert status == 201 assert response == import_info - def test_post_forbidden_non_editor(self, app): + def 
test_post_forbidden_non_editor(self, app: Flask): api = CreateRagPipelineDatasetApi() method = unwrap(api.post) @@ -76,7 +77,7 @@ class TestCreateRagPipelineDatasetApi: with pytest.raises(Forbidden): method(api) - def test_post_dataset_name_duplicate(self, app): + def test_post_dataset_name_duplicate(self, app: Flask): api = CreateRagPipelineDatasetApi() method = unwrap(api.post) @@ -101,7 +102,7 @@ class TestCreateRagPipelineDatasetApi: with pytest.raises(DatasetNameDuplicateError): method(api) - def test_post_invalid_payload(self, app): + def test_post_invalid_payload(self, app: Flask): api = CreateRagPipelineDatasetApi() method = unwrap(api.post) @@ -122,10 +123,10 @@ class TestCreateRagPipelineDatasetApi: class TestCreateEmptyRagPipelineDatasetApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = CreateEmptyRagPipelineDatasetApi() method = unwrap(api.post) @@ -152,7 +153,7 @@ class TestCreateEmptyRagPipelineDatasetApi: assert status == 201 assert response == {"id": "ds-1"} - def test_post_forbidden_non_editor(self, app): + def test_post_forbidden_non_editor(self, app: Flask): api = CreateEmptyRagPipelineDatasetApi() method = unwrap(api.post) diff --git a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_import.py b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_import.py index cb67892878..44eb5c336c 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_import.py +++ b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_import.py @@ -5,6 +5,7 @@ from __future__ import annotations from unittest.mock import MagicMock, patch import pytest +from flask import Flask 
from controllers.console import console_ns from controllers.console.datasets.rag_pipeline.rag_pipeline_import import ( @@ -25,7 +26,7 @@ def unwrap(func): class TestRagPipelineImportApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def _payload(self, mode="create"): @@ -35,7 +36,7 @@ class TestRagPipelineImportApi: "name": "Test", } - def test_post_success_200(self, app): + def test_post_success_200(self, app: Flask): api = RagPipelineImportApi() method = unwrap(api.post) @@ -65,7 +66,7 @@ class TestRagPipelineImportApi: assert status == 200 assert response == {"status": "success"} - def test_post_failed_400(self, app): + def test_post_failed_400(self, app: Flask): api = RagPipelineImportApi() method = unwrap(api.post) @@ -95,7 +96,7 @@ class TestRagPipelineImportApi: assert status == 400 assert response == {"status": "failed"} - def test_post_pending_202(self, app): + def test_post_pending_202(self, app: Flask): api = RagPipelineImportApi() method = unwrap(api.post) @@ -128,10 +129,10 @@ class TestRagPipelineImportApi: class TestRagPipelineImportConfirmApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_confirm_success(self, app): + def test_confirm_success(self, app: Flask): api = RagPipelineImportConfirmApi() method = unwrap(api.post) @@ -159,7 +160,7 @@ class TestRagPipelineImportConfirmApi: assert status == 200 assert response == {"ok": True} - def test_confirm_failed(self, app): + def test_confirm_failed(self, app: Flask): api = RagPipelineImportConfirmApi() method = unwrap(api.post) @@ -190,10 +191,10 @@ class TestRagPipelineImportConfirmApi: class TestRagPipelineImportCheckDependenciesApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def 
test_get_success(self, app): + def test_get_success(self, app: Flask): api = RagPipelineImportCheckDependenciesApi() method = unwrap(api.get) @@ -219,10 +220,10 @@ class TestRagPipelineImportCheckDependenciesApi: class TestRagPipelineExportApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_with_include_secret(self, app): + def test_get_with_include_secret(self, app: Flask): api = RagPipelineExportApi() method = unwrap(api.get) diff --git a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_workflow.py b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_workflow.py index c1f3122c2b..ba59780d59 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_workflow.py +++ b/api/tests/test_containers_integration_tests/controllers/console/datasets/rag_pipeline/test_rag_pipeline_workflow.py @@ -3,10 +3,12 @@ from __future__ import annotations from datetime import datetime +from types import SimpleNamespace from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from flask import Flask from sqlalchemy.orm import Session from werkzeug.exceptions import BadRequest, Forbidden, HTTPException, NotFound @@ -43,12 +45,41 @@ def unwrap(func): return func +def make_node_execution(**overrides): + payload = { + "id": "node-exec-1", + "index": 1, + "predecessor_node_id": None, + "node_id": "node1", + "node_type": "start", + "title": "Start", + "inputs_dict": {"query": "hello"}, + "process_data_dict": {}, + "outputs_dict": {"answer": "world"}, + "status": "succeeded", + "error": None, + "elapsed_time": 1.0, + "execution_metadata_dict": {}, + "extras": {}, + "created_at": datetime(2026, 1, 1, 0, 0, 0), + "created_by_role": "account", + "created_by_account": None, + "created_by_end_user": 
None, + "finished_at": datetime(2026, 1, 1, 0, 0, 1), + "inputs_truncated": False, + "outputs_truncated": False, + "process_data_truncated": False, + } + payload.update(overrides) + return SimpleNamespace(**payload) + + class TestDraftWorkflowApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_draft_success(self, app): + def test_get_draft_success(self, app: Flask): api = DraftRagPipelineApi() method = unwrap(api.get) @@ -68,7 +99,7 @@ class TestDraftWorkflowApi: result = method(api, pipeline) assert result == workflow - def test_get_draft_not_exist(self, app): + def test_get_draft_not_exist(self, app: Flask): api = DraftRagPipelineApi() method = unwrap(api.get) @@ -86,7 +117,7 @@ class TestDraftWorkflowApi: with pytest.raises(DraftWorkflowNotExist): method(api, pipeline) - def test_sync_hash_not_match(self, app): + def test_sync_hash_not_match(self, app: Flask): api = DraftRagPipelineApi() method = unwrap(api.post) @@ -111,7 +142,7 @@ class TestDraftWorkflowApi: with pytest.raises(DraftWorkflowNotSync): method(api, pipeline) - def test_sync_invalid_text_plain(self, app): + def test_sync_invalid_text_plain(self, app: Flask): api = DraftRagPipelineApi() method = unwrap(api.post) @@ -128,7 +159,7 @@ class TestDraftWorkflowApi: response, status = method(api, pipeline) assert status == 400 - def test_restore_published_workflow_to_draft_success(self, app): + def test_restore_published_workflow_to_draft_success(self, app: Flask): api = RagPipelineDraftWorkflowRestoreApi() method = unwrap(api.post) @@ -155,7 +186,7 @@ class TestDraftWorkflowApi: assert result["result"] == "success" assert result["hash"] == "restored-hash" - def test_restore_published_workflow_to_draft_not_found(self, app): + def test_restore_published_workflow_to_draft_not_found(self, app: Flask): api = RagPipelineDraftWorkflowRestoreApi() method = unwrap(api.post) @@ -179,7 +210,7 @@ class 
TestDraftWorkflowApi: with pytest.raises(NotFound): method(api, pipeline, "published-workflow") - def test_restore_published_workflow_to_draft_returns_400_for_draft_source(self, app): + def test_restore_published_workflow_to_draft_returns_400_for_draft_source(self, app: Flask): api = RagPipelineDraftWorkflowRestoreApi() method = unwrap(api.post) @@ -211,10 +242,10 @@ class TestDraftWorkflowApi: class TestDraftRunNodes: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_iteration_node_success(self, app): + def test_iteration_node_success(self, app: Flask): api = RagPipelineDraftRunIterationNodeApi() method = unwrap(api.post) @@ -240,7 +271,7 @@ class TestDraftRunNodes: result = method(api, pipeline, "node") assert result == {"ok": True} - def test_iteration_node_conversation_not_exists(self, app): + def test_iteration_node_conversation_not_exists(self, app: Flask): api = RagPipelineDraftRunIterationNodeApi() method = unwrap(api.post) @@ -262,7 +293,7 @@ class TestDraftRunNodes: with pytest.raises(NotFound): method(api, pipeline, "node") - def test_loop_node_success(self, app): + def test_loop_node_success(self, app: Flask): api = RagPipelineDraftRunLoopNodeApi() method = unwrap(api.post) @@ -290,10 +321,10 @@ class TestDraftRunNodes: class TestPipelineRunApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_draft_run_success(self, app): + def test_draft_run_success(self, app: Flask): api = DraftRagPipelineRunApi() method = unwrap(api.post) @@ -325,7 +356,7 @@ class TestPipelineRunApis: ): assert method(api, pipeline) == {"ok": True} - def test_draft_run_rate_limit(self, app): + def test_draft_run_rate_limit(self, app: Flask): api = DraftRagPipelineRunApi() method = unwrap(api.post) @@ -356,10 +387,10 @@ class TestPipelineRunApis: class TestDraftNodeRun: 
@pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_execution_not_found(self, app): + def test_execution_not_found(self, app: Flask): api = RagPipelineDraftNodeRunApi() method = unwrap(api.post) @@ -387,10 +418,10 @@ class TestDraftNodeRun: class TestPublishedPipelineApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_publish_success(self, app, db_session_with_containers: Session): + def test_publish_success(self, app: Flask, db_session_with_containers: Session): from models.dataset import Pipeline api = PublishedRagPipelineApi() @@ -436,10 +467,10 @@ class TestPublishedPipelineApis: class TestMiscApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_task_stop(self, app): + def test_task_stop(self, app: Flask): api = RagPipelineTaskStopApi() method = unwrap(api.post) @@ -460,7 +491,7 @@ class TestMiscApis: stop_mock.assert_called_once() assert result["result"] == "success" - def test_transform_forbidden(self, app): + def test_transform_forbidden(self, app: Flask): api = RagPipelineTransformApi() method = unwrap(api.post) @@ -476,7 +507,7 @@ class TestMiscApis: with pytest.raises(Forbidden): method(api, "ds1") - def test_recommended_plugins(self, app): + def test_recommended_plugins(self, app: Flask): api = RagPipelineRecommendedPluginApi() method = unwrap(api.get) @@ -496,10 +527,10 @@ class TestMiscApis: class TestPublishedRagPipelineRunApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_published_run_success(self, app): + def test_published_run_success(self, app: Flask): api = PublishedRagPipelineRunApi() method = unwrap(api.post) @@ -533,7 
+564,7 @@ class TestPublishedRagPipelineRunApi: result = method(api, pipeline) assert result == {"ok": True} - def test_published_run_rate_limit(self, app): + def test_published_run_rate_limit(self, app: Flask): api = PublishedRagPipelineRunApi() method = unwrap(api.post) @@ -565,10 +596,10 @@ class TestPublishedRagPipelineRunApi: class TestDefaultBlockConfigApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_block_config_success(self, app): + def test_get_block_config_success(self, app: Flask): api = DefaultRagPipelineBlockConfigApi() method = unwrap(api.get) @@ -587,7 +618,7 @@ class TestDefaultBlockConfigApi: result = method(api, pipeline, "llm") assert result == {"k": "v"} - def test_get_block_config_invalid_json(self, app): + def test_get_block_config_invalid_json(self, app: Flask): api = DefaultRagPipelineBlockConfigApi() method = unwrap(api.get) @@ -600,10 +631,10 @@ class TestDefaultBlockConfigApi: class TestPublishedAllRagPipelineApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_published_workflows_success(self, app): + def test_get_published_workflows_success(self, app: Flask): api = PublishedAllRagPipelineApi() method = unwrap(api.get) @@ -629,7 +660,7 @@ class TestPublishedAllRagPipelineApi: assert result["items"] == [{"id": "w1"}] assert result["has_more"] is False - def test_get_published_workflows_forbidden(self, app): + def test_get_published_workflows_forbidden(self, app: Flask): api = PublishedAllRagPipelineApi() method = unwrap(api.get) @@ -649,10 +680,10 @@ class TestPublishedAllRagPipelineApi: class TestRagPipelineByIdApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_patch_success(self, app): + def 
test_patch_success(self, app: Flask): api = RagPipelineByIdApi() method = unwrap(api.patch) @@ -682,7 +713,7 @@ class TestRagPipelineByIdApi: assert result == workflow - def test_patch_no_fields(self, app): + def test_patch_no_fields(self, app: Flask): api = RagPipelineByIdApi() method = unwrap(api.patch) @@ -700,7 +731,7 @@ class TestRagPipelineByIdApi: result, status = method(api, pipeline, "w1") assert status == 400 - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = RagPipelineByIdApi() method = unwrap(api.delete) @@ -720,7 +751,7 @@ class TestRagPipelineByIdApi: workflow_service.delete_workflow.assert_called_once() assert result == (None, 204) - def test_delete_active_workflow_rejected(self, app): + def test_delete_active_workflow_rejected(self, app: Flask): api = RagPipelineByIdApi() method = unwrap(api.delete) @@ -733,16 +764,16 @@ class TestRagPipelineByIdApi: class TestRagPipelineWorkflowLastRunApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_last_run_success(self, app): + def test_last_run_success(self, app: Flask): api = RagPipelineWorkflowLastRunApi() method = unwrap(api.get) pipeline = MagicMock() workflow = MagicMock() - node_exec = MagicMock() + node_exec = make_node_execution() service = MagicMock() service.get_draft_workflow.return_value = workflow @@ -756,9 +787,11 @@ class TestRagPipelineWorkflowLastRunApi: ), ): result = method(api, pipeline, "node1") - assert result == node_exec + assert result["id"] == "node-exec-1" + assert result["inputs"] == {"query": "hello"} + assert result["outputs"] == {"answer": "world"} - def test_last_run_not_found(self, app): + def test_last_run_not_found(self, app: Flask): api = RagPipelineWorkflowLastRunApi() method = unwrap(api.get) @@ -780,10 +813,10 @@ class TestRagPipelineWorkflowLastRunApi: class TestRagPipelineDatasourceVariableApi: @pytest.fixture - def 
app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_set_datasource_variables_success(self, app): + def test_set_datasource_variables_success(self, app: Flask): api = RagPipelineDatasourceVariableApi() method = unwrap(api.post) @@ -798,7 +831,7 @@ class TestRagPipelineDatasourceVariableApi: } service = MagicMock() - service.set_datasource_variables.return_value = MagicMock() + service.set_datasource_variables.return_value = make_node_execution(node_id="n1") with ( app.test_request_context("/", json=payload), @@ -813,4 +846,5 @@ class TestRagPipelineDatasourceVariableApi: ), ): result = method(api, pipeline) - assert result is not None + assert result["node_id"] == "n1" + assert result["process_data"] == {} diff --git a/api/tests/test_containers_integration_tests/controllers/console/datasets/test_data_source.py b/api/tests/test_containers_integration_tests/controllers/console/datasets/test_data_source.py index 1c4c6a899f..b59009f7c4 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/datasets/test_data_source.py +++ b/api/tests/test_containers_integration_tests/controllers/console/datasets/test_data_source.py @@ -5,6 +5,7 @@ from __future__ import annotations from unittest.mock import MagicMock, PropertyMock, patch import pytest +from flask import Flask from werkzeug.exceptions import NotFound from controllers.console.datasets import data_source @@ -51,10 +52,10 @@ def mock_engine(): class TestDataSourceApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app, patch_tenant): + def test_get_success(self, app: Flask, patch_tenant): api = DataSourceApi() method = unwrap(api.get) @@ -78,7 +79,7 @@ class TestDataSourceApi: assert status == 200 assert response["data"][0]["is_bound"] is True - def test_get_no_bindings(self, app, patch_tenant): + 
def test_get_no_bindings(self, app: Flask, patch_tenant): api = DataSourceApi() method = unwrap(api.get) @@ -94,7 +95,7 @@ class TestDataSourceApi: assert status == 200 assert response["data"] == [] - def test_patch_enable_binding(self, app, patch_tenant, mock_engine): + def test_patch_enable_binding(self, app: Flask, patch_tenant, mock_engine): api = DataSourceApi() method = unwrap(api.patch) @@ -115,7 +116,7 @@ class TestDataSourceApi: assert status == 200 assert binding.disabled is False - def test_patch_disable_binding(self, app, patch_tenant, mock_engine): + def test_patch_disable_binding(self, app: Flask, patch_tenant, mock_engine): api = DataSourceApi() method = unwrap(api.patch) @@ -136,7 +137,7 @@ class TestDataSourceApi: assert status == 200 assert binding.disabled is True - def test_patch_binding_not_found(self, app, patch_tenant, mock_engine): + def test_patch_binding_not_found(self, app: Flask, patch_tenant, mock_engine): api = DataSourceApi() method = unwrap(api.patch) @@ -151,7 +152,7 @@ class TestDataSourceApi: with pytest.raises(NotFound): method(api, "b1", "enable") - def test_patch_enable_already_enabled(self, app, patch_tenant, mock_engine): + def test_patch_enable_already_enabled(self, app: Flask, patch_tenant, mock_engine): api = DataSourceApi() method = unwrap(api.patch) @@ -168,7 +169,7 @@ class TestDataSourceApi: with pytest.raises(ValueError): method(api, "b1", "enable") - def test_patch_disable_already_disabled(self, app, patch_tenant, mock_engine): + def test_patch_disable_already_disabled(self, app: Flask, patch_tenant, mock_engine): api = DataSourceApi() method = unwrap(api.patch) @@ -188,10 +189,10 @@ class TestDataSourceApi: class TestDataSourceNotionListApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_credential_not_found(self, app, patch_tenant): + def test_get_credential_not_found(self, app: Flask, patch_tenant): api 
= DataSourceNotionListApi() method = unwrap(api.get) @@ -205,7 +206,7 @@ class TestDataSourceNotionListApi: with pytest.raises(NotFound): method(api) - def test_get_success_no_dataset_id(self, app, patch_tenant, mock_engine): + def test_get_success_no_dataset_id(self, app: Flask, patch_tenant, mock_engine): api = DataSourceNotionListApi() method = unwrap(api.get) @@ -246,7 +247,7 @@ class TestDataSourceNotionListApi: assert status == 200 - def test_get_success_with_dataset_id(self, app, patch_tenant, mock_engine): + def test_get_success_with_dataset_id(self, app: Flask, patch_tenant, mock_engine): api = DataSourceNotionListApi() method = unwrap(api.get) @@ -299,7 +300,7 @@ class TestDataSourceNotionListApi: assert status == 200 - def test_get_invalid_dataset_type(self, app, patch_tenant, mock_engine): + def test_get_invalid_dataset_type(self, app: Flask, patch_tenant, mock_engine): api = DataSourceNotionListApi() method = unwrap(api.get) @@ -323,10 +324,10 @@ class TestDataSourceNotionListApi: class TestDataSourceNotionApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_preview_success(self, app, patch_tenant): + def test_get_preview_success(self, app: Flask, patch_tenant): api = DataSourceNotionApi() method = unwrap(api.get) @@ -347,7 +348,7 @@ class TestDataSourceNotionApi: assert status == 200 - def test_post_indexing_estimate_success(self, app, patch_tenant): + def test_post_indexing_estimate_success(self, app: Flask, patch_tenant): api = DataSourceNotionApi() method = unwrap(api.post) @@ -381,10 +382,10 @@ class TestDataSourceNotionApi: class TestDataSourceNotionDatasetSyncApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app, patch_tenant): + def test_get_success(self, app: Flask, patch_tenant): api = 
DataSourceNotionDatasetSyncApi() method = unwrap(api.get) @@ -407,7 +408,7 @@ class TestDataSourceNotionDatasetSyncApi: assert status == 200 - def test_get_dataset_not_found(self, app, patch_tenant): + def test_get_dataset_not_found(self, app: Flask, patch_tenant): api = DataSourceNotionDatasetSyncApi() method = unwrap(api.get) @@ -424,10 +425,10 @@ class TestDataSourceNotionDatasetSyncApi: class TestDataSourceNotionDocumentSyncApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app, patch_tenant): + def test_get_success(self, app: Flask, patch_tenant): api = DataSourceNotionDocumentSyncApi() method = unwrap(api.get) @@ -450,7 +451,7 @@ class TestDataSourceNotionDocumentSyncApi: assert status == 200 - def test_get_document_not_found(self, app, patch_tenant): + def test_get_document_not_found(self, app: Flask, patch_tenant): api = DataSourceNotionDocumentSyncApi() method = unwrap(api.get) diff --git a/api/tests/test_containers_integration_tests/controllers/console/explore/test_conversation.py b/api/tests/test_containers_integration_tests/controllers/console/explore/test_conversation.py index 83492048ef..917aa35fe6 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/explore/test_conversation.py +++ b/api/tests/test_containers_integration_tests/controllers/console/explore/test_conversation.py @@ -5,6 +5,7 @@ from __future__ import annotations from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import NotFound import controllers.console.explore.conversation as conversation_module @@ -53,10 +54,10 @@ def user(): class TestConversationListApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app, chat_app, user): + def test_get_success(self, app: 
Flask, chat_app, user): api = conversation_module.ConversationListApi() method = unwrap(api.get) @@ -81,7 +82,7 @@ class TestConversationListApi: assert result["has_more"] is False assert len(result["data"]) == 2 - def test_last_conversation_not_exists(self, app, chat_app, user): + def test_last_conversation_not_exists(self, app: Flask, chat_app, user): api = conversation_module.ConversationListApi() method = unwrap(api.get) @@ -97,7 +98,7 @@ class TestConversationListApi: with pytest.raises(NotFound): method(chat_app) - def test_wrong_app_mode(self, app, non_chat_app): + def test_wrong_app_mode(self, app: Flask, non_chat_app): api = conversation_module.ConversationListApi() method = unwrap(api.get) @@ -108,10 +109,10 @@ class TestConversationListApi: class TestConversationApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_delete_success(self, app, chat_app, user): + def test_delete_success(self, app: Flask, chat_app, user): api = conversation_module.ConversationApi() method = unwrap(api.delete) @@ -129,7 +130,7 @@ class TestConversationApi: assert status == 204 assert body["result"] == "success" - def test_delete_not_found(self, app, chat_app, user): + def test_delete_not_found(self, app: Flask, chat_app, user): api = conversation_module.ConversationApi() method = unwrap(api.delete) @@ -145,7 +146,7 @@ class TestConversationApi: with pytest.raises(NotFound): method(chat_app, "cid") - def test_delete_wrong_app_mode(self, app, non_chat_app): + def test_delete_wrong_app_mode(self, app: Flask, non_chat_app): api = conversation_module.ConversationApi() method = unwrap(api.delete) @@ -156,10 +157,10 @@ class TestConversationApi: class TestConversationRenameApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_rename_success(self, app, chat_app, user): + def 
test_rename_success(self, app: Flask, chat_app, user): api = conversation_module.ConversationRenameApi() method = unwrap(api.post) @@ -178,7 +179,7 @@ class TestConversationRenameApi: assert result["id"] == "cid" - def test_rename_not_found(self, app, chat_app, user): + def test_rename_not_found(self, app: Flask, chat_app, user): api = conversation_module.ConversationRenameApi() method = unwrap(api.post) @@ -197,10 +198,10 @@ class TestConversationRenameApi: class TestConversationPinApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_pin_success(self, app, chat_app, user): + def test_pin_success(self, app: Flask, chat_app, user): api = conversation_module.ConversationPinApi() method = unwrap(api.patch) @@ -219,10 +220,10 @@ class TestConversationPinApi: class TestConversationUnPinApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_unpin_success(self, app, chat_app, user): + def test_unpin_success(self, app: Flask, chat_app, user): api = conversation_module.ConversationUnPinApi() method = unwrap(api.patch) diff --git a/api/tests/test_containers_integration_tests/controllers/console/workspace/test_tool_provider.py b/api/tests/test_containers_integration_tests/controllers/console/workspace/test_tool_provider.py index f2e7104b18..d944613886 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/workspace/test_tool_provider.py +++ b/api/tests/test_containers_integration_tests/controllers/console/workspace/test_tool_provider.py @@ -6,6 +6,7 @@ import json from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import Forbidden from controllers.console.workspace.tool_providers import ( @@ -60,7 +61,7 @@ def _mock_user_tenant(): @pytest.fixture -def client(flask_app_with_containers): +def 
client(flask_app_with_containers: Flask): return flask_app_with_containers.test_client() @@ -147,10 +148,10 @@ class TestUtils: class TestToolProviderListApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = ToolProviderListApi() method = unwrap(api.get) @@ -170,10 +171,10 @@ class TestToolProviderListApi: class TestBuiltinProviderApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_list_tools(self, app): + def test_list_tools(self, app: Flask): api = ToolBuiltinProviderListToolsApi() method = unwrap(api.get) @@ -190,7 +191,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider") == [{"a": 1}] - def test_info(self, app): + def test_info(self, app: Flask): api = ToolBuiltinProviderInfoApi() method = unwrap(api.get) @@ -207,7 +208,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider") == {"x": 1} - def test_delete(self, app): + def test_delete(self, app: Flask): api = ToolBuiltinProviderDeleteApi() method = unwrap(api.post) @@ -224,7 +225,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider")["result"] == "success" - def test_add_invalid_type(self, app): + def test_add_invalid_type(self, app: Flask): api = ToolBuiltinProviderAddApi() method = unwrap(api.post) @@ -238,7 +239,7 @@ class TestBuiltinProviderApis: with pytest.raises(ValueError): method(api, "provider") - def test_add_success(self, app): + def test_add_success(self, app: Flask): api = ToolBuiltinProviderAddApi() method = unwrap(api.post) @@ -257,7 +258,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider")["id"] == 1 - def test_update(self, app): + def test_update(self, app: Flask): api = ToolBuiltinProviderUpdateApi() method = unwrap(api.post) @@ -276,7 +277,7 @@ 
class TestBuiltinProviderApis: ): assert method(api, "provider")["ok"] - def test_get_credentials(self, app): + def test_get_credentials(self, app: Flask): api = ToolBuiltinProviderGetCredentialsApi() method = unwrap(api.get) @@ -293,7 +294,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider") == {"k": "v"} - def test_icon(self, app): + def test_icon(self, app: Flask): api = ToolBuiltinProviderIconApi() method = unwrap(api.get) @@ -307,7 +308,7 @@ class TestBuiltinProviderApis: response = method(api, "provider") assert response.mimetype == "image/png" - def test_credentials_schema(self, app): + def test_credentials_schema(self, app: Flask): api = ToolBuiltinProviderCredentialsSchemaApi() method = unwrap(api.get) @@ -324,7 +325,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider", "oauth2") == {"schema": {}} - def test_set_default_credential(self, app): + def test_set_default_credential(self, app: Flask): api = ToolBuiltinProviderSetDefaultApi() method = unwrap(api.post) @@ -341,7 +342,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider")["ok"] - def test_get_credential_info(self, app): + def test_get_credential_info(self, app: Flask): api = ToolBuiltinProviderGetCredentialInfoApi() method = unwrap(api.get) @@ -358,7 +359,7 @@ class TestBuiltinProviderApis: ): assert method(api, "provider") == {"info": "x"} - def test_get_oauth_client_schema(self, app): + def test_get_oauth_client_schema(self, app: Flask): api = ToolBuiltinProviderGetOauthClientSchemaApi() method = unwrap(api.get) @@ -378,10 +379,10 @@ class TestBuiltinProviderApis: class TestApiProviderApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_add(self, app): + def test_add(self, app: Flask): api = ToolApiProviderAddApi() method = unwrap(api.post) @@ -406,7 +407,7 @@ class TestApiProviderApis: ): assert method(api)["id"] == 1 - def 
test_remote_schema(self, app): + def test_remote_schema(self, app: Flask): api = ToolApiProviderGetRemoteSchemaApi() method = unwrap(api.get) @@ -423,7 +424,7 @@ class TestApiProviderApis: ): assert method(api)["schema"] == "x" - def test_list_tools(self, app): + def test_list_tools(self, app: Flask): api = ToolApiProviderListToolsApi() method = unwrap(api.get) @@ -440,7 +441,7 @@ class TestApiProviderApis: ): assert method(api) == [{"tool": 1}] - def test_update(self, app): + def test_update(self, app: Flask): api = ToolApiProviderUpdateApi() method = unwrap(api.post) @@ -468,7 +469,7 @@ class TestApiProviderApis: ): assert method(api)["ok"] - def test_delete(self, app): + def test_delete(self, app: Flask): api = ToolApiProviderDeleteApi() method = unwrap(api.post) @@ -485,7 +486,7 @@ class TestApiProviderApis: ): assert method(api)["result"] == "success" - def test_get(self, app): + def test_get(self, app: Flask): api = ToolApiProviderGetApi() method = unwrap(api.get) @@ -505,10 +506,10 @@ class TestApiProviderApis: class TestWorkflowApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_create(self, app): + def test_create(self, app: Flask): api = ToolWorkflowProviderCreateApi() method = unwrap(api.post) @@ -534,7 +535,7 @@ class TestWorkflowApis: ): assert method(api)["id"] == 1 - def test_update_invalid(self, app): + def test_update_invalid(self, app: Flask): api = ToolWorkflowProviderUpdateApi() method = unwrap(api.post) @@ -560,7 +561,7 @@ class TestWorkflowApis: result = method(api) assert result["ok"] - def test_delete(self, app): + def test_delete(self, app: Flask): api = ToolWorkflowProviderDeleteApi() method = unwrap(api.post) @@ -577,7 +578,7 @@ class TestWorkflowApis: ): assert method(api)["ok"] - def test_get_error(self, app): + def test_get_error(self, app: Flask): api = ToolWorkflowProviderGetApi() method = unwrap(api.get) @@ -594,10 +595,10 @@ 
class TestWorkflowApis: class TestLists: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_builtin_list(self, app): + def test_builtin_list(self, app: Flask): api = ToolBuiltinListApi() method = unwrap(api.get) @@ -617,7 +618,7 @@ class TestLists: ): assert method(api) == [{"x": 1}] - def test_api_list(self, app): + def test_api_list(self, app: Flask): api = ToolApiListApi() method = unwrap(api.get) @@ -637,7 +638,7 @@ class TestLists: ): assert method(api) == [{"x": 1}] - def test_workflow_list(self, app): + def test_workflow_list(self, app: Flask): api = ToolWorkflowListApi() method = unwrap(api.get) @@ -660,10 +661,10 @@ class TestLists: class TestLabels: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_labels(self, app): + def test_labels(self, app: Flask): api = ToolLabelsApi() method = unwrap(api.get) @@ -679,10 +680,10 @@ class TestLabels: class TestOAuth: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_oauth_no_client(self, app): + def test_oauth_no_client(self, app: Flask): api = ToolPluginOAuthApi() method = unwrap(api.get) @@ -700,7 +701,7 @@ class TestOAuth: with pytest.raises(Forbidden): method(api, "provider") - def test_oauth_callback_no_cookie(self, app): + def test_oauth_callback_no_cookie(self, app: Flask): api = ToolOAuthCallback() method = unwrap(api.get) @@ -711,10 +712,10 @@ class TestOAuth: class TestOAuthCustomClient: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_save_custom_client(self, app): + def test_save_custom_client(self, app: Flask): api = ToolOAuthCustomClient() method = unwrap(api.post) @@ -731,7 +732,7 @@ class 
TestOAuthCustomClient: ): assert method(api, "provider")["ok"] - def test_get_custom_client(self, app): + def test_get_custom_client(self, app: Flask): api = ToolOAuthCustomClient() method = unwrap(api.get) @@ -748,7 +749,7 @@ class TestOAuthCustomClient: ): assert method(api, "provider") == {"client_id": "x"} - def test_delete_custom_client(self, app): + def test_delete_custom_client(self, app: Flask): api = ToolOAuthCustomClient() method = unwrap(api.delete) diff --git a/api/tests/test_containers_integration_tests/controllers/console/workspace/test_trigger_providers.py b/api/tests/test_containers_integration_tests/controllers/console/workspace/test_trigger_providers.py index ca8195af53..e41adccf3c 100644 --- a/api/tests/test_containers_integration_tests/controllers/console/workspace/test_trigger_providers.py +++ b/api/tests/test_containers_integration_tests/controllers/console/workspace/test_trigger_providers.py @@ -5,6 +5,7 @@ from __future__ import annotations from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import BadRequest, Forbidden from controllers.console.workspace.trigger_providers import ( @@ -45,10 +46,10 @@ def mock_user(): class TestTriggerProviderApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_icon_success(self, app): + def test_icon_success(self, app: Flask): api = TriggerProviderIconApi() method = unwrap(api.get) @@ -62,7 +63,7 @@ class TestTriggerProviderApis: ): assert method(api, "github") == "icon" - def test_list_providers(self, app): + def test_list_providers(self, app: Flask): api = TriggerProviderListApi() method = unwrap(api.get) @@ -76,7 +77,7 @@ class TestTriggerProviderApis: ): assert method(api) == [] - def test_provider_info(self, app): + def test_provider_info(self, app: Flask): api = TriggerProviderInfoApi() method = unwrap(api.get) @@ -93,10 +94,10 @@ class 
TestTriggerProviderApis: class TestTriggerSubscriptionListApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_list_success(self, app): + def test_list_success(self, app: Flask): api = TriggerSubscriptionListApi() method = unwrap(api.get) @@ -110,7 +111,7 @@ class TestTriggerSubscriptionListApi: ): assert method(api, "github") == [] - def test_list_invalid_provider(self, app): + def test_list_invalid_provider(self, app: Flask): api = TriggerSubscriptionListApi() method = unwrap(api.get) @@ -128,10 +129,10 @@ class TestTriggerSubscriptionListApi: class TestTriggerSubscriptionBuilderApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_create_builder(self, app): + def test_create_builder(self, app: Flask): api = TriggerSubscriptionBuilderCreateApi() method = unwrap(api.post) @@ -146,7 +147,7 @@ class TestTriggerSubscriptionBuilderApis: result = method(api, "github") assert "subscription_builder" in result - def test_get_builder(self, app): + def test_get_builder(self, app: Flask): api = TriggerSubscriptionBuilderGetApi() method = unwrap(api.get) @@ -159,7 +160,7 @@ class TestTriggerSubscriptionBuilderApis: ): assert method(api, "github", "b1") == {"id": "b1"} - def test_verify_builder(self, app): + def test_verify_builder(self, app: Flask): api = TriggerSubscriptionBuilderVerifyApi() method = unwrap(api.post) @@ -173,7 +174,7 @@ class TestTriggerSubscriptionBuilderApis: ): assert method(api, "github", "b1") == {"ok": True} - def test_verify_builder_error(self, app): + def test_verify_builder_error(self, app: Flask): api = TriggerSubscriptionBuilderVerifyApi() method = unwrap(api.post) @@ -188,7 +189,7 @@ class TestTriggerSubscriptionBuilderApis: with pytest.raises(ValueError): method(api, "github", "b1") - def test_update_builder(self, app): + def 
test_update_builder(self, app: Flask): api = TriggerSubscriptionBuilderUpdateApi() method = unwrap(api.post) @@ -202,7 +203,7 @@ class TestTriggerSubscriptionBuilderApis: ): assert method(api, "github", "b1") == {"id": "b1"} - def test_logs(self, app): + def test_logs(self, app: Flask): api = TriggerSubscriptionBuilderLogsApi() method = unwrap(api.get) @@ -219,7 +220,7 @@ class TestTriggerSubscriptionBuilderApis: ): assert "logs" in method(api, "github", "b1") - def test_build(self, app): + def test_build(self, app: Flask): api = TriggerSubscriptionBuilderBuildApi() method = unwrap(api.post) @@ -236,10 +237,10 @@ class TestTriggerSubscriptionBuilderApis: class TestTriggerSubscriptionCrud: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_update_rename_only(self, app): + def test_update_rename_only(self, app: Flask): api = TriggerSubscriptionUpdateApi() method = unwrap(api.post) @@ -258,7 +259,7 @@ class TestTriggerSubscriptionCrud: ): assert method(api, "s1") == 200 - def test_update_not_found(self, app): + def test_update_not_found(self, app: Flask): api = TriggerSubscriptionUpdateApi() method = unwrap(api.post) @@ -273,7 +274,7 @@ class TestTriggerSubscriptionCrud: with pytest.raises(NotFoundError): method(api, "x") - def test_update_rebuild(self, app): + def test_update_rebuild(self, app: Flask): api = TriggerSubscriptionUpdateApi() method = unwrap(api.post) @@ -296,7 +297,7 @@ class TestTriggerSubscriptionCrud: ): assert method(api, "s1") == 200 - def test_delete_subscription(self, app): + def test_delete_subscription(self, app: Flask): api = TriggerSubscriptionDeleteApi() method = unwrap(api.post) @@ -319,7 +320,7 @@ class TestTriggerSubscriptionCrud: assert result["result"] == "success" - def test_delete_subscription_value_error(self, app): + def test_delete_subscription_value_error(self, app: Flask): api = TriggerSubscriptionDeleteApi() method = 
unwrap(api.post) @@ -342,10 +343,10 @@ class TestTriggerSubscriptionCrud: class TestTriggerOAuthApis: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_oauth_authorize_success(self, app): + def test_oauth_authorize_success(self, app: Flask): api = TriggerOAuthAuthorizeApi() method = unwrap(api.get) @@ -372,7 +373,7 @@ class TestTriggerOAuthApis: resp = method(api, "github") assert resp.status_code == 200 - def test_oauth_authorize_no_client(self, app): + def test_oauth_authorize_no_client(self, app: Flask): api = TriggerOAuthAuthorizeApi() method = unwrap(api.get) @@ -387,7 +388,7 @@ class TestTriggerOAuthApis: with pytest.raises(NotFoundError): method(api, "github") - def test_oauth_callback_forbidden(self, app): + def test_oauth_callback_forbidden(self, app: Flask): api = TriggerOAuthCallbackApi() method = unwrap(api.get) @@ -395,7 +396,7 @@ class TestTriggerOAuthApis: with pytest.raises(Forbidden): method(api, "github") - def test_oauth_callback_success(self, app): + def test_oauth_callback_success(self, app: Flask): api = TriggerOAuthCallbackApi() method = unwrap(api.get) @@ -425,7 +426,7 @@ class TestTriggerOAuthApis: resp = method(api, "github") assert resp.status_code == 302 - def test_oauth_callback_no_oauth_client(self, app): + def test_oauth_callback_no_oauth_client(self, app: Flask): api = TriggerOAuthCallbackApi() method = unwrap(api.get) @@ -449,7 +450,7 @@ class TestTriggerOAuthApis: with pytest.raises(Forbidden): method(api, "github") - def test_oauth_callback_empty_credentials(self, app): + def test_oauth_callback_empty_credentials(self, app: Flask): api = TriggerOAuthCallbackApi() method = unwrap(api.get) @@ -480,10 +481,10 @@ class TestTriggerOAuthApis: class TestTriggerOAuthClientManageApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def 
test_get_client(self, app): + def test_get_client(self, app: Flask): api = TriggerOAuthClientManageApi() method = unwrap(api.get) @@ -510,7 +511,7 @@ class TestTriggerOAuthClientManageApi: result = method(api, "github") assert "configured" in result - def test_post_client(self, app): + def test_post_client(self, app: Flask): api = TriggerOAuthClientManageApi() method = unwrap(api.post) @@ -524,7 +525,7 @@ class TestTriggerOAuthClientManageApi: ): assert method(api, "github") == {"ok": True} - def test_delete_client(self, app): + def test_delete_client(self, app: Flask): api = TriggerOAuthClientManageApi() method = unwrap(api.delete) @@ -538,7 +539,7 @@ class TestTriggerOAuthClientManageApi: ): assert method(api, "github") == {"ok": True} - def test_oauth_client_post_value_error(self, app): + def test_oauth_client_post_value_error(self, app: Flask): api = TriggerOAuthClientManageApi() method = unwrap(api.post) @@ -556,10 +557,10 @@ class TestTriggerOAuthClientManageApi: class TestTriggerSubscriptionVerifyApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_verify_success(self, app): + def test_verify_success(self, app: Flask): api = TriggerSubscriptionVerifyApi() method = unwrap(api.post) diff --git a/api/tests/test_containers_integration_tests/controllers/service_api/dataset/test_dataset.py b/api/tests/test_containers_integration_tests/controllers/service_api/dataset/test_dataset.py index 9b913d6d3d..b73d28e4c4 100644 --- a/api/tests/test_containers_integration_tests/controllers/service_api/dataset/test_dataset.py +++ b/api/tests/test_containers_integration_tests/controllers/service_api/dataset/test_dataset.py @@ -18,6 +18,7 @@ from types import SimpleNamespace from unittest.mock import Mock, patch import pytest +from flask import Flask from sqlalchemy.orm import Session from werkzeug.exceptions import Forbidden, NotFound @@ -217,10 +218,20 @@ class 
TestTagUnbindingPayload: """Test suite for TagUnbindingPayload Pydantic model.""" def test_payload_with_valid_data(self): - payload = TagUnbindingPayload(tag_id="tag_123", target_id="dataset_456") - assert payload.tag_id == "tag_123" + payload = TagUnbindingPayload(tag_ids=["tag_123"], target_id="dataset_456") + assert payload.tag_ids == ["tag_123"] assert payload.target_id == "dataset_456" + def test_payload_normalizes_legacy_tag_id(self): + payload = TagUnbindingPayload(tag_id="tag_123", target_id="dataset_456") + assert payload.tag_ids == ["tag_123"] + assert payload.target_id == "dataset_456" + + def test_payload_rejects_empty_tag_ids(self): + with pytest.raises(ValueError) as exc_info: + TagUnbindingPayload(tag_ids=[], target_id="dataset_456") + assert "Tag IDs is required" in str(exc_info.value) + # --------------------------------------------------------------------------- # Helpers @@ -236,7 +247,7 @@ def _unwrap(method): @pytest.fixture -def app(flask_app_with_containers): +def app(flask_app_with_containers: Flask): # Uses the full containerised app so that Flask config, extensions, and # blueprint registrations match production. Most tests mock the service # layer to isolate controller logic; a few (e.g. 
test_list_tags_from_db) @@ -280,7 +291,7 @@ class TestDatasetListApiGet: mock_current_user, mock_provider_mgr, mock_marshal, - app, + app: Flask, mock_tenant, ): from controllers.service_api.dataset.dataset import DatasetListApi @@ -315,7 +326,7 @@ class TestDatasetListApiPost: mock_dataset_svc, mock_current_user, mock_marshal, - app, + app: Flask, mock_tenant, ): from controllers.service_api.dataset.dataset import DatasetListApi @@ -341,7 +352,7 @@ class TestDatasetListApiPost: self, mock_dataset_svc, mock_current_user, - app, + app: Flask, mock_tenant, ): from controllers.service_api.dataset.dataset import DatasetListApi @@ -379,7 +390,7 @@ class TestDatasetApiGet: mock_provider_mgr, mock_marshal, mock_perm_svc, - app, + app: Flask, mock_dataset, ): from controllers.service_api.dataset.dataset import DatasetApi @@ -429,7 +440,7 @@ class TestDatasetApiGet: self, mock_dataset_svc, mock_current_user, - app, + app: Flask, mock_dataset, ): from controllers.service_api.dataset.dataset import DatasetApi @@ -457,7 +468,7 @@ class TestDatasetApiDelete: mock_dataset_svc, mock_current_user, mock_perm_svc, - app, + app: Flask, mock_dataset, ): from controllers.service_api.dataset.dataset import DatasetApi @@ -479,7 +490,7 @@ class TestDatasetApiDelete: self, mock_dataset_svc, mock_current_user, - app, + app: Flask, mock_dataset, ): from controllers.service_api.dataset.dataset import DatasetApi @@ -500,7 +511,7 @@ class TestDatasetApiDelete: self, mock_dataset_svc, mock_current_user, - app, + app: Flask, mock_dataset, ): from controllers.service_api.dataset.dataset import DatasetApi @@ -532,7 +543,7 @@ class TestDocumentStatusApiPatch: mock_dataset_svc, mock_current_user, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -563,7 +574,7 @@ class TestDocumentStatusApiPatch: def test_batch_update_status_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -592,7 +603,7 @@ class TestDocumentStatusApiPatch: 
mock_dataset_svc, mock_current_user, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -625,7 +636,7 @@ class TestDocumentStatusApiPatch: mock_dataset_svc, mock_current_user, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -658,7 +669,7 @@ class TestDocumentStatusApiPatch: mock_dataset_svc, mock_current_user, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -698,7 +709,7 @@ class TestDatasetTagsApiGet: self, mock_current_user, mock_tag_svc, - app, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagsApi @@ -720,7 +731,7 @@ class TestDatasetTagsApiGet: def test_list_tags_from_db( self, mock_current_user, - app, + app: Flask, db_session_with_containers: Session, ): """Integration test: creates real Tag rows and retrieves them @@ -763,7 +774,7 @@ class TestDatasetTagsApiPost: self, mock_current_user, mock_tag_svc, - app, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagsApi @@ -786,7 +797,7 @@ class TestDatasetTagsApiPost: mock_tag_svc.save_tags.assert_called_once() @patch("controllers.service_api.dataset.dataset.current_user") - def test_create_tag_forbidden(self, mock_current_user, app): + def test_create_tag_forbidden(self, mock_current_user, app: Flask): from controllers.service_api.dataset.dataset import DatasetTagsApi mock_current_user.__class__ = Account @@ -815,7 +826,7 @@ class TestDatasetTagsApiPatch: mock_current_user, mock_service_api_ns, mock_tag_svc, - app, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagsApi @@ -841,7 +852,7 @@ class TestDatasetTagsApiPatch: mock_tag_svc.update_tags.assert_called_once_with({"name": "Updated Tag", "type": "knowledge"}, "tag-1") @patch("controllers.service_api.dataset.dataset.current_user") - def test_update_tag_forbidden(self, mock_current_user, app): + def test_update_tag_forbidden(self, mock_current_user, app: Flask): from controllers.service_api.dataset.dataset import 
DatasetTagsApi mock_current_user.__class__ = Account @@ -869,7 +880,7 @@ class TestDatasetTagsApiDelete: mock_current_user, mock_service_api_ns, mock_tag_svc, - app, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagsApi @@ -894,7 +905,7 @@ class TestDatasetTagsApiDelete: mock_tag_svc.delete_tag.assert_called_once_with("tag-1") @patch("libs.login.current_user") - def test_delete_tag_forbidden(self, mock_current_user, app): + def test_delete_tag_forbidden(self, mock_current_user, app: Flask): from controllers.service_api.dataset.dataset import DatasetTagsApi user_obj = Mock(spec=Account) @@ -922,7 +933,7 @@ class TestDatasetTagsBindingStatusApi: self, mock_current_user, mock_tag_svc, - app, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagsBindingStatusApi @@ -952,7 +963,7 @@ class TestDatasetTagBindingApiPost: self, mock_current_user, mock_tag_svc, - app, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagBindingApi @@ -977,7 +988,7 @@ class TestDatasetTagBindingApiPost: ) @patch("controllers.service_api.dataset.dataset.current_user") - def test_bind_tags_forbidden(self, mock_current_user, app): + def test_bind_tags_forbidden(self, mock_current_user, app: Flask): from controllers.service_api.dataset.dataset import DatasetTagBindingApi mock_current_user.__class__ = Account @@ -1003,7 +1014,37 @@ class TestDatasetTagUnbindingApiPost: self, mock_current_user, mock_tag_svc, - app, + app: Flask, + ): + from controllers.service_api.dataset.dataset import DatasetTagUnbindingApi + + mock_current_user.__class__ = Account + mock_current_user.has_edit_permission = True + mock_current_user.is_dataset_editor = True + mock_tag_svc.delete_tag_binding.return_value = None + + with app.test_request_context( + "/datasets/tags/unbinding", + method="POST", + json={"tag_ids": ["tag-1"], "target_id": "ds-1"}, + ): + api = DatasetTagUnbindingApi() + result = api.post(_=None) + + assert result == ("", 
204) + from services.tag_service import TagBindingDeletePayload + + mock_tag_svc.delete_tag_binding.assert_called_once_with( + TagBindingDeletePayload(tag_ids=["tag-1"], target_id="ds-1", type="knowledge") + ) + + @patch("controllers.service_api.dataset.dataset.TagService") + @patch("controllers.service_api.dataset.dataset.current_user") + def test_unbind_legacy_tag_id_success( + self, + mock_current_user, + mock_tag_svc, + app: Flask, ): from controllers.service_api.dataset.dataset import DatasetTagUnbindingApi @@ -1024,11 +1065,11 @@ class TestDatasetTagUnbindingApiPost: from services.tag_service import TagBindingDeletePayload mock_tag_svc.delete_tag_binding.assert_called_once_with( - TagBindingDeletePayload(tag_id="tag-1", target_id="ds-1", type="knowledge") + TagBindingDeletePayload(tag_ids=["tag-1"], target_id="ds-1", type="knowledge") ) @patch("controllers.service_api.dataset.dataset.current_user") - def test_unbind_tag_forbidden(self, mock_current_user, app): + def test_unbind_tag_forbidden(self, mock_current_user, app: Flask): from controllers.service_api.dataset.dataset import DatasetTagUnbindingApi mock_current_user.__class__ = Account @@ -1038,7 +1079,7 @@ class TestDatasetTagUnbindingApiPost: with app.test_request_context( "/datasets/tags/unbinding", method="POST", - json={"tag_id": "tag-1", "target_id": "ds-1"}, + json={"tag_ids": ["tag-1"], "target_id": "ds-1"}, ): api = DatasetTagUnbindingApi() with pytest.raises(Forbidden): diff --git a/api/tests/test_containers_integration_tests/controllers/web/test_conversation.py b/api/tests/test_containers_integration_tests/controllers/web/test_conversation.py index e1e6741014..c34da27ebe 100644 --- a/api/tests/test_containers_integration_tests/controllers/web/test_conversation.py +++ b/api/tests/test_containers_integration_tests/controllers/web/test_conversation.py @@ -7,6 +7,7 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from flask import Flask from werkzeug.exceptions 
import NotFound from controllers.web.conversation import ( @@ -34,16 +35,16 @@ def _end_user() -> SimpleNamespace: class TestConversationListApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_non_chat_mode_raises(self, app) -> None: + def test_non_chat_mode_raises(self, app: Flask) -> None: with app.test_request_context("/conversations"): with pytest.raises(NotChatAppError): ConversationListApi().get(_completion_app(), _end_user()) @patch("controllers.web.conversation.WebConversationService.pagination_by_last_id") - def test_happy_path(self, mock_paginate: MagicMock, app) -> None: + def test_happy_path(self, mock_paginate: MagicMock, app: Flask) -> None: conv_id = str(uuid4()) conv = SimpleNamespace( id=conv_id, @@ -65,16 +66,16 @@ class TestConversationListApi: class TestConversationApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_non_chat_mode_raises(self, app) -> None: + def test_non_chat_mode_raises(self, app: Flask) -> None: with app.test_request_context(f"/conversations/{uuid4()}"): with pytest.raises(NotChatAppError): ConversationApi().delete(_completion_app(), _end_user(), uuid4()) @patch("controllers.web.conversation.ConversationService.delete") - def test_delete_success(self, mock_delete: MagicMock, app) -> None: + def test_delete_success(self, mock_delete: MagicMock, app: Flask) -> None: c_id = uuid4() with app.test_request_context(f"/conversations/{c_id}"): result, status = ConversationApi().delete(_chat_app(), _end_user(), c_id) @@ -83,7 +84,7 @@ class TestConversationApi: assert result["result"] == "success" @patch("controllers.web.conversation.ConversationService.delete", side_effect=ConversationNotExistsError()) - def test_delete_not_found(self, mock_delete: MagicMock, app) -> None: + def test_delete_not_found(self, mock_delete: 
MagicMock, app: Flask) -> None: c_id = uuid4() with app.test_request_context(f"/conversations/{c_id}"): with pytest.raises(NotFound, match="Conversation Not Exists"): @@ -92,17 +93,17 @@ class TestConversationApi: class TestConversationRenameApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_non_chat_mode_raises(self, app) -> None: + def test_non_chat_mode_raises(self, app: Flask) -> None: with app.test_request_context(f"/conversations/{uuid4()}/name", method="POST", json={"name": "x"}): with pytest.raises(NotChatAppError): ConversationRenameApi().post(_completion_app(), _end_user(), uuid4()) @patch("controllers.web.conversation.ConversationService.rename") @patch("controllers.web.conversation.web_ns") - def test_rename_success(self, mock_ns: MagicMock, mock_rename: MagicMock, app) -> None: + def test_rename_success(self, mock_ns: MagicMock, mock_rename: MagicMock, app: Flask) -> None: c_id = uuid4() mock_ns.payload = {"name": "New Name", "auto_generate": False} conv = SimpleNamespace( @@ -126,7 +127,7 @@ class TestConversationRenameApi: side_effect=ConversationNotExistsError(), ) @patch("controllers.web.conversation.web_ns") - def test_rename_not_found(self, mock_ns: MagicMock, mock_rename: MagicMock, app) -> None: + def test_rename_not_found(self, mock_ns: MagicMock, mock_rename: MagicMock, app: Flask) -> None: c_id = uuid4() mock_ns.payload = {"name": "X", "auto_generate": False} @@ -137,16 +138,16 @@ class TestConversationRenameApi: class TestConversationPinApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_non_chat_mode_raises(self, app) -> None: + def test_non_chat_mode_raises(self, app: Flask) -> None: with app.test_request_context(f"/conversations/{uuid4()}/pin", method="PATCH"): with pytest.raises(NotChatAppError): 
ConversationPinApi().patch(_completion_app(), _end_user(), uuid4()) @patch("controllers.web.conversation.WebConversationService.pin") - def test_pin_success(self, mock_pin: MagicMock, app) -> None: + def test_pin_success(self, mock_pin: MagicMock, app: Flask) -> None: c_id = uuid4() with app.test_request_context(f"/conversations/{c_id}/pin", method="PATCH"): result = ConversationPinApi().patch(_chat_app(), _end_user(), c_id) @@ -154,7 +155,7 @@ class TestConversationPinApi: assert result["result"] == "success" @patch("controllers.web.conversation.WebConversationService.pin", side_effect=ConversationNotExistsError()) - def test_pin_not_found(self, mock_pin: MagicMock, app) -> None: + def test_pin_not_found(self, mock_pin: MagicMock, app: Flask) -> None: c_id = uuid4() with app.test_request_context(f"/conversations/{c_id}/pin", method="PATCH"): with pytest.raises(NotFound): @@ -163,16 +164,16 @@ class TestConversationPinApi: class TestConversationUnPinApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers - def test_non_chat_mode_raises(self, app) -> None: + def test_non_chat_mode_raises(self, app: Flask) -> None: with app.test_request_context(f"/conversations/{uuid4()}/unpin", method="PATCH"): with pytest.raises(NotChatAppError): ConversationUnPinApi().patch(_completion_app(), _end_user(), uuid4()) @patch("controllers.web.conversation.WebConversationService.unpin") - def test_unpin_success(self, mock_unpin: MagicMock, app) -> None: + def test_unpin_success(self, mock_unpin: MagicMock, app: Flask) -> None: c_id = uuid4() with app.test_request_context(f"/conversations/{c_id}/unpin", method="PATCH"): result = ConversationUnPinApi().patch(_chat_app(), _end_user(), c_id) diff --git a/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py b/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py index 
635cfee2da..2c6a990240 100644 --- a/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py +++ b/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py @@ -7,6 +7,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.web.forgot_password import ( ForgotPasswordCheckApi, @@ -29,7 +30,7 @@ def _patch_wraps(): class TestForgotPasswordSendEmailApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @patch("controllers.web.forgot_password.AccountService.send_reset_password_email") @@ -42,7 +43,7 @@ class TestForgotPasswordSendEmailApi: mock_rate_limit, mock_get_account, mock_send_mail, - app, + app: Flask, ): mock_account = MagicMock() mock_get_account.return_value = mock_account @@ -64,7 +65,7 @@ class TestForgotPasswordSendEmailApi: class TestForgotPasswordCheckApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @patch("controllers.web.forgot_password.AccountService.reset_forgot_password_error_rate_limit") @@ -81,7 +82,7 @@ class TestForgotPasswordCheckApi: mock_revoke_token, mock_generate_token, mock_reset_rate, - app, + app: Flask, ): mock_is_rate_limit.return_value = False mock_get_data.return_value = {"email": "User@Example.com", "code": "1234"} @@ -117,7 +118,7 @@ class TestForgotPasswordCheckApi: mock_revoke_token, mock_generate_token, mock_reset_rate, - app, + app: Flask, ): mock_is_rate_limit.return_value = False mock_get_data.return_value = {"email": "MixedCase@Example.com", "code": "5678"} @@ -142,7 +143,7 @@ class TestForgotPasswordCheckApi: class TestForgotPasswordResetApi: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers 
@patch("controllers.web.forgot_password.ForgotPasswordResetApi._update_existing_account") @@ -157,7 +158,7 @@ class TestForgotPasswordResetApi: mock_db, mock_get_account, mock_update_account, - app, + app: Flask, ): mock_get_reset_data.return_value = {"phase": "reset", "email": "User@Example.com", "code": "1234"} mock_account = MagicMock() @@ -194,7 +195,7 @@ class TestForgotPasswordResetApi: mock_db, mock_token_bytes, mock_hash_password, - app, + app: Flask, ): mock_get_reset_data.return_value = {"phase": "reset", "email": "user@example.com"} account = MagicMock() diff --git a/api/tests/test_containers_integration_tests/controllers/web/test_wraps.py b/api/tests/test_containers_integration_tests/controllers/web/test_wraps.py index 19833cc772..0a4e495f36 100644 --- a/api/tests/test_containers_integration_tests/controllers/web/test_wraps.py +++ b/api/tests/test_containers_integration_tests/controllers/web/test_wraps.py @@ -8,6 +8,7 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from flask import Flask from sqlalchemy.orm import Session from werkzeug.exceptions import BadRequest, NotFound, Unauthorized @@ -182,7 +183,7 @@ class TestValidateUserAccessibility: class TestDecodeJwtToken: @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers def _create_app_site_enduser(self, db_session: Session, *, enable_site: bool = True): @@ -239,7 +240,7 @@ class TestDecodeJwtToken: mock_access_mode: MagicMock, mock_validate_token: MagicMock, mock_validate_user: MagicMock, - app, + app: Flask, db_session_with_containers: Session, ) -> None: app_model, site, end_user = self._create_app_site_enduser(db_session_with_containers) @@ -299,7 +300,7 @@ class TestDecodeJwtToken: mock_extract: MagicMock, mock_passport_cls: MagicMock, mock_features: MagicMock, - app, + app: Flask, db_session_with_containers: Session, ) -> None: app_model, site, end_user = 
self._create_app_site_enduser(db_session_with_containers, enable_site=False) @@ -324,7 +325,7 @@ class TestDecodeJwtToken: mock_extract: MagicMock, mock_passport_cls: MagicMock, mock_features: MagicMock, - app, + app: Flask, db_session_with_containers: Session, ) -> None: app_model, site, _ = self._create_app_site_enduser(db_session_with_containers) @@ -350,7 +351,7 @@ class TestDecodeJwtToken: mock_extract: MagicMock, mock_passport_cls: MagicMock, mock_features: MagicMock, - app, + app: Flask, db_session_with_containers: Session, ) -> None: app_model, site, end_user = self._create_app_site_enduser(db_session_with_containers) diff --git a/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py b/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py index c342e8994b..66b3392a4b 100644 --- a/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py +++ b/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py @@ -85,7 +85,7 @@ class TestPauseStatePersistenceLayerTestContainers: return WorkflowRunService(engine) @pytest.fixture(autouse=True) - def setup_test_data(self, db_session_with_containers, file_service, workflow_run_service): + def setup_test_data(self, db_session_with_containers: Session, file_service, workflow_run_service): """Set up test data for each test method using TestContainers.""" # Create test tenant and account from models.account import AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus @@ -210,7 +210,9 @@ class TestPauseStatePersistenceLayerTestContainers: execution_id = workflow_run_id or getattr(self, "test_workflow_run_id", None) or str(uuid.uuid4()) # Create variable pool - variable_pool = VariablePool(system_variables=build_system_variables(workflow_execution_id=execution_id)) + variable_pool = VariablePool.from_bootstrap( + 
system_variables=build_system_variables(workflow_execution_id=execution_id) + ) if variables: for (node_id, var_key), value in variables.items(): variable_pool.add([node_id, var_key], value) @@ -295,7 +297,7 @@ class TestPauseStatePersistenceLayerTestContainers: generate_entity=entity, ) - def test_complete_pause_flow_with_real_dependencies(self, db_session_with_containers): + def test_complete_pause_flow_with_real_dependencies(self, db_session_with_containers: Session): """Test complete pause flow: event -> state serialization -> database save -> storage save.""" # Arrange layer = self._create_pause_state_persistence_layer() @@ -352,7 +354,7 @@ class TestPauseStatePersistenceLayerTestContainers: assert isinstance(persisted_entity, WorkflowAppGenerateEntity) assert persisted_entity.workflow_execution_id == self.test_workflow_run_id - def test_state_persistence_and_retrieval(self, db_session_with_containers): + def test_state_persistence_and_retrieval(self, db_session_with_containers: Session): """Test that pause state can be persisted and retrieved correctly.""" # Arrange layer = self._create_pause_state_persistence_layer() @@ -402,7 +404,7 @@ class TestPauseStatePersistenceLayerTestContainers: assert retrieved_state["node_run_steps"] == 10 assert resumption_context.get_generate_entity().workflow_execution_id == self.test_workflow_run_id - def test_database_transaction_handling(self, db_session_with_containers): + def test_database_transaction_handling(self, db_session_with_containers: Session): """Test that database transactions are handled correctly.""" # Arrange layer = self._create_pause_state_persistence_layer() @@ -433,7 +435,7 @@ class TestPauseStatePersistenceLayerTestContainers: assert pause_model.resumed_at is None assert pause_model.state_object_key != "" - def test_file_storage_integration(self, db_session_with_containers): + def test_file_storage_integration(self, db_session_with_containers: Session): """Test integration with file storage system.""" # 
Arrange layer = self._create_pause_state_persistence_layer() @@ -467,7 +469,7 @@ class TestPauseStatePersistenceLayerTestContainers: assert resumption_context.serialized_graph_runtime_state == graph_runtime_state.dumps() assert resumption_context.get_generate_entity().workflow_execution_id == self.test_workflow_run_id - def test_workflow_with_different_creators(self, db_session_with_containers): + def test_workflow_with_different_creators(self, db_session_with_containers: Session): """Test pause state with workflows created by different users.""" # Arrange - Create workflow with different creator different_user_id = str(uuid.uuid4()) @@ -532,7 +534,7 @@ class TestPauseStatePersistenceLayerTestContainers: resumption_context = WorkflowResumptionContext.loads(pause_entity.get_state().decode()) assert resumption_context.get_generate_entity().workflow_execution_id == different_workflow_run.id - def test_layer_ignores_non_pause_events(self, db_session_with_containers): + def test_layer_ignores_non_pause_events(self, db_session_with_containers: Session): """Test that layer ignores non-pause events.""" # Arrange layer = self._create_pause_state_persistence_layer() @@ -562,7 +564,7 @@ class TestPauseStatePersistenceLayerTestContainers: ).all() assert len(pause_states) == 0 - def test_layer_requires_initialization(self, db_session_with_containers): + def test_layer_requires_initialization(self, db_session_with_containers: Session): """Test that layer requires proper initialization before handling events.""" # Arrange layer = self._create_pause_state_persistence_layer() diff --git a/api/tests/test_containers_integration_tests/core/rag/pipeline/test_queue_integration.py b/api/tests/test_containers_integration_tests/core/rag/pipeline/test_queue_integration.py index a60159c66a..d1af0a56ef 100644 --- a/api/tests/test_containers_integration_tests/core/rag/pipeline/test_queue_integration.py +++ 
b/api/tests/test_containers_integration_tests/core/rag/pipeline/test_queue_integration.py @@ -15,11 +15,14 @@ from uuid import uuid4 import pytest from faker import Faker +from sqlalchemy.orm import Session from core.rag.pipeline.queue import TaskWrapper, TenantIsolatedTaskQueue from extensions.ext_redis import redis_client from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus +TenantAndAccount = tuple[Tenant, Account] + @dataclass class TestTask: @@ -40,7 +43,7 @@ class TestTenantIsolatedTaskQueueIntegration: return Faker() @pytest.fixture - def test_tenant_and_account(self, db_session_with_containers, fake): + def test_tenant_and_account(self, db_session_with_containers: Session, fake: Faker): """Create test tenant and account for testing.""" # Create account account = Account( @@ -73,18 +76,18 @@ class TestTenantIsolatedTaskQueueIntegration: return tenant, account @pytest.fixture - def test_queue(self, test_tenant_and_account): + def test_queue(self, test_tenant_and_account: TenantAndAccount): """Create a generic test queue for testing.""" tenant, _ = test_tenant_and_account return TenantIsolatedTaskQueue(tenant.id, "test_queue") @pytest.fixture - def secondary_queue(self, test_tenant_and_account): + def secondary_queue(self, test_tenant_and_account: TenantAndAccount): """Create a secondary test queue for testing isolation.""" tenant, _ = test_tenant_and_account return TenantIsolatedTaskQueue(tenant.id, "secondary_queue") - def test_queue_initialization(self, test_tenant_and_account): + def test_queue_initialization(self, test_tenant_and_account: TenantAndAccount): """Test queue initialization with correct key generation.""" tenant, _ = test_tenant_and_account queue = TenantIsolatedTaskQueue(tenant.id, "test-key") @@ -94,7 +97,9 @@ class TestTenantIsolatedTaskQueueIntegration: assert queue._queue == f"tenant_self_test-key_task_queue:{tenant.id}" assert queue._task_key == f"tenant_test-key_task:{tenant.id}" - def 
test_tenant_isolation(self, test_tenant_and_account, db_session_with_containers, fake): + def test_tenant_isolation( + self, test_tenant_and_account: TenantAndAccount, db_session_with_containers: Session, fake: Faker + ): """Test that different tenants have isolated queues.""" tenant1, _ = test_tenant_and_account @@ -114,7 +119,7 @@ class TestTenantIsolatedTaskQueueIntegration: assert queue1._queue == f"tenant_self_same-key_task_queue:{tenant1.id}" assert queue2._queue == f"tenant_self_same-key_task_queue:{tenant2.id}" - def test_key_isolation(self, test_tenant_and_account): + def test_key_isolation(self, test_tenant_and_account: TenantAndAccount): """Test that different keys have isolated queues.""" tenant, _ = test_tenant_and_account queue1 = TenantIsolatedTaskQueue(tenant.id, "key1") @@ -176,7 +181,7 @@ class TestTenantIsolatedTaskQueueIntegration: assert len(remaining_tasks) == 2 assert remaining_tasks == ["task4", "task5"] - def test_push_and_pull_complex_objects(self, test_queue, fake): + def test_push_and_pull_complex_objects(self, test_queue, fake: Faker): """Test pushing and pulling complex object tasks.""" # Create complex task objects as dictionaries (not dataclass instances) tasks = [ @@ -218,7 +223,7 @@ class TestTenantIsolatedTaskQueueIntegration: assert pulled_task["data"] == original_task["data"] assert pulled_task["metadata"] == original_task["metadata"] - def test_mixed_task_types(self, test_queue, fake): + def test_mixed_task_types(self, test_queue, fake: Faker): """Test pushing and pulling mixed string and object tasks.""" string_task = "simple_string_task" object_task = { @@ -267,7 +272,7 @@ class TestTenantIsolatedTaskQueueIntegration: # Verify task key has expired assert test_queue.get_task_key() is None - def test_large_task_batch(self, test_queue, fake): + def test_large_task_batch(self, test_queue, fake: Faker): """Test handling large batches of tasks.""" # Create large batch of tasks large_batch = [] @@ -292,7 +297,7 @@ class 
TestTenantIsolatedTaskQueueIntegration: assert isinstance(task, dict) assert task["index"] == i # FIFO order - def test_queue_operations_isolation(self, test_tenant_and_account, fake): + def test_queue_operations_isolation(self, test_tenant_and_account: TenantAndAccount, fake: Faker): """Test concurrent operations on different queues.""" tenant, _ = test_tenant_and_account @@ -312,7 +317,7 @@ class TestTenantIsolatedTaskQueueIntegration: assert tasks2 == ["task1_queue2", "task2_queue2"] assert tasks1 != tasks2 - def test_task_wrapper_serialization_roundtrip(self, test_queue, fake): + def test_task_wrapper_serialization_roundtrip(self, test_queue, fake: Faker): """Test TaskWrapper serialization and deserialization roundtrip.""" # Create complex nested data complex_data = { @@ -346,7 +351,7 @@ class TestTenantIsolatedTaskQueueIntegration: task = test_queue.pull_tasks(1) assert task[0] == invalid_json_task - def test_real_world_batch_processing_scenario(self, test_queue, fake): + def test_real_world_batch_processing_scenario(self, test_queue, fake: Faker): """Test realistic batch processing scenario.""" # Simulate batch processing tasks batch_tasks = [] @@ -403,7 +408,7 @@ class TestTenantIsolatedTaskQueueCompatibility: return Faker() @pytest.fixture - def test_tenant_and_account(self, db_session_with_containers, fake): + def test_tenant_and_account(self, db_session_with_containers: Session, fake: Faker): """Create test tenant and account for testing.""" # Create account account = Account( @@ -435,7 +440,7 @@ class TestTenantIsolatedTaskQueueCompatibility: return tenant, account - def test_legacy_string_queue_compatibility(self, test_tenant_and_account, fake): + def test_legacy_string_queue_compatibility(self, test_tenant_and_account: TenantAndAccount, fake: Faker): """ Test compatibility with legacy queues containing only string data. 
@@ -465,7 +470,7 @@ class TestTenantIsolatedTaskQueueCompatibility: expected_order = ["legacy_task_1", "legacy_task_2", "legacy_task_3", "legacy_task_4", "legacy_task_5"] assert pulled_tasks == expected_order - def test_legacy_queue_migration_scenario(self, test_tenant_and_account, fake): + def test_legacy_queue_migration_scenario(self, test_tenant_and_account: TenantAndAccount, fake: Faker): """ Test complete migration scenario from legacy to new system. @@ -546,7 +551,7 @@ class TestTenantIsolatedTaskQueueCompatibility: assert task["tenant_id"] == tenant.id assert task["processing_type"] == "new_system" - def test_legacy_queue_error_recovery(self, test_tenant_and_account, fake): + def test_legacy_queue_error_recovery(self, test_tenant_and_account: TenantAndAccount, fake: Faker): """ Test error recovery when legacy queue contains malformed data. diff --git a/api/tests/test_containers_integration_tests/core/rag/retrieval/test_dataset_retrieval_integration.py b/api/tests/test_containers_integration_tests/core/rag/retrieval/test_dataset_retrieval_integration.py index 00d7496a40..9da6b04a2c 100644 --- a/api/tests/test_containers_integration_tests/core/rag/retrieval/test_dataset_retrieval_integration.py +++ b/api/tests/test_containers_integration_tests/core/rag/retrieval/test_dataset_retrieval_integration.py @@ -3,6 +3,7 @@ from unittest.mock import patch import pytest from faker import Faker +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType from core.rag.retrieval.dataset_retrieval import DatasetRetrieval @@ -15,7 +16,7 @@ from tests.test_containers_integration_tests.helpers import generate_valid_passw class TestGetAvailableDatasetsIntegration: def test_returns_datasets_with_available_documents( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -77,7 +78,7 
@@ class TestGetAvailableDatasetsIntegration: assert result[0].name == dataset.name def test_filters_out_datasets_with_only_archived_documents( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -130,7 +131,7 @@ class TestGetAvailableDatasetsIntegration: assert len(result) == 0 def test_filters_out_datasets_with_only_disabled_documents( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -183,7 +184,7 @@ class TestGetAvailableDatasetsIntegration: assert len(result) == 0 def test_filters_out_datasets_with_non_completed_documents( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -236,7 +237,7 @@ class TestGetAvailableDatasetsIntegration: assert len(result) == 0 def test_includes_external_datasets_without_documents( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test that external datasets are returned even with no available documents. 
@@ -280,7 +281,7 @@ class TestGetAvailableDatasetsIntegration: assert result[0].id == dataset.id assert result[0].provider == "external" - def test_filters_by_tenant_id(self, db_session_with_containers, mock_external_service_dependencies): + def test_filters_by_tenant_id(self, db_session_with_containers: Session, mock_external_service_dependencies): # Arrange fake = Faker() @@ -356,7 +357,7 @@ class TestGetAvailableDatasetsIntegration: assert result[0].tenant_id == tenant1.id def test_returns_empty_list_when_no_datasets_found( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -379,7 +380,9 @@ class TestGetAvailableDatasetsIntegration: # Assert assert result == [] - def test_returns_only_requested_dataset_ids(self, db_session_with_containers, mock_external_service_dependencies): + def test_returns_only_requested_dataset_ids( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): # Arrange fake = Faker() @@ -439,7 +442,7 @@ class TestGetAvailableDatasetsIntegration: class TestKnowledgeRetrievalIntegration: def test_knowledge_retrieval_with_available_datasets( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -507,7 +510,7 @@ class TestKnowledgeRetrievalIntegration: assert isinstance(result, list) def test_knowledge_retrieval_no_available_datasets( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): # Arrange fake = Faker() @@ -555,7 +558,7 @@ class TestKnowledgeRetrievalIntegration: assert result == [] def test_knowledge_retrieval_rate_limit_exceeded( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, 
mock_external_service_dependencies ): # Arrange fake = Faker() diff --git a/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py b/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py index 5aed230cd4..ad82b8fe2a 100644 --- a/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py +++ b/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py @@ -66,7 +66,7 @@ def _mock_form_repository_with_submission(action_id: str) -> HumanInputFormRepos def _build_runtime_state(workflow_execution_id: str, app_id: str, workflow_id: str, user_id: str) -> GraphRuntimeState: - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( workflow_execution_id=workflow_execution_id, app_id=app_id, @@ -102,7 +102,7 @@ def _build_graph( start_data = StartNodeData(title="start", variables=[]) start_node = StartNode( node_id="start", - config=start_data, + data=start_data, graph_init_params=params, graph_runtime_state=runtime_state, ) @@ -117,7 +117,7 @@ def _build_graph( ) human_node = HumanInputNode( node_id="human", - config=human_data, + data=human_data, graph_init_params=params, graph_runtime_state=runtime_state, form_repository=form_repository, @@ -131,7 +131,7 @@ def _build_graph( ) end_node = EndNode( node_id="end", - config=end_data, + data=end_data, graph_init_params=params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/test_containers_integration_tests/factories/test_storage_key_loader.py b/api/tests/test_containers_integration_tests/factories/test_storage_key_loader.py index 35e41035df..26b80cebbb 100644 --- a/api/tests/test_containers_integration_tests/factories/test_storage_key_loader.py +++ b/api/tests/test_containers_integration_tests/factories/test_storage_key_loader.py @@ -1,4 +1,5 @@ -import unittest +from __future__ 
import annotations + from datetime import UTC, datetime from unittest.mock import patch from uuid import uuid4 @@ -16,7 +17,7 @@ from models.enums import CreatorUserRole @pytest.mark.usefixtures("flask_req_ctx_with_containers") -class TestStorageKeyLoader(unittest.TestCase): +class TestStorageKeyLoader: """ Integration tests for StorageKeyLoader class. @@ -24,110 +25,82 @@ class TestStorageKeyLoader(unittest.TestCase): with different transfer methods: LOCAL_FILE, REMOTE_URL, and TOOL_FILE. """ - def setUp(self): - """Set up test data before each test method.""" - self.session = db.session() - self.tenant_id = str(uuid4()) - self.user_id = str(uuid4()) - self.conversation_id = str(uuid4()) - - # Create test data that will be cleaned up after each test - self.test_upload_files = [] - self.test_tool_files = [] - - # Create StorageKeyLoader instance - self.loader = StorageKeyLoader( - self.session, - self.tenant_id, - access_controller=DatabaseFileAccessController(), - ) - - def tearDown(self): - """Clean up test data after each test method.""" - self.session.rollback() + # ------------------------------------------------------------------ + # Per-test helpers (use db_session_with_containers as parameter) + # ------------------------------------------------------------------ + @staticmethod def _create_upload_file( - self, file_id: str | None = None, storage_key: str | None = None, tenant_id: str | None = None + session: Session, + tenant_id: str, + user_id: str, + *, + file_id: str | None = None, + storage_key: str | None = None, + override_tenant_id: str | None = None, ) -> UploadFile: - """Helper method to create an UploadFile record for testing.""" - if file_id is None: - file_id = str(uuid4()) - if storage_key is None: - storage_key = f"test_storage_key_{uuid4()}" - if tenant_id is None: - tenant_id = self.tenant_id - + """Create and flush an UploadFile record for testing.""" upload_file = UploadFile( - tenant_id=tenant_id, + tenant_id=override_tenant_id if 
override_tenant_id is not None else tenant_id, storage_type=StorageType.LOCAL, - key=storage_key, + key=storage_key or f"test_storage_key_{uuid4()}", name="test_file.txt", size=1024, extension=".txt", mime_type="text/plain", created_by_role=CreatorUserRole.ACCOUNT, - created_by=self.user_id, + created_by=user_id, created_at=datetime.now(UTC), used=False, ) - upload_file.id = file_id - - self.session.add(upload_file) - self.session.flush() - self.test_upload_files.append(upload_file) - + upload_file.id = file_id or str(uuid4()) + session.add(upload_file) + session.flush() return upload_file + @staticmethod def _create_tool_file( - self, file_id: str | None = None, file_key: str | None = None, tenant_id: str | None = None + session: Session, + tenant_id: str, + user_id: str, + conversation_id: str, + *, + file_id: str | None = None, + file_key: str | None = None, + override_tenant_id: str | None = None, ) -> ToolFile: - """Helper method to create a ToolFile record for testing.""" - if file_id is None: - file_id = str(uuid4()) - if file_key is None: - file_key = f"test_file_key_{uuid4()}" - if tenant_id is None: - tenant_id = self.tenant_id - + """Create and flush a ToolFile record for testing.""" tool_file = ToolFile( - user_id=self.user_id, - tenant_id=tenant_id, - conversation_id=self.conversation_id, - file_key=file_key, + user_id=user_id, + tenant_id=override_tenant_id if override_tenant_id is not None else tenant_id, + conversation_id=conversation_id, + file_key=file_key or f"test_file_key_{uuid4()}", mimetype="text/plain", original_url="http://example.com/file.txt", name="test_tool_file.txt", size=2048, ) - tool_file.id = file_id - - self.session.add(tool_file) - self.session.flush() - self.test_tool_files.append(tool_file) - + tool_file.id = file_id or str(uuid4()) + session.add(tool_file) + session.flush() return tool_file - def _create_file(self, related_id: str, transfer_method: FileTransferMethod, tenant_id: str | None = None) -> File: - """Helper method 
to create a File object for testing.""" - if tenant_id is None: - tenant_id = self.tenant_id - - # Set related_id for LOCAL_FILE and TOOL_FILE transfer methods - file_related_id = None - remote_url = None - - if transfer_method in (FileTransferMethod.LOCAL_FILE, FileTransferMethod.TOOL_FILE): - file_related_id = related_id - elif transfer_method == FileTransferMethod.REMOTE_URL: - remote_url = "https://example.com/test_file.txt" - file_related_id = related_id - + @staticmethod + def _create_file( + tenant_id: str, + related_id: str, + transfer_method: FileTransferMethod, + *, + override_tenant_id: str | None = None, + ) -> File: + """Build a File value-object for testing.""" + remote_url = "https://example.com/test_file.txt" if transfer_method == FileTransferMethod.REMOTE_URL else None return File( - file_id=str(uuid4()), # Generate new UUID for File.id - tenant_id=tenant_id, + file_id=str(uuid4()), + tenant_id=override_tenant_id if override_tenant_id is not None else tenant_id, file_type=FileType.DOCUMENT, transfer_method=transfer_method, - related_id=file_related_id, + related_id=related_id, remote_url=remote_url, filename="test_file.txt", extension=".txt", @@ -136,240 +109,280 @@ class TestStorageKeyLoader(unittest.TestCase): storage_key="initial_key", ) - def test_load_storage_keys_local_file(self): + # ------------------------------------------------------------------ + # Tests + # ------------------------------------------------------------------ + + def test_load_storage_keys_local_file(self, db_session_with_containers: Session): """Test loading storage keys for LOCAL_FILE transfer method.""" - # Create test data - upload_file = self._create_upload_file() - file = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) + tenant_id = str(uuid4()) + user_id = str(uuid4()) - # Load storage keys - self.loader.load_storage_keys([file]) + upload_file = self._create_upload_file(db_session_with_containers, tenant_id, user_id) + 
file = self._create_file(tenant_id, related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) + + loader = StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() + ) + loader.load_storage_keys([file]) - # Verify storage key was loaded correctly assert file._storage_key == upload_file.key - def test_load_storage_keys_remote_url(self): + def test_load_storage_keys_remote_url(self, db_session_with_containers: Session): """Test loading storage keys for REMOTE_URL transfer method.""" - # Create test data - upload_file = self._create_upload_file() - file = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.REMOTE_URL) + tenant_id = str(uuid4()) + user_id = str(uuid4()) - # Load storage keys - self.loader.load_storage_keys([file]) + upload_file = self._create_upload_file(db_session_with_containers, tenant_id, user_id) + file = self._create_file(tenant_id, related_id=upload_file.id, transfer_method=FileTransferMethod.REMOTE_URL) + + loader = StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() + ) + loader.load_storage_keys([file]) - # Verify storage key was loaded correctly assert file._storage_key == upload_file.key - def test_load_storage_keys_tool_file(self): + def test_load_storage_keys_tool_file(self, db_session_with_containers: Session): """Test loading storage keys for TOOL_FILE transfer method.""" - # Create test data - tool_file = self._create_tool_file() - file = self._create_file(related_id=tool_file.id, transfer_method=FileTransferMethod.TOOL_FILE) + tenant_id = str(uuid4()) + user_id = str(uuid4()) + conversation_id = str(uuid4()) - # Load storage keys - self.loader.load_storage_keys([file]) + tool_file = self._create_tool_file(db_session_with_containers, tenant_id, user_id, conversation_id) + file = self._create_file(tenant_id, related_id=tool_file.id, transfer_method=FileTransferMethod.TOOL_FILE) + + loader 
= StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() + ) + loader.load_storage_keys([file]) - # Verify storage key was loaded correctly assert file._storage_key == tool_file.file_key - def test_load_storage_keys_mixed_methods(self): + def test_load_storage_keys_mixed_methods(self, db_session_with_containers: Session): """Test batch loading with mixed transfer methods.""" - # Create test data for different transfer methods - upload_file1 = self._create_upload_file() - upload_file2 = self._create_upload_file() - tool_file = self._create_tool_file() + tenant_id = str(uuid4()) + user_id = str(uuid4()) + conversation_id = str(uuid4()) - file1 = self._create_file(related_id=upload_file1.id, transfer_method=FileTransferMethod.LOCAL_FILE) - file2 = self._create_file(related_id=upload_file2.id, transfer_method=FileTransferMethod.REMOTE_URL) - file3 = self._create_file(related_id=tool_file.id, transfer_method=FileTransferMethod.TOOL_FILE) + upload_file1 = self._create_upload_file(db_session_with_containers, tenant_id, user_id) + upload_file2 = self._create_upload_file(db_session_with_containers, tenant_id, user_id) + tool_file = self._create_tool_file(db_session_with_containers, tenant_id, user_id, conversation_id) - files = [file1, file2, file3] + file1 = self._create_file(tenant_id, related_id=upload_file1.id, transfer_method=FileTransferMethod.LOCAL_FILE) + file2 = self._create_file(tenant_id, related_id=upload_file2.id, transfer_method=FileTransferMethod.REMOTE_URL) + file3 = self._create_file(tenant_id, related_id=tool_file.id, transfer_method=FileTransferMethod.TOOL_FILE) - # Load storage keys - self.loader.load_storage_keys(files) + loader = StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() + ) + loader.load_storage_keys([file1, file2, file3]) - # Verify all storage keys were loaded correctly assert file1._storage_key == upload_file1.key assert 
file2._storage_key == upload_file2.key assert file3._storage_key == tool_file.file_key - def test_load_storage_keys_empty_list(self): - """Test with empty file list.""" - # Should not raise any exceptions - self.loader.load_storage_keys([]) + def test_load_storage_keys_empty_list(self, db_session_with_containers: Session): + """Test with empty file list — should not raise.""" + tenant_id = str(uuid4()) + loader = StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() + ) + loader.load_storage_keys([]) - def test_load_storage_keys_ignores_legacy_file_tenant_id(self): + def test_load_storage_keys_ignores_legacy_file_tenant_id(self, db_session_with_containers: Session): """Legacy file tenant_id should not override the loader tenant scope.""" - upload_file = self._create_upload_file() + tenant_id = str(uuid4()) + user_id = str(uuid4()) + + upload_file = self._create_upload_file(db_session_with_containers, tenant_id, user_id) file = self._create_file( - related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE, tenant_id=str(uuid4()) + tenant_id, + related_id=upload_file.id, + transfer_method=FileTransferMethod.LOCAL_FILE, + override_tenant_id=str(uuid4()), ) - self.loader.load_storage_keys([file]) + loader = StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() + ) + loader.load_storage_keys([file]) assert file._storage_key == upload_file.key - def test_load_storage_keys_missing_file_id(self): - """Test with None file.related_id.""" - # Create a file with valid parameters first, then manually set related_id to None - file = self._create_file(related_id=str(uuid4()), transfer_method=FileTransferMethod.LOCAL_FILE) + def test_load_storage_keys_missing_file_id(self, db_session_with_containers: Session): + """Test with None file.related_id — should raise ValueError.""" + tenant_id = str(uuid4()) + user_id = str(uuid4()) + + upload_file = 
self._create_upload_file(db_session_with_containers, tenant_id, user_id) + file = self._create_file(tenant_id, related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) file.related_id = None - # Should raise ValueError for None file related_id - with pytest.raises(ValueError) as context: - self.loader.load_storage_keys([file]) + loader = StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() + ) + with pytest.raises(ValueError, match="file id should not be None."): + loader.load_storage_keys([file]) - assert str(context.value) == "file id should not be None." + def test_load_storage_keys_nonexistent_upload_file_records(self, db_session_with_containers: Session): + """Test with missing UploadFile database records — should raise ValueError.""" + tenant_id = str(uuid4()) + file = self._create_file(tenant_id, related_id=str(uuid4()), transfer_method=FileTransferMethod.LOCAL_FILE) - def test_load_storage_keys_nonexistent_upload_file_records(self): - """Test with missing UploadFile database records.""" - # Create file with non-existent upload file id - non_existent_id = str(uuid4()) - file = self._create_file(related_id=non_existent_id, transfer_method=FileTransferMethod.LOCAL_FILE) - - # Should raise ValueError for missing record + loader = StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() + ) with pytest.raises(ValueError): - self.loader.load_storage_keys([file]) + loader.load_storage_keys([file]) - def test_load_storage_keys_nonexistent_tool_file_records(self): - """Test with missing ToolFile database records.""" - # Create file with non-existent tool file id - non_existent_id = str(uuid4()) - file = self._create_file(related_id=non_existent_id, transfer_method=FileTransferMethod.TOOL_FILE) + def test_load_storage_keys_nonexistent_tool_file_records(self, db_session_with_containers: Session): + """Test with missing ToolFile database records — 
should raise ValueError.""" + tenant_id = str(uuid4()) + file = self._create_file(tenant_id, related_id=str(uuid4()), transfer_method=FileTransferMethod.TOOL_FILE) - # Should raise ValueError for missing record + loader = StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() + ) with pytest.raises(ValueError): - self.loader.load_storage_keys([file]) + loader.load_storage_keys([file]) - def test_load_storage_keys_invalid_uuid(self): - """Test with invalid UUID format.""" - # Create a file with valid parameters first, then manually set invalid related_id - file = self._create_file(related_id=str(uuid4()), transfer_method=FileTransferMethod.LOCAL_FILE) + def test_load_storage_keys_invalid_uuid(self, db_session_with_containers: Session): + """Test with invalid UUID format — should raise ValueError.""" + tenant_id = str(uuid4()) + user_id = str(uuid4()) + + upload_file = self._create_upload_file(db_session_with_containers, tenant_id, user_id) + file = self._create_file(tenant_id, related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) file.related_id = "invalid-uuid-format" - # Should raise ValueError for invalid UUID + loader = StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() + ) with pytest.raises(ValueError): - self.loader.load_storage_keys([file]) + loader.load_storage_keys([file]) - def test_load_storage_keys_batch_efficiency(self): - """Test batched operations use efficient queries.""" - # Create multiple files of different types - upload_files = [self._create_upload_file() for _ in range(3)] - tool_files = [self._create_tool_file() for _ in range(2)] + def test_load_storage_keys_batch_efficiency(self, db_session_with_containers: Session): + """Batched operations should issue exactly 2 queries for mixed file types.""" + tenant_id = str(uuid4()) + user_id = str(uuid4()) + conversation_id = str(uuid4()) - files = [] - files.extend( - 
[self._create_file(related_id=uf.id, transfer_method=FileTransferMethod.LOCAL_FILE) for uf in upload_files] + upload_files = [self._create_upload_file(db_session_with_containers, tenant_id, user_id) for _ in range(3)] + tool_files = [ + self._create_tool_file(db_session_with_containers, tenant_id, user_id, conversation_id) for _ in range(2) + ] + + files = [ + self._create_file(tenant_id, related_id=uf.id, transfer_method=FileTransferMethod.LOCAL_FILE) + for uf in upload_files + ] + [ + self._create_file(tenant_id, related_id=tf.id, transfer_method=FileTransferMethod.TOOL_FILE) + for tf in tool_files + ] + + loader = StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() ) - files.extend( - [self._create_file(related_id=tf.id, transfer_method=FileTransferMethod.TOOL_FILE) for tf in tool_files] - ) - - # Mock the session to count queries - with patch.object(self.session, "scalars", wraps=self.session.scalars) as mock_scalars: - self.loader.load_storage_keys(files) - - # Should make exactly 2 queries (one for upload_files, one for tool_files) + with patch.object( + db_session_with_containers, "scalars", wraps=db_session_with_containers.scalars + ) as mock_scalars: + loader.load_storage_keys(files) + # Exactly 2 DB round-trips: one for UploadFile, one for ToolFile assert mock_scalars.call_count == 2 - # Verify all storage keys were loaded correctly for i, file in enumerate(files[:3]): assert file._storage_key == upload_files[i].key for i, file in enumerate(files[3:]): assert file._storage_key == tool_files[i].file_key - def test_load_storage_keys_tenant_isolation(self): - """Test that tenant isolation works correctly.""" - # Create files for different tenants + def test_load_storage_keys_tenant_isolation(self, db_session_with_containers: Session): + """Loader should not surface records belonging to a different tenant.""" + tenant_id = str(uuid4()) other_tenant_id = str(uuid4()) + user_id = str(uuid4()) - # Create 
upload file for current tenant - upload_file_current = self._create_upload_file() + upload_file_current = self._create_upload_file(db_session_with_containers, tenant_id, user_id) file_current = self._create_file( - related_id=upload_file_current.id, transfer_method=FileTransferMethod.LOCAL_FILE + tenant_id, related_id=upload_file_current.id, transfer_method=FileTransferMethod.LOCAL_FILE ) - # Create upload file for other tenant (but don't add to cleanup list) - upload_file_other = UploadFile( - tenant_id=other_tenant_id, - storage_type=StorageType.LOCAL, - key="other_tenant_key", - name="other_file.txt", - size=1024, - extension=".txt", - mime_type="text/plain", - created_by_role=CreatorUserRole.ACCOUNT, - created_by=self.user_id, - created_at=datetime.now(UTC), - used=False, + upload_file_other = self._create_upload_file( + db_session_with_containers, + tenant_id, + user_id, + override_tenant_id=other_tenant_id, ) - upload_file_other.id = str(uuid4()) - self.session.add(upload_file_other) - self.session.flush() - - # Create file for other tenant but try to load with current tenant's loader file_other = self._create_file( - related_id=upload_file_other.id, transfer_method=FileTransferMethod.LOCAL_FILE, tenant_id=other_tenant_id + tenant_id, + related_id=upload_file_other.id, + transfer_method=FileTransferMethod.LOCAL_FILE, + override_tenant_id=other_tenant_id, ) - # Should raise ValueError due to tenant mismatch - with pytest.raises(ValueError) as context: - self.loader.load_storage_keys([file_other]) + loader = StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() + ) - assert "Upload file not found for id:" in str(context.value) + with pytest.raises(ValueError, match="Upload file not found for id:"): + loader.load_storage_keys([file_other]) - # Current tenant's file should still work - self.loader.load_storage_keys([file_current]) + # Current-tenant file still resolves correctly + 
loader.load_storage_keys([file_current]) assert file_current._storage_key == upload_file_current.key - def test_load_storage_keys_mixed_tenant_batch(self): - """Test batch with mixed tenant files (should fail on first mismatch).""" - # Create files for current tenant - upload_file_current = self._create_upload_file() + def test_load_storage_keys_mixed_tenant_batch(self, db_session_with_containers: Session): + """A batch containing a foreign-tenant file should fail on the mismatch.""" + tenant_id = str(uuid4()) + user_id = str(uuid4()) + + upload_file_current = self._create_upload_file(db_session_with_containers, tenant_id, user_id) file_current = self._create_file( - related_id=upload_file_current.id, transfer_method=FileTransferMethod.LOCAL_FILE + tenant_id, related_id=upload_file_current.id, transfer_method=FileTransferMethod.LOCAL_FILE ) - - # Create file for different tenant - other_tenant_id = str(uuid4()) file_other = self._create_file( - related_id=str(uuid4()), transfer_method=FileTransferMethod.LOCAL_FILE, tenant_id=other_tenant_id + tenant_id, + related_id=str(uuid4()), + transfer_method=FileTransferMethod.LOCAL_FILE, + override_tenant_id=str(uuid4()), ) - # Should raise ValueError on tenant mismatch - with pytest.raises(ValueError) as context: - self.loader.load_storage_keys([file_current, file_other]) + loader = StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() + ) + with pytest.raises(ValueError, match="Upload file not found for id:"): + loader.load_storage_keys([file_current, file_other]) - assert "Upload file not found for id:" in str(context.value) + def test_load_storage_keys_duplicate_file_ids(self, db_session_with_containers: Session): + """Duplicate file IDs in the batch should be handled gracefully.""" + tenant_id = str(uuid4()) + user_id = str(uuid4()) - def test_load_storage_keys_duplicate_file_ids(self): - """Test handling of duplicate file IDs in the batch.""" - # Create upload file 
- upload_file = self._create_upload_file() + upload_file = self._create_upload_file(db_session_with_containers, tenant_id, user_id) + file1 = self._create_file(tenant_id, related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) + file2 = self._create_file(tenant_id, related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) - # Create two File objects with same related_id - file1 = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) - file2 = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) + loader = StorageKeyLoader( + db_session_with_containers, tenant_id, access_controller=DatabaseFileAccessController() + ) + loader.load_storage_keys([file1, file2]) - # Should handle duplicates gracefully - self.loader.load_storage_keys([file1, file2]) - - # Both files should have the same storage key assert file1._storage_key == upload_file.key assert file2._storage_key == upload_file.key - def test_load_storage_keys_session_isolation(self): - """Test that the loader uses the provided session correctly.""" - # Create test data - upload_file = self._create_upload_file() - file = self._create_file(related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) + def test_load_storage_keys_session_isolation(self, db_session_with_containers: Session): + """A loader backed by an uncommitted session should not see data from another session.""" + tenant_id = str(uuid4()) + user_id = str(uuid4()) - # Create loader with different session (same underlying connection) + upload_file = self._create_upload_file(db_session_with_containers, tenant_id, user_id) + file = self._create_file(tenant_id, related_id=upload_file.id, transfer_method=FileTransferMethod.LOCAL_FILE) + # A loader with a fresh, separate session cannot see uncommitted rows from db_session_with_containers with Session(bind=db.engine) as other_session: other_loader = StorageKeyLoader( other_session, - 
self.tenant_id, + tenant_id, access_controller=DatabaseFileAccessController(), ) with pytest.raises(ValueError): diff --git a/api/tests/test_containers_integration_tests/libs/broadcast_channel/redis/test_sharded_channel.py b/api/tests/test_containers_integration_tests/libs/broadcast_channel/redis/test_sharded_channel.py index 43915a204d..84c1d0ca41 100644 --- a/api/tests/test_containers_integration_tests/libs/broadcast_channel/redis/test_sharded_channel.py +++ b/api/tests/test_containers_integration_tests/libs/broadcast_channel/redis/test_sharded_channel.py @@ -8,6 +8,7 @@ Covers real Redis 7+ sharded pub/sub interactions including: - Resource cleanup accounting via PUBSUB SHARDNUMSUB """ +import socket import threading import time import uuid @@ -356,10 +357,17 @@ class TestShardedRedisBroadcastChannelClusterIntegration: def _get_test_topic_name(cls) -> str: return f"test_sharded_cluster_topic_{uuid.uuid4()}" + @staticmethod + def _resolve_announced_ip(host: str) -> str: + """Resolve the container host name to a literal IP accepted by Redis cluster config.""" + return socket.getaddrinfo(host, None, type=socket.SOCK_STREAM)[0][4][0] + @staticmethod def _ensure_single_node_cluster(host: str, port: int) -> None: + """Bootstrap a single-node cluster using a literal IP for Redis node advertisement.""" client = redis.Redis(host=host, port=port, decode_responses=False) - client.config_set("cluster-announce-ip", host) + announced_ip = TestShardedRedisBroadcastChannelClusterIntegration._resolve_announced_ip(host) + client.config_set("cluster-announce-ip", announced_ip) client.config_set("cluster-announce-port", port) slots = client.execute_command("CLUSTER", "SLOTS") if not slots: diff --git a/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py b/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py index 178fc2e4fb..390795486b 100644 --- 
a/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py +++ b/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py @@ -11,7 +11,7 @@ from libs import helper as helper_module @pytest.mark.usefixtures("flask_app_with_containers") -def test_rate_limiter_counts_multiple_attempts_in_same_second(monkeypatch): +def test_rate_limiter_counts_multiple_attempts_in_same_second(monkeypatch: pytest.MonkeyPatch): prefix = f"test_rate_limit:{uuid.uuid4().hex}" limiter = helper_module.RateLimiter(prefix=prefix, max_attempts=2, time_window=60) key = limiter._get_key("203.0.113.10") diff --git a/api/tests/test_containers_integration_tests/services/auth/test_api_key_auth_service.py b/api/tests/test_containers_integration_tests/services/auth/test_api_key_auth_service.py index 177fb95ff3..e71079829f 100644 --- a/api/tests/test_containers_integration_tests/services/auth/test_api_key_auth_service.py +++ b/api/tests/test_containers_integration_tests/services/auth/test_api_key_auth_service.py @@ -5,6 +5,7 @@ from unittest.mock import Mock, patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from models.source import DataSourceApiKeyAuthBinding from services.auth.api_key_auth_service import ApiKeyAuthService @@ -31,7 +32,7 @@ class TestApiKeyAuthService: def mock_args(self, category, provider, mock_credentials) -> dict: return {"category": category, "provider": provider, "credentials": mock_credentials} - def _create_binding(self, db_session, *, tenant_id, category, provider, credentials=None, disabled=False): + def _create_binding(self, db_session: Session, *, tenant_id, category, provider, credentials=None, disabled=False): binding = DataSourceApiKeyAuthBinding( tenant_id=tenant_id, category=category, @@ -44,7 +45,7 @@ class TestApiKeyAuthService: return binding def test_get_provider_auth_list_success( - self, flask_app_with_containers, db_session_with_containers, tenant_id, category, provider + self, 
flask_app_with_containers, db_session_with_containers: Session, tenant_id, category, provider ): self._create_binding(db_session_with_containers, tenant_id=tenant_id, category=category, provider=provider) db_session_with_containers.expire_all() @@ -56,14 +57,16 @@ class TestApiKeyAuthService: assert len(tenant_results) == 1 assert tenant_results[0].provider == provider - def test_get_provider_auth_list_empty(self, flask_app_with_containers, db_session_with_containers, tenant_id): + def test_get_provider_auth_list_empty( + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id + ): result = ApiKeyAuthService.get_provider_auth_list(tenant_id) tenant_results = [r for r in result if r.tenant_id == tenant_id] assert tenant_results == [] def test_get_provider_auth_list_filters_disabled( - self, flask_app_with_containers, db_session_with_containers, tenant_id, category, provider + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, category, provider ): self._create_binding( db_session_with_containers, tenant_id=tenant_id, category=category, provider=provider, disabled=True @@ -78,7 +81,13 @@ class TestApiKeyAuthService: @patch("services.auth.api_key_auth_service.ApiKeyAuthFactory") @patch("services.auth.api_key_auth_service.encrypter") def test_create_provider_auth_success( - self, mock_encrypter, mock_factory, flask_app_with_containers, db_session_with_containers, tenant_id, mock_args + self, + mock_encrypter, + mock_factory, + flask_app_with_containers, + db_session_with_containers: Session, + tenant_id, + mock_args, ): mock_auth_instance = Mock() mock_auth_instance.validate_credentials.return_value = True @@ -97,7 +106,7 @@ class TestApiKeyAuthService: @patch("services.auth.api_key_auth_service.ApiKeyAuthFactory") def test_create_provider_auth_validation_failed( - self, mock_factory, flask_app_with_containers, db_session_with_containers, tenant_id, mock_args + self, mock_factory, flask_app_with_containers, 
db_session_with_containers: Session, tenant_id, mock_args ): mock_auth_instance = Mock() mock_auth_instance.validate_credentials.return_value = False @@ -112,7 +121,13 @@ class TestApiKeyAuthService: @patch("services.auth.api_key_auth_service.ApiKeyAuthFactory") @patch("services.auth.api_key_auth_service.encrypter") def test_create_provider_auth_encrypts_api_key( - self, mock_encrypter, mock_factory, flask_app_with_containers, db_session_with_containers, tenant_id, mock_args + self, + mock_encrypter, + mock_factory, + flask_app_with_containers, + db_session_with_containers: Session, + tenant_id, + mock_args, ): mock_auth_instance = Mock() mock_auth_instance.validate_credentials.return_value = True @@ -128,7 +143,13 @@ class TestApiKeyAuthService: mock_encrypter.encrypt_token.assert_called_once_with(tenant_id, original_key) def test_get_auth_credentials_success( - self, flask_app_with_containers, db_session_with_containers, tenant_id, category, provider, mock_credentials + self, + flask_app_with_containers, + db_session_with_containers: Session, + tenant_id, + category, + provider, + mock_credentials, ): self._create_binding( db_session_with_containers, @@ -144,14 +165,14 @@ class TestApiKeyAuthService: assert result == mock_credentials def test_get_auth_credentials_not_found( - self, flask_app_with_containers, db_session_with_containers, tenant_id, category, provider + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, category, provider ): result = ApiKeyAuthService.get_auth_credentials(tenant_id, category, provider) assert result is None def test_get_auth_credentials_json_parsing( - self, flask_app_with_containers, db_session_with_containers, tenant_id, category, provider + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, category, provider ): special_credentials = {"auth_type": "api_key", "config": {"api_key": "key_with_中文_and_special_chars_!@#$%"}} self._create_binding( @@ -169,7 +190,7 @@ class 
TestApiKeyAuthService: assert result["config"]["api_key"] == "key_with_中文_and_special_chars_!@#$%" def test_delete_provider_auth_success( - self, flask_app_with_containers, db_session_with_containers, tenant_id, category, provider + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, category, provider ): binding = self._create_binding( db_session_with_containers, tenant_id=tenant_id, category=category, provider=provider @@ -183,7 +204,9 @@ class TestApiKeyAuthService: remaining = db_session_with_containers.query(DataSourceApiKeyAuthBinding).filter_by(id=binding_id).first() assert remaining is None - def test_delete_provider_auth_not_found(self, flask_app_with_containers, db_session_with_containers, tenant_id): + def test_delete_provider_auth_not_found( + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id + ): # Should not raise when binding not found ApiKeyAuthService.delete_provider_auth(tenant_id, str(uuid4())) diff --git a/api/tests/test_containers_integration_tests/services/auth/test_auth_integration.py b/api/tests/test_containers_integration_tests/services/auth/test_auth_integration.py index f48c6da690..e78fa27976 100644 --- a/api/tests/test_containers_integration_tests/services/auth/test_auth_integration.py +++ b/api/tests/test_containers_integration_tests/services/auth/test_auth_integration.py @@ -10,6 +10,7 @@ from uuid import uuid4 import httpx import pytest +from sqlalchemy.orm import Session from models.source import DataSourceApiKeyAuthBinding from services.auth.api_key_auth_factory import ApiKeyAuthFactory @@ -114,7 +115,7 @@ class TestAuthIntegration: assert result2[0].tenant_id == tenant_id_2 def test_cross_tenant_access_prevention( - self, flask_app_with_containers, db_session_with_containers, tenant_id_2, category + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id_2, category ): result = ApiKeyAuthService.get_auth_credentials(tenant_id_2, category, 
AuthType.FIRECRAWL) diff --git a/api/tests/test_containers_integration_tests/services/document_service_status.py b/api/tests/test_containers_integration_tests/services/document_service_status.py index 42d587b7f7..327f14ddfe 100644 --- a/api/tests/test_containers_integration_tests/services/document_service_status.py +++ b/api/tests/test_containers_integration_tests/services/document_service_status.py @@ -12,6 +12,7 @@ from unittest.mock import create_autospec, patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType from extensions.storage.storage_type import StorageType @@ -273,7 +274,9 @@ class TestDocumentServicePauseDocument: "user_id": user_id, } - def test_pause_document_waiting_state_success(self, db_session_with_containers, mock_document_service_dependencies): + def test_pause_document_waiting_state_success( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test successful pause of document in waiting state. @@ -310,7 +313,7 @@ class TestDocumentServicePauseDocument: mock_document_service_dependencies["redis_client"].setnx.assert_called_once_with(expected_cache_key, "True") def test_pause_document_indexing_state_success( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test successful pause of document in indexing state. @@ -340,7 +343,9 @@ class TestDocumentServicePauseDocument: assert document.is_paused is True assert document.paused_by == mock_document_service_dependencies["user_id"] - def test_pause_document_parsing_state_success(self, db_session_with_containers, mock_document_service_dependencies): + def test_pause_document_parsing_state_success( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test successful pause of document in parsing state. 
@@ -367,7 +372,9 @@ class TestDocumentServicePauseDocument: db_session_with_containers.refresh(document) assert document.is_paused is True - def test_pause_document_completed_state_error(self, db_session_with_containers, mock_document_service_dependencies): + def test_pause_document_completed_state_error( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test error when trying to pause completed document. @@ -396,7 +403,9 @@ class TestDocumentServicePauseDocument: db_session_with_containers.refresh(document) assert document.is_paused is False - def test_pause_document_error_state_error(self, db_session_with_containers, mock_document_service_dependencies): + def test_pause_document_error_state_error( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test error when trying to pause document in error state. @@ -467,7 +476,9 @@ class TestDocumentServiceRecoverDocument: "recover_task": mock_task, } - def test_recover_document_paused_success(self, db_session_with_containers, mock_document_service_dependencies): + def test_recover_document_paused_success( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test successful recovery of paused document. @@ -510,7 +521,9 @@ class TestDocumentServiceRecoverDocument: document.dataset_id, document.id ) - def test_recover_document_not_paused_error(self, db_session_with_containers, mock_document_service_dependencies): + def test_recover_document_not_paused_error( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test error when trying to recover non-paused document. 
@@ -590,7 +603,9 @@ class TestDocumentServiceRetryDocument: "user_id": user_id, } - def test_retry_document_single_success(self, db_session_with_containers, mock_document_service_dependencies): + def test_retry_document_single_success( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test successful retry of single document. @@ -629,7 +644,9 @@ class TestDocumentServiceRetryDocument: dataset.id, [document.id], mock_document_service_dependencies["user_id"] ) - def test_retry_document_multiple_success(self, db_session_with_containers, mock_document_service_dependencies): + def test_retry_document_multiple_success( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test successful retry of multiple documents. @@ -675,7 +692,7 @@ class TestDocumentServiceRetryDocument: ) def test_retry_document_concurrent_retry_error( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test error when document is already being retried. @@ -708,7 +725,7 @@ class TestDocumentServiceRetryDocument: assert document.indexing_status == IndexingStatus.ERROR def test_retry_document_missing_current_user_error( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test error when current_user is missing. @@ -794,7 +811,7 @@ class TestDocumentServiceBatchUpdateDocumentStatus: } def test_batch_update_document_status_enable_success( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test successful batch enabling of documents. 
@@ -844,7 +861,7 @@ class TestDocumentServiceBatchUpdateDocumentStatus: assert mock_document_service_dependencies["add_task"].delay.call_count == 2 def test_batch_update_document_status_disable_success( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test successful batch disabling of documents. @@ -886,7 +903,7 @@ class TestDocumentServiceBatchUpdateDocumentStatus: mock_document_service_dependencies["remove_task"].delay.assert_called_once_with(document.id) def test_batch_update_document_status_archive_success( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test successful batch archiving of documents. @@ -928,7 +945,7 @@ class TestDocumentServiceBatchUpdateDocumentStatus: mock_document_service_dependencies["remove_task"].delay.assert_called_once_with(document.id) def test_batch_update_document_status_unarchive_success( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test successful batch unarchiving of documents. @@ -970,7 +987,7 @@ class TestDocumentServiceBatchUpdateDocumentStatus: mock_document_service_dependencies["add_task"].delay.assert_called_once_with(document.id) def test_batch_update_document_status_empty_list( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test handling of empty document list. 
@@ -996,7 +1013,7 @@ class TestDocumentServiceBatchUpdateDocumentStatus: mock_document_service_dependencies["remove_task"].delay.assert_not_called() def test_batch_update_document_status_document_indexing_error( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test error when document is being indexed. @@ -1073,7 +1090,7 @@ class TestDocumentServiceRenameDocument: "current_user": mock_current_user, } - def test_rename_document_success(self, db_session_with_containers, mock_document_service_dependencies): + def test_rename_document_success(self, db_session_with_containers: Session, mock_document_service_dependencies): """ Test successful document renaming. @@ -1111,7 +1128,9 @@ class TestDocumentServiceRenameDocument: assert result == document assert document.name == new_name - def test_rename_document_with_built_in_fields(self, db_session_with_containers, mock_document_service_dependencies): + def test_rename_document_with_built_in_fields( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test document renaming with built-in fields enabled. @@ -1154,7 +1173,9 @@ class TestDocumentServiceRenameDocument: assert document.doc_metadata["document_name"] == new_name assert document.doc_metadata["existing_key"] == "existing_value" - def test_rename_document_with_upload_file(self, db_session_with_containers, mock_document_service_dependencies): + def test_rename_document_with_upload_file( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test document renaming with associated upload file. 
@@ -1202,7 +1223,7 @@ class TestDocumentServiceRenameDocument: assert upload_file.name == new_name def test_rename_document_dataset_not_found_error( - self, db_session_with_containers, mock_document_service_dependencies + self, db_session_with_containers: Session, mock_document_service_dependencies ): """ Test error when dataset is not found. @@ -1224,7 +1245,9 @@ class TestDocumentServiceRenameDocument: with pytest.raises(ValueError, match="Dataset not found"): DocumentService.rename_document(dataset_id, document_id, new_name) - def test_rename_document_not_found_error(self, db_session_with_containers, mock_document_service_dependencies): + def test_rename_document_not_found_error( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test error when document is not found. @@ -1251,7 +1274,9 @@ class TestDocumentServiceRenameDocument: with pytest.raises(ValueError, match="Document not found"): DocumentService.rename_document(dataset.id, document_id, new_name) - def test_rename_document_permission_error(self, db_session_with_containers, mock_document_service_dependencies): + def test_rename_document_permission_error( + self, db_session_with_containers: Session, mock_document_service_dependencies + ): """ Test error when user lacks permission. 
diff --git a/api/tests/test_containers_integration_tests/services/enterprise/test_account_deletion_sync.py b/api/tests/test_containers_integration_tests/services/enterprise/test_account_deletion_sync.py index 4e8255d8ed..e73c2afe7f 100644 --- a/api/tests/test_containers_integration_tests/services/enterprise/test_account_deletion_sync.py +++ b/api/tests/test_containers_integration_tests/services/enterprise/test_account_deletion_sync.py @@ -11,6 +11,7 @@ from uuid import uuid4 import pytest from redis import RedisError +from sqlalchemy.orm import Session from extensions.ext_redis import redis_client from models.account import TenantAccountJoin @@ -122,7 +123,7 @@ class TestSyncAccountDeletion: mock_queue_task.assert_not_called() def test_sync_account_deletion_multiple_workspaces( - self, flask_app_with_containers, db_session_with_containers, mock_queue_task + self, flask_app_with_containers, db_session_with_containers: Session, mock_queue_task ): account_id = str(uuid4()) tenant_ids = [str(uuid4()) for _ in range(3)] @@ -144,7 +145,7 @@ class TestSyncAccountDeletion: assert queued_workspace_ids == set(tenant_ids) def test_sync_account_deletion_no_workspaces( - self, flask_app_with_containers, db_session_with_containers, mock_queue_task + self, flask_app_with_containers, db_session_with_containers: Session, mock_queue_task ): with patch("services.enterprise.account_deletion_sync.dify_config") as mock_config: mock_config.ENTERPRISE_ENABLED = True @@ -155,7 +156,7 @@ class TestSyncAccountDeletion: mock_queue_task.assert_not_called() def test_sync_account_deletion_partial_failure( - self, flask_app_with_containers, db_session_with_containers, mock_queue_task + self, flask_app_with_containers, db_session_with_containers: Session, mock_queue_task ): account_id = str(uuid4()) tenant_ids = [str(uuid4()) for _ in range(3)] @@ -180,7 +181,7 @@ class TestSyncAccountDeletion: assert mock_queue_task.call_count == 3 def test_sync_account_deletion_all_failures( - self, 
flask_app_with_containers, db_session_with_containers, mock_queue_task + self, flask_app_with_containers, db_session_with_containers: Session, mock_queue_task ): account_id = str(uuid4()) tenant_id = str(uuid4()) diff --git a/api/tests/test_containers_integration_tests/services/plugin/test_plugin_permission_service.py b/api/tests/test_containers_integration_tests/services/plugin/test_plugin_permission_service.py new file mode 100644 index 0000000000..49d06986fd --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/plugin/test_plugin_permission_service.py @@ -0,0 +1,94 @@ +from __future__ import annotations + +from uuid import uuid4 + +from sqlalchemy import func, select +from sqlalchemy.orm import Session + +from models.account import TenantPluginPermission +from services.plugin.plugin_permission_service import PluginPermissionService + + +def _tenant_id() -> str: + return str(uuid4()) + + +def _get_permission(session: Session, tenant_id: str) -> TenantPluginPermission | None: + session.expire_all() + stmt = select(TenantPluginPermission).where(TenantPluginPermission.tenant_id == tenant_id) + return session.scalars(stmt).one_or_none() + + +def _count_permissions(session: Session, tenant_id: str) -> int: + stmt = select(func.count()).select_from(TenantPluginPermission).where(TenantPluginPermission.tenant_id == tenant_id) + return session.scalar(stmt) or 0 + + +class TestGetPermission: + """Integration tests for PluginPermissionService.get_permission using testcontainers.""" + + def test_returns_permission_when_found(self, db_session_with_containers: Session): + tenant_id = _tenant_id() + permission = TenantPluginPermission( + tenant_id=tenant_id, + install_permission=TenantPluginPermission.InstallPermission.ADMINS, + debug_permission=TenantPluginPermission.DebugPermission.EVERYONE, + ) + db_session_with_containers.add(permission) + db_session_with_containers.commit() + + result = PluginPermissionService.get_permission(tenant_id) + + assert result 
is not None + assert result.id == permission.id + assert result.tenant_id == tenant_id + assert result.install_permission == TenantPluginPermission.InstallPermission.ADMINS + assert result.debug_permission == TenantPluginPermission.DebugPermission.EVERYONE + + def test_returns_none_when_not_found(self, db_session_with_containers: Session): + result = PluginPermissionService.get_permission(_tenant_id()) + + assert result is None + + +class TestChangePermission: + """Integration tests for PluginPermissionService.change_permission using testcontainers.""" + + def test_creates_new_permission_when_not_exists(self, db_session_with_containers: Session): + tenant_id = _tenant_id() + + result = PluginPermissionService.change_permission( + tenant_id, + TenantPluginPermission.InstallPermission.EVERYONE, + TenantPluginPermission.DebugPermission.EVERYONE, + ) + + permission = _get_permission(db_session_with_containers, tenant_id) + assert result is True + assert permission is not None + assert permission.install_permission == TenantPluginPermission.InstallPermission.EVERYONE + assert permission.debug_permission == TenantPluginPermission.DebugPermission.EVERYONE + + def test_updates_existing_permission(self, db_session_with_containers: Session): + tenant_id = _tenant_id() + existing = TenantPluginPermission( + tenant_id=tenant_id, + install_permission=TenantPluginPermission.InstallPermission.EVERYONE, + debug_permission=TenantPluginPermission.DebugPermission.EVERYONE, + ) + db_session_with_containers.add(existing) + db_session_with_containers.commit() + + result = PluginPermissionService.change_permission( + tenant_id, + TenantPluginPermission.InstallPermission.ADMINS, + TenantPluginPermission.DebugPermission.ADMINS, + ) + + permission = _get_permission(db_session_with_containers, tenant_id) + assert result is True + assert permission is not None + assert permission.id == existing.id + assert permission.install_permission == TenantPluginPermission.InstallPermission.ADMINS + 
assert permission.debug_permission == TenantPluginPermission.DebugPermission.ADMINS + assert _count_permissions(db_session_with_containers, tenant_id) == 1 diff --git a/api/tests/test_containers_integration_tests/services/recommend_app/test_database_retrieval.py b/api/tests/test_containers_integration_tests/services/recommend_app/test_database_retrieval.py index 2b842629a7..11e864176a 100644 --- a/api/tests/test_containers_integration_tests/services/recommend_app/test_database_retrieval.py +++ b/api/tests/test_containers_integration_tests/services/recommend_app/test_database_retrieval.py @@ -3,6 +3,8 @@ from __future__ import annotations from unittest.mock import patch from uuid import uuid4 +from sqlalchemy.orm import Session + from models.model import App, RecommendedApp, Site from services.recommend_app.database.database_retrieval import DatabaseRecommendAppRetrieval from services.recommend_app.recommend_app_type import RecommendAppType @@ -45,6 +47,7 @@ def _create_recommended_app( *, app_id: str, category: str = "chat", + categories: list[str] | None = None, language: str = "en-US", is_listed: bool = True, position: int = 1, @@ -55,6 +58,7 @@ def _create_recommended_app( copyright="copy", privacy_policy="pp", category=category, + categories=[category] if categories is None else categories, language=language, is_listed=is_listed, position=position, @@ -91,7 +95,7 @@ class TestDatabaseRecommendAppRetrieval: class TestFetchRecommendedAppsFromDb: - def test_returns_apps_and_sorted_categories(self, flask_app_with_containers, db_session_with_containers): + def test_returns_apps_and_sorted_categories(self, flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) app1 = _create_app(db_session_with_containers, tenant_id=tenant_id) _create_site(db_session_with_containers, app_id=app1.id) @@ -111,7 +115,56 @@ class TestFetchRecommendedAppsFromDb: assert "assistant" in result["categories"] assert "writing" in result["categories"] - def 
test_falls_back_to_default_language_when_empty(self, flask_app_with_containers, db_session_with_containers): + def test_returns_multiple_categories_for_one_app( + self, flask_app_with_containers, db_session_with_containers: Session + ): + tenant_id = str(uuid4()) + created_app = _create_app(db_session_with_containers, tenant_id=tenant_id) + _create_site(db_session_with_containers, app_id=created_app.id) + _create_recommended_app( + db_session_with_containers, + app_id=created_app.id, + category="writing", + categories=["writing", "assistant"], + ) + + db_session_with_containers.expire_all() + + result = DatabaseRecommendAppRetrieval.fetch_recommended_apps_from_db("en-US") + + recommended_app = next(item for item in result["recommended_apps"] if item["app_id"] == created_app.id) + assert recommended_app["categories"] == ["writing", "assistant"] + assert "writing" in result["categories"] + assert "assistant" in result["categories"] + + def test_ignores_legacy_category_when_categories_are_empty( + self, + flask_app_with_containers, + db_session_with_containers: Session, + ): + legacy_category = f"legacy-empty-{uuid4()}" + tenant_id = str(uuid4()) + created_app = _create_app(db_session_with_containers, tenant_id=tenant_id) + _create_site(db_session_with_containers, app_id=created_app.id) + _create_recommended_app( + db_session_with_containers, + app_id=created_app.id, + category=legacy_category, + categories=[], + ) + + db_session_with_containers.expire_all() + + result = DatabaseRecommendAppRetrieval.fetch_recommended_apps_from_db("en-US") + + recommended_app = next(item for item in result["recommended_apps"] if item["app_id"] == created_app.id) + assert "category" not in recommended_app + assert recommended_app["categories"] == [] + assert legacy_category not in result["categories"] + + def test_falls_back_to_default_language_when_empty( + self, flask_app_with_containers, db_session_with_containers: Session + ): tenant_id = str(uuid4()) app1 = 
_create_app(db_session_with_containers, tenant_id=tenant_id) _create_site(db_session_with_containers, app_id=app1.id) @@ -124,7 +177,7 @@ class TestFetchRecommendedAppsFromDb: app_ids = {r["app_id"] for r in result["recommended_apps"]} assert app1.id in app_ids - def test_skips_non_public_apps(self, flask_app_with_containers, db_session_with_containers): + def test_skips_non_public_apps(self, flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) app1 = _create_app(db_session_with_containers, tenant_id=tenant_id, is_public=False) _create_site(db_session_with_containers, app_id=app1.id) @@ -137,7 +190,7 @@ class TestFetchRecommendedAppsFromDb: app_ids = {r["app_id"] for r in result["recommended_apps"]} assert app1.id not in app_ids - def test_skips_apps_without_site(self, flask_app_with_containers, db_session_with_containers): + def test_skips_apps_without_site(self, flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) app1 = _create_app(db_session_with_containers, tenant_id=tenant_id) _create_recommended_app(db_session_with_containers, app_id=app1.id) @@ -151,12 +204,12 @@ class TestFetchRecommendedAppsFromDb: class TestFetchRecommendedAppDetailFromDb: - def test_returns_none_when_not_listed(self, flask_app_with_containers, db_session_with_containers): + def test_returns_none_when_not_listed(self, flask_app_with_containers, db_session_with_containers: Session): result = DatabaseRecommendAppRetrieval.fetch_recommended_app_detail_from_db(str(uuid4())) assert result is None - def test_returns_none_when_app_not_public(self, flask_app_with_containers, db_session_with_containers): + def test_returns_none_when_app_not_public(self, flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) app1 = _create_app(db_session_with_containers, tenant_id=tenant_id, is_public=False) _create_recommended_app(db_session_with_containers, app_id=app1.id) @@ -168,7 +221,7 @@ class 
TestFetchRecommendedAppDetailFromDb: assert result is None @patch("services.recommend_app.database.database_retrieval.AppDslService") - def test_returns_detail_on_success(self, mock_dsl, flask_app_with_containers, db_session_with_containers): + def test_returns_detail_on_success(self, mock_dsl, flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) app1 = _create_app(db_session_with_containers, tenant_id=tenant_id) _create_site(db_session_with_containers, app_id=app1.id) diff --git a/api/tests/test_containers_integration_tests/services/test_advanced_prompt_template_service.py b/api/tests/test_containers_integration_tests/services/test_advanced_prompt_template_service.py index 3ec265d009..f78037e503 100644 --- a/api/tests/test_containers_integration_tests/services/test_advanced_prompt_template_service.py +++ b/api/tests/test_containers_integration_tests/services/test_advanced_prompt_template_service.py @@ -2,6 +2,7 @@ import copy import pytest from faker import Faker +from sqlalchemy.orm import Session from core.prompt.prompt_templates.advanced_prompt_templates import ( BAICHUAN_CHAT_APP_CHAT_PROMPT_CONFIG, @@ -29,7 +30,9 @@ class TestAdvancedPromptTemplateService: # for consistency with other test files return {} - def test_get_prompt_baichuan_model_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_prompt_baichuan_model_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test successful prompt generation for Baichuan model. 
@@ -64,7 +67,9 @@ class TestAdvancedPromptTemplateService: assert "{{#histories#}}" in prompt_text assert "{{#query#}}" in prompt_text - def test_get_prompt_common_model_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_prompt_common_model_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test successful prompt generation for common models. @@ -100,7 +105,7 @@ class TestAdvancedPromptTemplateService: assert "{{#query#}}" in prompt_text def test_get_prompt_case_insensitive_baichuan_detection( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan model detection is case insensitive. @@ -131,7 +136,7 @@ class TestAdvancedPromptTemplateService: assert BAICHUAN_CONTEXT in prompt_text def test_get_common_prompt_chat_app_completion_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test common prompt generation for chat app with completion mode. @@ -161,7 +166,9 @@ class TestAdvancedPromptTemplateService: assert "{{#histories#}}" in prompt_text assert "{{#query#}}" in prompt_text - def test_get_common_prompt_chat_app_chat_mode(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_common_prompt_chat_app_chat_mode( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test common prompt generation for chat app with chat mode. 
@@ -189,7 +196,7 @@ class TestAdvancedPromptTemplateService: assert "{{#pre_prompt#}}" in prompt_text def test_get_common_prompt_completion_app_completion_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test common prompt generation for completion app with completion mode. @@ -217,7 +224,7 @@ class TestAdvancedPromptTemplateService: assert "{{#pre_prompt#}}" in prompt_text def test_get_common_prompt_completion_app_chat_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test common prompt generation for completion app with chat mode. @@ -245,7 +252,9 @@ class TestAdvancedPromptTemplateService: assert CONTEXT in prompt_text assert "{{#pre_prompt#}}" in prompt_text - def test_get_common_prompt_no_context(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_common_prompt_no_context( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test common prompt generation without context. @@ -273,7 +282,7 @@ class TestAdvancedPromptTemplateService: assert "{{#query#}}" in prompt_text def test_get_common_prompt_unsupported_app_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test common prompt generation with unsupported app mode. @@ -291,7 +300,7 @@ class TestAdvancedPromptTemplateService: assert result == {} def test_get_common_prompt_unsupported_model_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test common prompt generation with unsupported model mode. 
@@ -308,7 +317,9 @@ class TestAdvancedPromptTemplateService: # Assert: Verify empty dict is returned assert result == {} - def test_get_completion_prompt_with_context(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_completion_prompt_with_context( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test completion prompt generation with context. @@ -339,7 +350,7 @@ class TestAdvancedPromptTemplateService: assert result_text == CONTEXT + original_text def test_get_completion_prompt_without_context( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test completion prompt generation without context. @@ -368,7 +379,9 @@ class TestAdvancedPromptTemplateService: assert result_text == original_text assert CONTEXT not in result_text - def test_get_chat_prompt_with_context(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_chat_prompt_with_context( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test chat prompt generation with context. @@ -399,7 +412,9 @@ class TestAdvancedPromptTemplateService: assert original_text in result_text assert result_text == CONTEXT + original_text - def test_get_chat_prompt_without_context(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_chat_prompt_without_context( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test chat prompt generation without context. 
@@ -429,7 +444,7 @@ class TestAdvancedPromptTemplateService: assert CONTEXT not in result_text def test_get_baichuan_prompt_chat_app_completion_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan prompt generation for chat app with completion mode. @@ -460,7 +475,7 @@ class TestAdvancedPromptTemplateService: assert "{{#query#}}" in prompt_text def test_get_baichuan_prompt_chat_app_chat_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan prompt generation for chat app with chat mode. @@ -489,7 +504,7 @@ class TestAdvancedPromptTemplateService: assert "{{#pre_prompt#}}" in prompt_text def test_get_baichuan_prompt_completion_app_completion_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan prompt generation for completion app with completion mode. @@ -517,7 +532,7 @@ class TestAdvancedPromptTemplateService: assert "{{#pre_prompt#}}" in prompt_text def test_get_baichuan_prompt_completion_app_chat_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan prompt generation for completion app with chat mode. @@ -545,7 +560,9 @@ class TestAdvancedPromptTemplateService: assert BAICHUAN_CONTEXT in prompt_text assert "{{#pre_prompt#}}" in prompt_text - def test_get_baichuan_prompt_no_context(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_baichuan_prompt_no_context( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test Baichuan prompt generation without context. 
@@ -573,7 +590,7 @@ class TestAdvancedPromptTemplateService: assert "{{#query#}}" in prompt_text def test_get_baichuan_prompt_unsupported_app_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan prompt generation with unsupported app mode. @@ -591,7 +608,7 @@ class TestAdvancedPromptTemplateService: assert result == {} def test_get_baichuan_prompt_unsupported_model_mode( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test Baichuan prompt generation with unsupported model mode. @@ -609,7 +626,7 @@ class TestAdvancedPromptTemplateService: assert result == {} def test_get_prompt_all_app_modes_common_model( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test prompt generation for all app modes with common model. @@ -641,7 +658,7 @@ class TestAdvancedPromptTemplateService: assert result != {} def test_get_prompt_all_app_modes_baichuan_model( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test prompt generation for all app modes with Baichuan model. @@ -672,7 +689,7 @@ class TestAdvancedPromptTemplateService: assert result is not None assert result != {} - def test_get_prompt_edge_cases(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_prompt_edge_cases(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test prompt generation with edge cases. 
@@ -704,7 +721,7 @@ class TestAdvancedPromptTemplateService: # Should either return a valid result or empty dict, but not crash assert result is not None - def test_template_immutability(self, db_session_with_containers, mock_external_service_dependencies): + def test_template_immutability(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test that original templates are not modified. @@ -738,7 +755,9 @@ class TestAdvancedPromptTemplateService: assert original_completion_completion == COMPLETION_APP_COMPLETION_PROMPT_CONFIG assert original_completion_chat == COMPLETION_APP_CHAT_PROMPT_CONFIG - def test_baichuan_template_immutability(self, db_session_with_containers, mock_external_service_dependencies): + def test_baichuan_template_immutability( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test that original Baichuan templates are not modified. @@ -772,7 +791,9 @@ class TestAdvancedPromptTemplateService: assert original_baichuan_completion_completion == BAICHUAN_COMPLETION_APP_COMPLETION_PROMPT_CONFIG assert original_baichuan_completion_chat == BAICHUAN_COMPLETION_APP_CHAT_PROMPT_CONFIG - def test_context_integration_consistency(self, db_session_with_containers, mock_external_service_dependencies): + def test_context_integration_consistency( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test consistency of context integration across different scenarios. @@ -828,7 +849,7 @@ class TestAdvancedPromptTemplateService: assert prompt_text.startswith(CONTEXT) def test_baichuan_context_integration_consistency( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test consistency of Baichuan context integration across different scenarios. 
diff --git a/api/tests/test_containers_integration_tests/services/test_agent_service.py b/api/tests/test_containers_integration_tests/services/test_agent_service.py index 00a2f9a59f..670b4d00da 100644 --- a/api/tests/test_containers_integration_tests/services/test_agent_service.py +++ b/api/tests/test_containers_integration_tests/services/test_agent_service.py @@ -6,12 +6,12 @@ from faker import Faker from sqlalchemy.orm import Session from core.plugin.impl.exc import PluginDaemonClientSideError -from models import Account +from models import Account, CreatorUserRole from models.enums import ConversationFromSource, MessageFileBelongsTo from models.model import AppModelConfig, Conversation, EndUser, Message, MessageAgentThought from services.account_service import AccountService, TenantService from services.agent_service import AgentService -from services.app_service import AppService +from services.app_service import AppService, CreateAppParams from tests.test_containers_integration_tests.helpers import generate_valid_password @@ -119,16 +119,16 @@ class TestAgentService: tenant = account.current_tenant # Create app with realistic data - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "agent-chat", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - } + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="agent-chat", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) @@ -246,7 +246,7 @@ class TestAgentService: tool_input=json.dumps({"test_tool": {"input": "test_input"}}), observation=json.dumps({"test_tool": {"output": "test_output"}}), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) 
db_session_with_containers.add(thought1) @@ -294,7 +294,7 @@ class TestAgentService: agent_thoughts = self._create_test_agent_thoughts(db_session_with_containers, message) # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result structure assert result is not None @@ -370,7 +370,7 @@ class TestAgentService: # Execute the method under test with non-existent message with pytest.raises(ValueError, match="Message not found"): - AgentService.get_agent_logs(app, str(conversation.id), fake.uuid4()) + AgentService.get_agent_logs(app, conversation.id, fake.uuid4()) def test_get_agent_logs_with_end_user( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -451,7 +451,7 @@ class TestAgentService: db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -523,7 +523,7 @@ class TestAgentService: db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -561,14 +561,14 @@ class TestAgentService: tool_input=json.dumps({"error_tool": {"input": "test_input"}}), observation=json.dumps({"error_tool": {"output": "error_output"}}), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(thought_with_error) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = 
AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -592,7 +592,7 @@ class TestAgentService: conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -654,7 +654,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match="App model config not found"): - AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + AgentService.get_agent_logs(app, conversation.id, message.id) def test_get_agent_logs_agent_config_not_found( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -673,7 +673,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match="Agent config not found"): - AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + AgentService.get_agent_logs(app, conversation.id, message.id) def test_list_agent_providers_success( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -687,7 +687,7 @@ class TestAgentService: app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) # Execute the method under test - result = AgentService.list_agent_providers(str(account.id), str(app.tenant_id)) + result = AgentService.list_agent_providers(account.id, app.tenant_id) # Verify the result assert result is not None @@ -696,7 +696,7 @@ class TestAgentService: # Verify the mock was called correctly mock_plugin_client = mock_external_service_dependencies["plugin_agent_client"].return_value - mock_plugin_client.fetch_agent_strategy_providers.assert_called_once_with(str(app.tenant_id)) + 
mock_plugin_client.fetch_agent_strategy_providers.assert_called_once_with(app.tenant_id) def test_get_agent_provider_success(self, db_session_with_containers: Session, mock_external_service_dependencies): """ @@ -710,7 +710,7 @@ class TestAgentService: provider_name = "test_provider" # Execute the method under test - result = AgentService.get_agent_provider(str(account.id), str(app.tenant_id), provider_name) + result = AgentService.get_agent_provider(account.id, app.tenant_id, provider_name) # Verify the result assert result is not None @@ -718,7 +718,7 @@ class TestAgentService: # Verify the mock was called correctly mock_plugin_client = mock_external_service_dependencies["plugin_agent_client"].return_value - mock_plugin_client.fetch_agent_strategy_provider.assert_called_once_with(str(app.tenant_id), provider_name) + mock_plugin_client.fetch_agent_strategy_provider.assert_called_once_with(app.tenant_id, provider_name) def test_get_agent_provider_plugin_error( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -740,7 +740,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match=error_message): - AgentService.get_agent_provider(str(account.id), str(app.tenant_id), provider_name) + AgentService.get_agent_provider(account.id, app.tenant_id, provider_name) def test_get_agent_logs_with_complex_tool_data( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -796,14 +796,14 @@ class TestAgentService: {"tool1": {"output1": "result1"}, "tool2": {"output2": "result2"}, "tool3": {"output3": "result3"}} ), tokens=100, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(complex_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, 
conversation.id, message.id) # Verify the result assert result is not None @@ -891,14 +891,14 @@ class TestAgentService: observation=json.dumps({"file_tool": {"output": "test_output"}}), message_files=json.dumps(["file1", "file2"]), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(thought_with_files) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -926,7 +926,7 @@ class TestAgentService: mock_external_service_dependencies["current_user"].timezone = "Asia/Shanghai" # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -960,14 +960,14 @@ class TestAgentService: tool_input="", # Empty input observation="", # Empty observation tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(empty_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -1001,14 +1001,14 @@ class TestAgentService: tool_input="invalid json", # Malformed JSON observation="invalid json", # Malformed JSON tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(malformed_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, 
str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result - should handle malformed JSON gracefully assert result is not None diff --git a/api/tests/test_containers_integration_tests/services/test_annotation_service.py b/api/tests/test_containers_integration_tests/services/test_annotation_service.py index 95fc73f45a..bc75562d15 100644 --- a/api/tests/test_containers_integration_tests/services/test_annotation_service.py +++ b/api/tests/test_containers_integration_tests/services/test_annotation_service.py @@ -9,7 +9,7 @@ from models import Account from models.enums import ConversationFromSource, InvokeFrom from models.model import MessageAnnotation from services.annotation_service import AppAnnotationService -from services.app_service import AppService +from services.app_service import AppService, CreateAppParams from tests.test_containers_integration_tests.helpers import generate_valid_password @@ -86,16 +86,16 @@ class TestAnnotationService: tenant = account.current_tenant # Setup app creation arguments - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - } + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ) # Create app app_service = AppService() diff --git a/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py index 77ce28b999..c77bbd3e44 100644 --- a/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py +++ b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py @@ -3,12 +3,15 @@ from __future__ import annotations import base64 import 
json from types import SimpleNamespace +from typing import Any from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest import yaml from faker import Faker +from flask import Flask +from sqlalchemy.orm import Session from core.trigger.constants import ( TRIGGER_PLUGIN_NODE_TYPE, @@ -17,7 +20,7 @@ from core.trigger.constants import ( ) from extensions.ext_redis import redis_client from graphon.enums import BuiltinNodeTypes -from models import Account, AppMode +from models import Account, App, AppMode from models.model import AppModelConfig, IconType from services import app_dsl_service from services.account_service import AccountService, TenantService @@ -34,7 +37,7 @@ from services.app_dsl_service import ( PendingData, _check_version_compatibility, ) -from services.app_service import AppService +from services.app_service import AppService, CreateAppParams from tests.test_containers_integration_tests.helpers import generate_valid_password _DEFAULT_TENANT_ID = "00000000-0000-0000-0000-000000000001" @@ -67,11 +70,31 @@ def _pending_yaml_content(version: str = "99.0.0") -> bytes: return (f'version: "{version}"\nkind: app\napp:\n name: Loop Test\n mode: workflow\n').encode() +def _app_stub(**overrides: Any) -> App: + """Create a stub App object for testing without hitting the database.""" + defaults = { + "id": str(uuid4()), + "tenant_id": _DEFAULT_TENANT_ID, + "mode": AppMode.WORKFLOW.value, + "name": "n", + "description": "d", + "icon_type": IconType.EMOJI, + "icon": "i", + "icon_background": "#fff", + "use_icon_as_answer_icon": False, + "app_model_config": None, + } + app = MagicMock(spec=App) + for key, value in (defaults | overrides).items(): + object.__setattr__(app, key, value) + return app + + class TestAppDslService: """Integration tests for AppDslService using testcontainers.""" @pytest.fixture - def app(self, flask_app_with_containers): + def app(self, flask_app_with_containers: Flask): return flask_app_with_containers @pytest.fixture 
@@ -112,7 +135,7 @@ class TestAppDslService: "enterprise_service": mock_enterprise_service, } - def _create_test_app_and_account(self, db_session_with_containers, mock_external_service_dependencies): + def _create_test_app_and_account(self, db_session_with_containers: Session, mock_external_service_dependencies): fake = Faker() with patch("services.account_service.FeatureService") as mock_account_feature_service: mock_account_feature_service.get_system_features.return_value.is_allow_register = True @@ -124,16 +147,16 @@ class TestAppDslService: ) TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) tenant = account.current_tenant - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - } + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) return app, account @@ -175,7 +198,7 @@ class TestAppDslService: def test_check_version_compatibility_newer_version_returns_pending(self): assert _check_version_compatibility("99.0.0") == ImportStatus.PENDING - def test_check_version_compatibility_major_older_returns_pending(self, monkeypatch): + def test_check_version_compatibility_major_older_returns_pending(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(app_dsl_service, "CURRENT_DSL_VERSION", "1.0.0") assert _check_version_compatibility("0.9.9") == ImportStatus.PENDING @@ -189,7 +212,7 @@ class TestAppDslService: # ── Import: Validation ──────────────────────────────────────────── - def test_import_app_invalid_import_mode_raises_value_error(self, db_session_with_containers): + def test_import_app_invalid_import_mode_raises_value_error(self, 
db_session_with_containers: Session): service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Invalid import_mode"): service.import_app( @@ -198,7 +221,7 @@ class TestAppDslService: yaml_content="version: '0.1.0'", ) - def test_import_app_missing_yaml_content(self, db_session_with_containers): + def test_import_app_missing_yaml_content(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = service.import_app( account=_account_mock(), @@ -208,7 +231,7 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "yaml_content is required" in result.error - def test_import_app_missing_yaml_url(self, db_session_with_containers): + def test_import_app_missing_yaml_url(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = service.import_app( account=_account_mock(), @@ -218,7 +241,7 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "yaml_url is required" in result.error - def test_import_app_yaml_not_mapping_returns_failed(self, db_session_with_containers): + def test_import_app_yaml_not_mapping_returns_failed(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = service.import_app( account=_account_mock(), @@ -228,7 +251,7 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "content must be a mapping" in result.error - def test_import_app_version_not_str_returns_failed(self, db_session_with_containers): + def test_import_app_version_not_str_returns_failed(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) yaml_content = _yaml_dump({"version": 1, "kind": "app", "app": {"name": "x", "mode": "workflow"}}) result = service.import_app( @@ -239,7 +262,7 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Invalid version type" in result.error - def 
test_import_app_missing_app_data_returns_failed(self, db_session_with_containers): + def test_import_app_missing_app_data_returns_failed(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = service.import_app( account=_account_mock(), @@ -249,7 +272,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Missing app data" in result.error - def test_import_app_yaml_error_returns_failed(self, db_session_with_containers, monkeypatch): + def test_import_app_yaml_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): def bad_safe_load(_content: str): raise yaml.YAMLError("bad") @@ -264,7 +289,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert result.error.startswith("Invalid YAML format:") - def test_import_app_unexpected_error_returns_failed(self, db_session_with_containers, monkeypatch): + def test_import_app_unexpected_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): monkeypatch.setattr( AppDslService, "_create_or_update_app", @@ -282,7 +309,9 @@ class TestAppDslService: # ── Import: YAML URL ────────────────────────────────────────────── - def test_import_app_yaml_url_fetch_error_returns_failed(self, db_session_with_containers, monkeypatch): + def test_import_app_yaml_url_fetch_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): monkeypatch.setattr( app_dsl_service.ssrf_proxy, "get", @@ -298,7 +327,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Error fetching YAML from URL: boom" in result.error - def test_import_app_yaml_url_empty_content_returns_failed(self, db_session_with_containers, monkeypatch): + def test_import_app_yaml_url_empty_content_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): response = MagicMock() 
response.content = b"" response.raise_for_status.return_value = None @@ -313,7 +344,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Empty content" in result.error - def test_import_app_yaml_url_file_too_large_returns_failed(self, db_session_with_containers, monkeypatch): + def test_import_app_yaml_url_file_too_large_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): response = MagicMock() response.content = b"x" * (DSL_MAX_SIZE + 1) response.raise_for_status.return_value = None @@ -328,7 +361,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "File size exceeds" in result.error - def test_import_app_yaml_url_user_attachments_keeps_original_url(self, db_session_with_containers, monkeypatch): + def test_import_app_yaml_url_user_attachments_keeps_original_url( + self, db_session_with_containers: Session, monkeypatch + ): yaml_url = "https://github.com/user-attachments/files/24290802/loop-test.yml" yaml_bytes = _pending_yaml_content() @@ -354,7 +389,9 @@ class TestAppDslService: assert result.imported_dsl_version == "99.0.0" assert requested_urls == [yaml_url] - def test_import_app_yaml_url_github_blob_rewrites_to_raw(self, db_session_with_containers, monkeypatch): + def test_import_app_yaml_url_github_blob_rewrites_to_raw( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): yaml_url = "https://github.com/acme/repo/blob/main/app.yml" raw_url = "https://raw.githubusercontent.com/acme/repo/main/app.yml" yaml_bytes = _pending_yaml_content() @@ -383,7 +420,7 @@ class TestAppDslService: # ── Import: App ID checks ──────────────────────────────────────── - def test_import_app_app_id_not_found_returns_failed(self, db_session_with_containers): + def test_import_app_app_id_not_found_returns_failed(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = service.import_app( 
account=_account_mock(), @@ -395,7 +432,7 @@ class TestAppDslService: assert result.error == "App not found" def test_import_app_overwrite_only_allows_workflow_and_advanced_chat( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) assert app.mode == "chat" @@ -412,7 +449,7 @@ class TestAppDslService: # ── Import: Flow ────────────────────────────────────────────────── - def test_import_app_pending_stores_import_info_in_redis(self, db_session_with_containers): + def test_import_app_pending_stores_import_info_in_redis(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = service.import_app( account=_account_mock(), @@ -432,7 +469,7 @@ class TestAppDslService: assert stored is not None def test_import_app_completed_uses_declared_dependencies( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): _, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) @@ -466,7 +503,7 @@ class TestAppDslService: @pytest.mark.parametrize("has_workflow", [True, False]) def test_import_app_legacy_versions_extract_dependencies( - self, db_session_with_containers, monkeypatch, has_workflow: bool + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch, has_workflow: bool ): monkeypatch.setattr( AppDslService, @@ -523,13 +560,15 @@ class TestAppDslService: # ── Confirm Import ──────────────────────────────────────────────── - def test_confirm_import_expired_returns_failed(self, db_session_with_containers): + def test_confirm_import_expired_returns_failed(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) result = 
service.confirm_import(import_id=str(uuid4()), account=_account_mock()) assert result.status == ImportStatus.FAILED assert "expired" in result.error - def test_confirm_import_success_deletes_redis_key(self, db_session_with_containers, monkeypatch): + def test_confirm_import_success_deletes_redis_key( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): import_id = str(uuid4()) redis_key = f"{IMPORT_INFO_REDIS_KEY_PREFIX}{import_id}" @@ -562,7 +601,7 @@ class TestAppDslService: assert result.app_id == created_app.id assert redis_client.get(redis_key) is None - def test_confirm_import_invalid_pending_data_type_returns_failed(self, db_session_with_containers): + def test_confirm_import_invalid_pending_data_type_returns_failed(self, db_session_with_containers: Session): import_id = str(uuid4()) redis_key = f"{IMPORT_INFO_REDIS_KEY_PREFIX}{import_id}" redis_client.setex(redis_key, IMPORT_INFO_REDIS_EXPIRY, "123") @@ -572,7 +611,7 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "validation error" in result.error - def test_confirm_import_exception_returns_failed(self, db_session_with_containers): + def test_confirm_import_exception_returns_failed(self, db_session_with_containers: Session): import_id = str(uuid4()) redis_key = f"{IMPORT_INFO_REDIS_KEY_PREFIX}{import_id}" redis_client.setex(redis_key, IMPORT_INFO_REDIS_EXPIRY, "not-valid-json") @@ -583,13 +622,15 @@ class TestAppDslService: # ── Check Dependencies ──────────────────────────────────────────── - def test_check_dependencies_returns_empty_when_no_redis_data(self, db_session_with_containers): + def test_check_dependencies_returns_empty_when_no_redis_data(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) - app_model = SimpleNamespace(id=str(uuid4()), tenant_id=_DEFAULT_TENANT_ID) + app_model = _app_stub() result = service.check_dependencies(app_model=app_model) assert result.leaked_dependencies == [] - def 
test_check_dependencies_calls_analysis_service(self, db_session_with_containers, monkeypatch): + def test_check_dependencies_calls_analysis_service( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): app_id = str(uuid4()) pending = CheckDependenciesPendingData(dependencies=[], app_id=app_id) redis_client.setex( @@ -614,10 +655,12 @@ class TestAppDslService: ) service = AppDslService(db_session_with_containers) - result = service.check_dependencies(app_model=SimpleNamespace(id=app_id, tenant_id=_DEFAULT_TENANT_ID)) + result = service.check_dependencies(app_model=_app_stub(id=app_id)) assert len(result.leaked_dependencies) == 1 - def test_check_dependencies_with_real_app(self, db_session_with_containers, mock_external_service_dependencies): + def test_check_dependencies_with_real_app( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) mock_dependencies_json = '{"app_id": "' + app.id + '", "dependencies": []}' @@ -633,12 +676,14 @@ class TestAppDslService: # ── Create/Update App ───────────────────────────────────────────── - def test_create_or_update_app_missing_mode_raises(self, db_session_with_containers): + def test_create_or_update_app_missing_mode_raises(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="loss app mode"): service._create_or_update_app(app=None, data={"app": {}}, account=_account_mock()) - def test_create_or_update_app_existing_app_updates_fields(self, db_session_with_containers, monkeypatch): + def test_create_or_update_app_existing_app_updates_fields( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): fixed_now = object() monkeypatch.setattr(app_dsl_service, "naive_utc_now", lambda: fixed_now) @@ -656,9 +701,7 @@ class TestAppDslService: lambda _m: 
SimpleNamespace(kind="conv"), ) - app = SimpleNamespace( - id=str(uuid4()), - tenant_id=_DEFAULT_TENANT_ID, + app = _app_stub( mode=AppMode.WORKFLOW.value, name="old", description="old-desc", @@ -667,7 +710,6 @@ class TestAppDslService: icon_background="#111111", updated_by=None, updated_at=None, - app_model_config=None, ) service = AppDslService(db_session_with_containers) updated = service._create_or_update_app( @@ -693,7 +735,7 @@ class TestAppDslService: assert app.icon_background == "#222222" assert app.updated_at is fixed_now - def test_create_or_update_app_new_app_requires_tenant(self, db_session_with_containers): + def test_create_or_update_app_new_app_requires_tenant(self, db_session_with_containers: Session): account = _account_mock() account.current_tenant_id = None service = AppDslService(db_session_with_containers) @@ -705,7 +747,7 @@ class TestAppDslService: ) def test_create_or_update_app_creates_workflow_app_and_saves_dependencies( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): _, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) @@ -741,42 +783,26 @@ class TestAppDslService: stored = redis_client.get(f"{CHECK_DEPENDENCIES_REDIS_KEY_PREFIX}{app.id}") assert stored is not None - def test_create_or_update_app_workflow_missing_workflow_data_raises(self, db_session_with_containers): + def test_create_or_update_app_workflow_missing_workflow_data_raises(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Missing workflow data"): service._create_or_update_app( - app=SimpleNamespace( - id=str(uuid4()), - tenant_id=_DEFAULT_TENANT_ID, - mode=AppMode.WORKFLOW.value, - name="n", - description="d", - icon_background="#fff", - app_model_config=None, - ), + app=_app_stub(mode=AppMode.WORKFLOW.value), data={"app": 
{"mode": AppMode.WORKFLOW.value}}, account=_account_mock(), ) - def test_create_or_update_app_chat_requires_model_config(self, db_session_with_containers): + def test_create_or_update_app_chat_requires_model_config(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Missing model_config"): service._create_or_update_app( - app=SimpleNamespace( - id=str(uuid4()), - tenant_id=_DEFAULT_TENANT_ID, - mode=AppMode.CHAT.value, - name="n", - description="d", - icon_background="#fff", - app_model_config=None, - ), - data={"app": {"mode": AppMode.CHAT.value}}, + app=_app_stub(mode=AppMode.CHAT), + data={"app": {"mode": AppMode.CHAT}}, account=_account_mock(), ) def test_create_or_update_app_chat_creates_model_config_and_sends_event( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) app.app_model_config_id = None @@ -786,7 +812,7 @@ class TestAppDslService: service._create_or_update_app( app=app, data={ - "app": {"mode": AppMode.CHAT.value}, + "app": {"mode": AppMode.CHAT}, "model_config": {"model": {"provider": "openai"}}, }, account=account, @@ -795,26 +821,18 @@ class TestAppDslService: db_session_with_containers.expire_all() assert app.app_model_config_id is not None - def test_create_or_update_app_invalid_mode_raises(self, db_session_with_containers): + def test_create_or_update_app_invalid_mode_raises(self, db_session_with_containers: Session): service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Invalid app mode"): service._create_or_update_app( - app=SimpleNamespace( - id=str(uuid4()), - tenant_id=_DEFAULT_TENANT_ID, - mode=AppMode.RAG_PIPELINE.value, - name="n", - description="d", - icon_background="#fff", - app_model_config=None, - ), - 
data={"app": {"mode": AppMode.RAG_PIPELINE.value}}, + app=_app_stub(mode=AppMode.RAG_PIPELINE), + data={"app": {"mode": AppMode.RAG_PIPELINE}}, account=_account_mock(), ) # ── Export ───────────────────────────────────────────────────────── - def test_export_dsl_delegates_by_mode(self, monkeypatch): + def test_export_dsl_delegates_by_mode(self, monkeypatch: pytest.MonkeyPatch): workflow_calls: list[bool] = [] model_calls: list[bool] = [] monkeypatch.setattr( @@ -828,51 +846,36 @@ class TestAppDslService: lambda *_args, **_kwargs: model_calls.append(True), ) - workflow_app = SimpleNamespace( + workflow_app = _app_stub( mode=AppMode.WORKFLOW.value, - tenant_id=_DEFAULT_TENANT_ID, - name="n", - icon="i", icon_type="emoji", - icon_background="#fff", - description="d", - use_icon_as_answer_icon=False, - app_model_config=None, ) AppDslService.export_dsl(workflow_app) assert workflow_calls == [True] - chat_app = SimpleNamespace( - mode=AppMode.CHAT.value, - tenant_id=_DEFAULT_TENANT_ID, - name="n", - icon="i", + chat_app = _app_stub( + mode=AppMode.CHAT, icon_type="emoji", - icon_background="#fff", - description="d", - use_icon_as_answer_icon=False, app_model_config=SimpleNamespace(to_dict=lambda: {"agent_mode": {"tools": []}}), ) AppDslService.export_dsl(chat_app) assert model_calls == [True] - def test_export_dsl_preserves_icon_and_icon_type(self, monkeypatch): + def test_export_dsl_preserves_icon_and_icon_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( AppDslService, "_append_workflow_export_data", lambda **_kwargs: None, ) - emoji_app = SimpleNamespace( + emoji_app = _app_stub( mode=AppMode.WORKFLOW.value, - tenant_id=_DEFAULT_TENANT_ID, name="Emoji App", icon="🎨", icon_type=IconType.EMOJI, icon_background="#FF5733", description="App with emoji icon", use_icon_as_answer_icon=True, - app_model_config=None, ) yaml_output = AppDslService.export_dsl(emoji_app) data = yaml.safe_load(yaml_output) @@ -880,16 +883,14 @@ class TestAppDslService: assert 
data["app"]["icon_type"] == "emoji" assert data["app"]["icon_background"] == "#FF5733" - image_app = SimpleNamespace( + image_app = _app_stub( mode=AppMode.WORKFLOW.value, - tenant_id=_DEFAULT_TENANT_ID, name="Image App", icon="https://example.com/icon.png", icon_type=IconType.IMAGE, icon_background="#FFEAD5", description="App with image icon", use_icon_as_answer_icon=False, - app_model_config=None, ) yaml_output = AppDslService.export_dsl(image_app) data = yaml.safe_load(yaml_output) @@ -897,7 +898,7 @@ class TestAppDslService: assert data["app"]["icon_type"] == "image" assert data["app"]["icon_background"] == "#FFEAD5" - def test_export_dsl_chat_app_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_export_dsl_chat_app_success(self, db_session_with_containers: Session, mock_external_service_dependencies): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) model_config = AppModelConfig( @@ -935,7 +936,9 @@ class TestAppDslService: assert "model_config" in exported_data assert "dependencies" in exported_data - def test_export_dsl_workflow_app_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_export_dsl_workflow_app_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) app.mode = "workflow" db_session_with_containers.commit() @@ -968,7 +971,9 @@ class TestAppDslService: assert "workflow" in exported_data assert "dependencies" in exported_data - def test_export_dsl_with_workflow_id_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_export_dsl_with_workflow_id_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): app, account = self._create_test_app_and_account(db_session_with_containers, 
mock_external_service_dependencies) app.mode = "workflow" db_session_with_containers.commit() @@ -1008,7 +1013,7 @@ class TestAppDslService: assert "workflow" in exported_data def test_export_dsl_with_invalid_workflow_id_raises_error( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) app.mode = "workflow" @@ -1024,7 +1029,7 @@ class TestAppDslService: # ── Workflow Export Data ─────────────────────────────────────────── - def test_append_workflow_export_data_filters_and_overrides(self, monkeypatch): + def test_append_workflow_export_data_filters_and_overrides(self, monkeypatch: pytest.MonkeyPatch): workflow_dict = { "graph": { "nodes": [ @@ -1106,7 +1111,7 @@ class TestAppDslService: export_data: dict = {} AppDslService._append_workflow_export_data( export_data=export_data, - app_model=SimpleNamespace(tenant_id=_DEFAULT_TENANT_ID), + app_model=_app_stub(), include_secret=False, workflow_id=None, ) @@ -1124,7 +1129,7 @@ class TestAppDslService: assert nodes[5]["data"]["subscription_id"] == "" assert export_data["dependencies"] == [{"tenant": _DEFAULT_TENANT_ID, "dep": "dep-1"}] - def test_append_workflow_export_data_missing_workflow_raises(self, monkeypatch): + def test_append_workflow_export_data_missing_workflow_raises(self, monkeypatch: pytest.MonkeyPatch): workflow_service = MagicMock() workflow_service.get_draft_workflow.return_value = None monkeypatch.setattr(app_dsl_service, "WorkflowService", lambda: workflow_service) @@ -1132,14 +1137,14 @@ class TestAppDslService: with pytest.raises(ValueError, match="Missing draft workflow configuration"): AppDslService._append_workflow_export_data( export_data={}, - app_model=SimpleNamespace(tenant_id=_DEFAULT_TENANT_ID), + app_model=_app_stub(), include_secret=False, workflow_id=None, ) # ── Model Config 
Export Data ────────────────────────────────────── - def test_append_model_config_export_data_filters_credential_id(self, monkeypatch): + def test_append_model_config_export_data_filters_credential_id(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( AppDslService, "_extract_dependencies_from_model_config", @@ -1160,7 +1165,7 @@ class TestAppDslService: monkeypatch.setattr(app_dsl_service, "jsonable_encoder", lambda x: x) app_model_config = SimpleNamespace(to_dict=lambda: {"agent_mode": {"tools": [{"credential_id": "secret"}]}}) - app_model = SimpleNamespace(tenant_id=_DEFAULT_TENANT_ID, app_model_config=app_model_config) + app_model = _app_stub(app_model_config=app_model_config) export_data: dict = {} AppDslService._append_model_config_export_data(export_data, app_model) @@ -1169,11 +1174,11 @@ class TestAppDslService: def test_append_model_config_export_data_requires_app_config(self): with pytest.raises(ValueError, match="Missing app configuration"): - AppDslService._append_model_config_export_data({}, SimpleNamespace(app_model_config=None)) + AppDslService._append_model_config_export_data({}, _app_stub(app_model_config=None)) # ── Dependency Extraction ───────────────────────────────────────── - def test_extract_dependencies_from_workflow_graph_covers_all_node_types(self, monkeypatch): + def test_extract_dependencies_from_workflow_graph_covers_all_node_types(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_tool_dependency", @@ -1243,7 +1248,7 @@ class TestAppDslService: "model:m4", ] - def test_extract_dependencies_from_workflow_graph_handles_exceptions(self, monkeypatch): + def test_extract_dependencies_from_workflow_graph_handles_exceptions(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.ToolNodeData, "model_validate", @@ -1254,7 +1259,7 @@ class TestAppDslService: ) assert deps == [] - def test_extract_dependencies_from_model_config_parses_providers(self, 
monkeypatch): + def test_extract_dependencies_from_model_config_parses_providers(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_model_provider_dependency", @@ -1277,7 +1282,7 @@ class TestAppDslService: ) assert deps == ["model:p1", "model:p2", "tool:t1"] - def test_extract_dependencies_from_model_config_handles_exceptions(self, monkeypatch): + def test_extract_dependencies_from_model_config_handles_exceptions(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_model_provider_dependency", @@ -1291,7 +1296,7 @@ class TestAppDslService: def test_get_leaked_dependencies_empty_returns_empty(self): assert AppDslService.get_leaked_dependencies(_DEFAULT_TENANT_ID, []) == [] - def test_get_leaked_dependencies_delegates(self, monkeypatch): + def test_get_leaked_dependencies_delegates(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "get_leaked_dependencies", @@ -1302,7 +1307,7 @@ class TestAppDslService: # ── Encryption/Decryption ───────────────────────────────────────── - def test_encrypt_decrypt_dataset_id_respects_config(self, monkeypatch): + def test_encrypt_decrypt_dataset_id_respects_config(self, monkeypatch: pytest.MonkeyPatch): tenant_id = _DEFAULT_TENANT_ID dataset_uuid = "00000000-0000-0000-0000-000000000000" @@ -1327,7 +1332,7 @@ class TestAppDslService: value = "00000000-0000-0000-0000-000000000000" assert AppDslService.decrypt_dataset_id(encrypted_data=value, tenant_id=_DEFAULT_TENANT_ID) == value - def test_decrypt_dataset_id_returns_none_on_invalid_data(self, monkeypatch): + def test_decrypt_dataset_id_returns_none_on_invalid_data(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.dify_config, "DSL_EXPORT_ENCRYPT_DATASET_ID", @@ -1335,7 +1340,7 @@ class TestAppDslService: ) assert AppDslService.decrypt_dataset_id(encrypted_data="not-base64", 
tenant_id=_DEFAULT_TENANT_ID) is None - def test_decrypt_dataset_id_returns_none_when_decrypted_is_not_uuid(self, monkeypatch): + def test_decrypt_dataset_id_returns_none_when_decrypted_is_not_uuid(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.dify_config, "DSL_EXPORT_ENCRYPT_DATASET_ID", diff --git a/api/tests/test_containers_integration_tests/services/test_app_generate_service.py b/api/tests/test_containers_integration_tests/services/test_app_generate_service.py index 3229693fd4..8be4c040b7 100644 --- a/api/tests/test_containers_integration_tests/services/test_app_generate_service.py +++ b/api/tests/test_containers_integration_tests/services/test_app_generate_service.py @@ -1,4 +1,5 @@ import uuid +from typing import Literal from unittest.mock import ANY, MagicMock, patch import pytest @@ -7,6 +8,7 @@ from faker import Faker from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom +from models import App from models.model import EndUser from models.workflow import Workflow from services.app_generate_service import AppGenerateService @@ -132,7 +134,10 @@ class TestAppGenerateService: } def _create_test_app_and_account( - self, db_session_with_containers: Session, mock_external_service_dependencies, mode="chat" + self, + db_session_with_containers: Session, + mock_external_service_dependencies, + mode: Literal["chat", "agent-chat", "advanced-chat", "workflow", "completion"] = "chat", ): """ Helper method to create a test app and account for testing. 
@@ -164,27 +169,27 @@ class TestAppGenerateService: TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) tenant = account.current_tenant - # Create app with realistic data - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": mode, - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - "max_active_requests": 5, - } + from services.app_service import AppService, CreateAppParams - from services.app_service import AppService + # Create app with realistic data + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode=mode, + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + max_active_requests=5, + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) return app, account - def _create_test_workflow(self, db_session_with_containers: Session, app): + def _create_test_workflow(self, db_session_with_containers: Session, app: App): """ Helper method to create a test workflow for testing. 
diff --git a/api/tests/test_containers_integration_tests/services/test_app_service.py b/api/tests/test_containers_integration_tests/services/test_app_service.py index b695ae9fd9..c37fce296f 100644 --- a/api/tests/test_containers_integration_tests/services/test_app_service.py +++ b/api/tests/test_containers_integration_tests/services/test_app_service.py @@ -2,16 +2,18 @@ from unittest.mock import create_autospec, patch import pytest from faker import Faker +from pydantic import ValidationError from sqlalchemy.orm import Session from constants.model_template import default_app_templates from models import Account +from models.enums import AppStatus, CustomizeTokenStrategy from models.model import App, IconType, Site from services.account_service import AccountService, TenantService from tests.test_containers_integration_tests.helpers import generate_valid_password # Delay import of AppService to avoid circular dependency -# from services.app_service import AppService +# from services.app_service import AppService, AppListParams, CreateAppParams class TestAppService: @@ -63,34 +65,34 @@ class TestAppService: tenant = account.current_tenant # Setup app creation arguments - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - } + # Import here to avoid circular dependency + from services.app_service import AppService, CreateAppParams + + app_params = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ) # Create app - # Import here to avoid circular dependency - from services.app_service import AppService - app_service = AppService() - app = app_service.create_app(tenant.id, app_args, account) + app = app_service.create_app(tenant.id, app_params, account) # Verify app was 
created correctly - assert app.name == app_args["name"] - assert app.description == app_args["description"] - assert app.mode == app_args["mode"] - assert app.icon_type == app_args["icon_type"] - assert app.icon == app_args["icon"] - assert app.icon_background == app_args["icon_background"] + assert app.name == app_params.name + assert app.description == app_params.description + assert app.mode == app_params.mode + assert app.icon_type == app_params.icon_type + assert app.icon == app_params.icon + assert app.icon_background == app_params.icon_background assert app.tenant_id == tenant.id - assert app.api_rph == app_args["api_rph"] - assert app.api_rpm == app_args["api_rpm"] + assert app.api_rph == app_params.api_rph + assert app.api_rpm == app_params.api_rpm assert app.created_by == account.id assert app.updated_by == account.id assert app.status == "normal" @@ -119,7 +121,7 @@ class TestAppService: tenant = account.current_tenant # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams app_service = AppService() @@ -128,20 +130,20 @@ class TestAppService: app_modes = [v.value for v in default_app_templates] for mode in app_modes: - app_args = { - "name": f"{fake.company()} {mode}", - "description": f"Test app for {mode} mode", - "mode": mode, - "icon_type": "emoji", - "icon": "🚀", - "icon_background": "#4ECDC4", - } + app_params = CreateAppParams( + name=f"{fake.company()} {mode}", + description=f"Test app for {mode} mode", + mode=mode, + icon_type="emoji", + icon="🚀", + icon_background="#4ECDC4", + ) - app = app_service.create_app(tenant.id, app_args, account) + app = app_service.create_app(tenant.id, app_params, account) # Verify app mode was set correctly assert app.mode == mode - assert app.name == app_args["name"] + assert app.name == app_params.name assert app.tenant_id == tenant.id assert app.created_by == account.id @@ -162,20 +164,20 @@ class TestAppService: 
tenant = account.current_tenant # Create app first - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🎯", - "icon_background": "#45B7D1", - } - # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams + + app_params = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🎯", + icon_background="#45B7D1", + ) app_service = AppService() - created_app = app_service.create_app(tenant.id, app_args, account) + created_app = app_service.create_app(tenant.id, app_params, account) # Get app using the service - needs current_user mock mock_current_user = create_autospec(Account, instance=True) @@ -210,31 +212,27 @@ class TestAppService: tenant = account.current_tenant # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppListParams, AppService, CreateAppParams app_service = AppService() # Create multiple apps app_names = [fake.company() for _ in range(5)] for name in app_names: - app_args = { - "name": name, - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "📱", - "icon_background": "#96CEB4", - } - app_service.create_app(tenant.id, app_args, account) + app_params = CreateAppParams( + name=name, + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="📱", + icon_background="#96CEB4", + ) + app_service.create_app(tenant.id, app_params, account) # Get paginated apps - args = { - "page": 1, - "limit": 10, - "mode": "chat", - } + params = AppListParams(page=1, limit=10, mode="chat") - paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args) + paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, params) # Verify pagination results assert 
paginated_apps is not None @@ -266,60 +264,47 @@ class TestAppService: tenant = account.current_tenant # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppListParams, AppService, CreateAppParams app_service = AppService() # Create apps with different modes - chat_app_args = { - "name": "Chat App", - "description": "A chat application", - "mode": "chat", - "icon_type": "emoji", - "icon": "💬", - "icon_background": "#FF6B6B", - } - completion_app_args = { - "name": "Completion App", - "description": "A completion application", - "mode": "completion", - "icon_type": "emoji", - "icon": "✍️", - "icon_background": "#4ECDC4", - } + chat_app_params = CreateAppParams( + name="Chat App", + description="A chat application", + mode="chat", + icon_type="emoji", + icon="💬", + icon_background="#FF6B6B", + ) + completion_app_params = CreateAppParams( + name="Completion App", + description="A completion application", + mode="completion", + icon_type="emoji", + icon="✍️", + icon_background="#4ECDC4", + ) - chat_app = app_service.create_app(tenant.id, chat_app_args, account) - completion_app = app_service.create_app(tenant.id, completion_app_args, account) + chat_app = app_service.create_app(tenant.id, chat_app_params, account) + completion_app = app_service.create_app(tenant.id, completion_app_params, account) # Test filter by mode - chat_args = { - "page": 1, - "limit": 10, - "mode": "chat", - } - chat_apps = app_service.get_paginate_apps(account.id, tenant.id, chat_args) + chat_apps = app_service.get_paginate_apps(account.id, tenant.id, AppListParams(page=1, limit=10, mode="chat")) assert len(chat_apps.items) == 1 assert chat_apps.items[0].mode == "chat" # Test filter by name - name_args = { - "page": 1, - "limit": 10, - "mode": "chat", - "name": "Chat", - } - filtered_apps = app_service.get_paginate_apps(account.id, tenant.id, name_args) + filtered_apps = app_service.get_paginate_apps( + account.id, 
tenant.id, AppListParams(page=1, limit=10, mode="chat", name="Chat") + ) assert len(filtered_apps.items) == 1 assert "Chat" in filtered_apps.items[0].name # Test filter by created_by_me - created_by_me_args = { - "page": 1, - "limit": 10, - "mode": "completion", - "is_created_by_me": True, - } - my_apps = app_service.get_paginate_apps(account.id, tenant.id, created_by_me_args) + my_apps = app_service.get_paginate_apps( + account.id, tenant.id, AppListParams(page=1, limit=10, mode="completion", is_created_by_me=True) + ) assert len(my_apps.items) == 1 def test_get_paginate_apps_with_tag_filters( @@ -341,34 +326,29 @@ class TestAppService: tenant = account.current_tenant # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppListParams, AppService, CreateAppParams app_service = AppService() # Create an app - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🏷️", - "icon_background": "#FFEAA7", - } - app = app_service.create_app(tenant.id, app_args, account) + app_params = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🏷️", + icon_background="#FFEAA7", + ) + app = app_service.create_app(tenant.id, app_params, account) # Mock TagService to return the app ID for tag filtering with patch("services.app_service.TagService.get_target_ids_by_tag_ids") as mock_tag_service: mock_tag_service.return_value = [app.id] # Test with tag filter - args = { - "page": 1, - "limit": 10, - "mode": "chat", - "tag_ids": ["tag1", "tag2"], - } + params = AppListParams(page=1, limit=10, mode="chat", tag_ids=["tag1", "tag2"]) - paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args) + paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, params) # Verify tag service was called mock_tag_service.assert_called_once_with("app", 
tenant.id, ["tag1", "tag2"]) @@ -382,14 +362,9 @@ class TestAppService: with patch("services.app_service.TagService.get_target_ids_by_tag_ids") as mock_tag_service: mock_tag_service.return_value = [] - args = { - "page": 1, - "limit": 10, - "mode": "chat", - "tag_ids": ["nonexistent_tag"], - } + params = AppListParams(page=1, limit=10, mode="chat", tag_ids=["nonexistent_tag"]) - paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args) + paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, params) # Should return None when no apps match tag filter assert paginated_apps is None @@ -411,20 +386,20 @@ class TestAppService: tenant = account.current_tenant # Create app first - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🎯", - "icon_background": "#45B7D1", - } - # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams + + app_params = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🎯", + icon_background="#45B7D1", + ) app_service = AppService() - app = app_service.create_app(tenant.id, app_args, account) + app = app_service.create_app(tenant.id, app_params, account) # Store original values original_name = app.name @@ -480,19 +455,19 @@ class TestAppService: TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) tenant = account.current_tenant - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams app_service = AppService() app = app_service.create_app( tenant.id, - { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🎯", - "icon_background": "#45B7D1", - }, + CreateAppParams( + name=fake.company(), + 
description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🎯", + icon_background="#45B7D1", + ), account, ) @@ -532,19 +507,19 @@ class TestAppService: TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) tenant = account.current_tenant - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams app_service = AppService() app = app_service.create_app( tenant.id, - { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🎯", - "icon_background": "#45B7D1", - }, + CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🎯", + icon_background="#45B7D1", + ), account, ) @@ -583,20 +558,20 @@ class TestAppService: tenant = account.current_tenant # Create app first - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🎯", - "icon_background": "#45B7D1", - } - # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams + + app_params = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🎯", + icon_background="#45B7D1", + ) app_service = AppService() - app = app_service.create_app(tenant.id, app_args, account) + app = app_service.create_app(tenant.id, app_params, account) # Store original name original_name = app.name @@ -636,20 +611,20 @@ class TestAppService: tenant = account.current_tenant # Create app first - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🎯", - "icon_background": "#45B7D1", - } - # Import here to avoid circular dependency - from services.app_service import AppService + from 
services.app_service import AppService, CreateAppParams + + app_params = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🎯", + icon_background="#45B7D1", + ) app_service = AppService() - app = app_service.create_app(tenant.id, app_args, account) + app = app_service.create_app(tenant.id, app_params, account) # Store original values original_icon = app.icon @@ -697,18 +672,17 @@ class TestAppService: tenant = account.current_tenant # Create app first - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🌐", - "icon_background": "#74B9FF", - } - # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🌐", + icon_background="#74B9FF", + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) @@ -757,18 +731,17 @@ class TestAppService: tenant = account.current_tenant # Create app first - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🔌", - "icon_background": "#A29BFE", - } - # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🔌", + icon_background="#A29BFE", + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) @@ -817,18 +790,17 @@ class TestAppService: tenant = account.current_tenant # Create app first - app_args = { - "name": fake.company(), - "description": 
fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🔄", - "icon_background": "#FD79A8", - } - # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🔄", + icon_background="#FD79A8", + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) @@ -868,18 +840,17 @@ class TestAppService: tenant = account.current_tenant # Create app first - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🗑️", - "icon_background": "#E17055", - } - # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🗑️", + icon_background="#E17055", + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) @@ -920,18 +891,17 @@ class TestAppService: tenant = account.current_tenant # Create app first - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🧹", - "icon_background": "#00B894", - } - # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🧹", + icon_background="#00B894", + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) @@ -980,18 +950,17 @@ class TestAppService: tenant = 
account.current_tenant # Create app first - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "📊", - "icon_background": "#6C5CE7", - } - # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="📊", + icon_background="#6C5CE7", + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) @@ -1019,18 +988,17 @@ class TestAppService: tenant = account.current_tenant # Create app first - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🔗", - "icon_background": "#FDCB6E", - } - # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🔗", + icon_background="#FDCB6E", + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) @@ -1059,18 +1027,17 @@ class TestAppService: tenant = account.current_tenant # Create app first - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🆔", - "icon_background": "#E84393", - } - # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🆔", + icon_background="#E84393", + ) app_service = AppService() app = 
app_service.create_app(tenant.id, app_args, account) @@ -1079,9 +1046,9 @@ class TestAppService: site.app_id = app.id site.code = fake.postalcode() site.title = fake.company() - site.status = "normal" + site.status = AppStatus.NORMAL site.default_language = "en-US" - site.customize_token_strategy = "uuid" + site.customize_token_strategy = CustomizeTokenStrategy.UUID db_session_with_containers.add(site) db_session_with_containers.commit() @@ -1106,26 +1073,20 @@ class TestAppService: password=generate_valid_password(fake), ) TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) - tenant = account.current_tenant - - # Setup app creation arguments with invalid mode - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "invalid_mode", # Invalid mode - "icon_type": "emoji", - "icon": "❌", - "icon_background": "#D63031", - } # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import CreateAppParams - app_service = AppService() - - # Attempt to create app with invalid mode - with pytest.raises(ValueError, match="invalid mode value"): - app_service.create_app(tenant.id, app_args, account) + # Attempt to create app with invalid mode - Pydantic will reject invalid literal + with pytest.raises(ValidationError): + CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="invalid_mode", # type: ignore[arg-type] + icon_type="emoji", + icon="❌", + icon_background="#D63031", + ) def test_get_apps_with_special_characters_in_name( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -1151,99 +1112,103 @@ class TestAppService: tenant = account.current_tenant # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppListParams, AppService, CreateAppParams app_service = AppService() # Create apps with special characters in names 
app_with_percent = app_service.create_app( tenant.id, - { - "name": "App with 50% discount", - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - }, + CreateAppParams( + name="App with 50% discount", + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ), account, ) app_with_underscore = app_service.create_app( tenant.id, - { - "name": "test_data_app", - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - }, + CreateAppParams( + name="test_data_app", + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ), account, ) app_with_backslash = app_service.create_app( tenant.id, - { - "name": "path\\to\\app", - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - }, + CreateAppParams( + name="path\\to\\app", + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ), account, ) # Create app that should NOT match app_no_match = app_service.create_app( tenant.id, - { - "name": "100% different", - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - }, + CreateAppParams( + name="100% different", + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ), account, ) # Test 1: Search with % character - args = {"name": "50%", "mode": 
"chat", "page": 1, "limit": 10} - paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args) + paginated_apps = app_service.get_paginate_apps( + account.id, tenant.id, AppListParams(name="50%", mode="chat", page=1, limit=10) + ) assert paginated_apps is not None assert paginated_apps.total == 1 assert len(paginated_apps.items) == 1 assert paginated_apps.items[0].name == "App with 50% discount" # Test 2: Search with _ character - args = {"name": "test_data", "mode": "chat", "page": 1, "limit": 10} - paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args) + paginated_apps = app_service.get_paginate_apps( + account.id, tenant.id, AppListParams(name="test_data", mode="chat", page=1, limit=10) + ) assert paginated_apps is not None assert paginated_apps.total == 1 assert len(paginated_apps.items) == 1 assert paginated_apps.items[0].name == "test_data_app" # Test 3: Search with \ character - args = {"name": "path\\to\\app", "mode": "chat", "page": 1, "limit": 10} - paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args) + paginated_apps = app_service.get_paginate_apps( + account.id, tenant.id, AppListParams(name="path\\to\\app", mode="chat", page=1, limit=10) + ) assert paginated_apps is not None assert paginated_apps.total == 1 assert len(paginated_apps.items) == 1 assert paginated_apps.items[0].name == "path\\to\\app" # Test 4: Search with % should NOT match 100% (verifies escaping works) - args = {"name": "50%", "mode": "chat", "page": 1, "limit": 10} - paginated_apps = app_service.get_paginate_apps(account.id, tenant.id, args) + paginated_apps = app_service.get_paginate_apps( + account.id, tenant.id, AppListParams(name="50%", mode="chat", page=1, limit=10) + ) assert paginated_apps is not None assert paginated_apps.total == 1 assert all("50%" in app.name for app in paginated_apps.items) diff --git a/api/tests/test_containers_integration_tests/services/test_attachment_service.py 
b/api/tests/test_containers_integration_tests/services/test_attachment_service.py index 768a8baee2..d0c07f0de8 100644 --- a/api/tests/test_containers_integration_tests/services/test_attachment_service.py +++ b/api/tests/test_containers_integration_tests/services/test_attachment_service.py @@ -7,7 +7,7 @@ from uuid import uuid4 import pytest from sqlalchemy import create_engine -from sqlalchemy.orm import sessionmaker +from sqlalchemy.orm import Session, sessionmaker from werkzeug.exceptions import NotFound import services.attachment_service as attachment_service_module @@ -19,7 +19,7 @@ from services.attachment_service import AttachmentService class TestAttachmentService: - def _create_upload_file(self, db_session_with_containers, *, tenant_id: str | None = None) -> UploadFile: + def _create_upload_file(self, db_session_with_containers: Session, *, tenant_id: str | None = None) -> UploadFile: upload_file = UploadFile( tenant_id=tenant_id or str(uuid4()), storage_type=StorageType.OPENDAL, @@ -60,7 +60,7 @@ class TestAttachmentService: with pytest.raises(AssertionError, match="must be a sessionmaker or an Engine."): AttachmentService(session_factory=invalid_session_factory) - def test_should_return_base64_when_file_exists(self, db_session_with_containers): + def test_should_return_base64_when_file_exists(self, db_session_with_containers: Session): upload_file = self._create_upload_file(db_session_with_containers) service = AttachmentService(session_factory=sessionmaker(bind=db.engine)) @@ -70,7 +70,7 @@ class TestAttachmentService: assert result == base64.b64encode(b"binary-content").decode() mock_load.assert_called_once_with(upload_file.key) - def test_should_raise_not_found_when_file_missing(self, db_session_with_containers): + def test_should_raise_not_found_when_file_missing(self, db_session_with_containers: Session): service = AttachmentService(session_factory=sessionmaker(bind=db.engine)) with patch.object(attachment_service_module.storage, "load_once") as 
mock_load: diff --git a/api/tests/test_containers_integration_tests/services/test_billing_service.py b/api/tests/test_containers_integration_tests/services/test_billing_service.py index 8092c7ad75..4893126d7f 100644 --- a/api/tests/test_containers_integration_tests/services/test_billing_service.py +++ b/api/tests/test_containers_integration_tests/services/test_billing_service.py @@ -4,6 +4,7 @@ from unittest.mock import patch from uuid import uuid4 import pytest +from flask import Flask from sqlalchemy.orm import Session from extensions.ext_redis import redis_client @@ -24,7 +25,7 @@ class TestBillingServiceGetPlanBulkWithCache: """ @pytest.fixture(autouse=True) - def setup_redis_cleanup(self, flask_app_with_containers): + def setup_redis_cleanup(self, flask_app_with_containers: Flask): """Clean up Redis cache before and after each test.""" with flask_app_with_containers.app_context(): # Clean up before test @@ -56,7 +57,7 @@ class TestBillingServiceGetPlanBulkWithCache: return value return None - def test_get_plan_bulk_with_cache_all_cache_hit(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_all_cache_hit(self, flask_app_with_containers: Flask): """Test bulk plan retrieval when all tenants are in cache.""" with flask_app_with_containers.app_context(): # Arrange @@ -87,7 +88,7 @@ class TestBillingServiceGetPlanBulkWithCache: # Verify API was not called mock_get_plan_bulk.assert_not_called() - def test_get_plan_bulk_with_cache_all_cache_miss(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_all_cache_miss(self, flask_app_with_containers: Flask): """Test bulk plan retrieval when all tenants are not in cache.""" with flask_app_with_containers.app_context(): # Arrange @@ -127,7 +128,7 @@ class TestBillingServiceGetPlanBulkWithCache: assert ttl_1 > 0 assert ttl_1 <= 600 # Should be <= 600 seconds - def test_get_plan_bulk_with_cache_partial_cache_hit(self, flask_app_with_containers): + def 
test_get_plan_bulk_with_cache_partial_cache_hit(self, flask_app_with_containers: Flask): """Test bulk plan retrieval when some tenants are in cache, some are not.""" with flask_app_with_containers.app_context(): # Arrange @@ -158,7 +159,7 @@ class TestBillingServiceGetPlanBulkWithCache: cached_data_3 = json.loads(cached_3) assert cached_data_3 == missing_plan["tenant-3"] - def test_get_plan_bulk_with_cache_redis_mget_failure(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_redis_mget_failure(self, flask_app_with_containers: Flask): """Test fallback to API when Redis mget fails.""" with flask_app_with_containers.app_context(): # Arrange @@ -189,7 +190,7 @@ class TestBillingServiceGetPlanBulkWithCache: assert cached_1 is not None assert cached_2 is not None - def test_get_plan_bulk_with_cache_invalid_json_in_cache(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_invalid_json_in_cache(self, flask_app_with_containers: Flask): """Test fallback to API when cache contains invalid JSON.""" with flask_app_with_containers.app_context(): # Arrange @@ -241,7 +242,7 @@ class TestBillingServiceGetPlanBulkWithCache: cached_data_3 = json.loads(cached_3) assert cached_data_3 == expected_plans["tenant-3"] - def test_get_plan_bulk_with_cache_invalid_plan_data_in_cache(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_invalid_plan_data_in_cache(self, flask_app_with_containers: Flask): """Test fallback to API when cache data doesn't match SubscriptionPlan schema.""" with flask_app_with_containers.app_context(): # Arrange @@ -274,7 +275,7 @@ class TestBillingServiceGetPlanBulkWithCache: # Verify API was called for tenant-2 and tenant-3 mock_get_plan_bulk.assert_called_once_with(["tenant-2", "tenant-3"]) - def test_get_plan_bulk_with_cache_redis_pipeline_failure(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_redis_pipeline_failure(self, flask_app_with_containers: Flask): """Test that pipeline failure 
doesn't affect return value.""" with flask_app_with_containers.app_context(): # Arrange @@ -303,7 +304,7 @@ class TestBillingServiceGetPlanBulkWithCache: # Verify pipeline was attempted mock_pipeline.assert_called_once() - def test_get_plan_bulk_with_cache_empty_tenant_ids(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_empty_tenant_ids(self, flask_app_with_containers: Flask): """Test with empty tenant_ids list.""" with flask_app_with_containers.app_context(): # Act @@ -321,7 +322,7 @@ class TestBillingServiceGetPlanBulkWithCache: # But we should check that mget was not called at all # Since we can't easily verify this without more mocking, we just verify the result - def test_get_plan_bulk_with_cache_ttl_expired(self, flask_app_with_containers): + def test_get_plan_bulk_with_cache_ttl_expired(self, flask_app_with_containers: Flask): """Test that expired cache keys are treated as cache misses.""" with flask_app_with_containers.app_context(): # Arrange diff --git a/api/tests/test_containers_integration_tests/services/test_conversation_service.py b/api/tests/test_containers_integration_tests/services/test_conversation_service.py index 98c38f2b5f..5f3914eb19 100644 --- a/api/tests/test_containers_integration_tests/services/test_conversation_service.py +++ b/api/tests/test_containers_integration_tests/services/test_conversation_service.py @@ -7,8 +7,10 @@ from uuid import uuid4 import pytest from sqlalchemy import select +from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom +from models import TenantAccountRole from models.account import Account, Tenant, TenantAccountJoin from models.enums import ConversationFromSource from models.model import App, Conversation, EndUser, Message, MessageAnnotation @@ -21,7 +23,7 @@ from services.message_service import MessageService class ConversationServiceIntegrationTestDataFactory: @staticmethod - def create_app_and_account(db_session_with_containers): + def 
create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -40,7 +42,7 @@ class ConversationServiceIntegrationTestDataFactory: tenant_join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role="owner", + role=TenantAccountRole.OWNER, current=True, ) db_session_with_containers.add(tenant_join) @@ -154,7 +156,7 @@ class ConversationServiceIntegrationTestDataFactory: total_price=Decimal(0), currency="USD", status="normal", - invoke_from=InvokeFrom.WEB_APP.value, + invoke_from=InvokeFrom.WEB_APP, from_source=ConversationFromSource.API if isinstance(user, EndUser) else ConversationFromSource.CONSOLE, from_end_user_id=user.id if isinstance(user, EndUser) else None, from_account_id=user.id if isinstance(user, Account) else None, @@ -170,7 +172,7 @@ class ConversationServiceIntegrationTestDataFactory: class TestConversationServicePagination: """Test conversation pagination operations.""" - def test_pagination_with_non_empty_include_ids(self, db_session_with_containers): + def test_pagination_with_non_empty_include_ids(self, db_session_with_containers: Session): """ Test that non-empty include_ids filters properly. @@ -204,7 +206,7 @@ class TestConversationServicePagination: returned_ids = {conversation.id for conversation in result.data} assert returned_ids == {conversations[0].id, conversations[1].id} - def test_pagination_with_empty_exclude_ids(self, db_session_with_containers): + def test_pagination_with_empty_exclude_ids(self, db_session_with_containers: Session): """ Test that empty exclude_ids doesn't filter. 
@@ -237,7 +239,7 @@ class TestConversationServicePagination: # Assert assert len(result.data) == len(conversations) - def test_pagination_with_non_empty_exclude_ids(self, db_session_with_containers): + def test_pagination_with_non_empty_exclude_ids(self, db_session_with_containers: Session): """ Test that non-empty exclude_ids filters properly. @@ -271,7 +273,7 @@ class TestConversationServicePagination: returned_ids = {conversation.id for conversation in result.data} assert returned_ids == {conversations[2].id} - def test_pagination_with_sorting_descending(self, db_session_with_containers): + def test_pagination_with_sorting_descending(self, db_session_with_containers: Session): """ Test pagination with descending sort order. @@ -316,7 +318,7 @@ class TestConversationServiceMessageCreation: within conversations. """ - def test_pagination_by_first_id_without_first_id(self, db_session_with_containers): + def test_pagination_by_first_id_without_first_id(self, db_session_with_containers: Session): """ Test message pagination without specifying first_id. @@ -354,7 +356,7 @@ class TestConversationServiceMessageCreation: assert len(result.data) == 3 # All 3 messages returned assert result.has_more is False # No more messages available (3 < limit of 10) - def test_pagination_by_first_id_with_first_id(self, db_session_with_containers): + def test_pagination_by_first_id_with_first_id(self, db_session_with_containers: Session): """ Test message pagination with first_id specified. 
@@ -399,7 +401,9 @@ class TestConversationServiceMessageCreation: assert len(result.data) == 2 # Only 2 messages returned after first_id assert result.has_more is False # No more messages available (2 < limit of 10) - def test_pagination_by_first_id_raises_error_when_first_message_not_found(self, db_session_with_containers): + def test_pagination_by_first_id_raises_error_when_first_message_not_found( + self, db_session_with_containers: Session + ): """ Test that FirstMessageNotExistsError is raised when first_id doesn't exist. @@ -424,7 +428,7 @@ class TestConversationServiceMessageCreation: limit=10, ) - def test_pagination_with_has_more_flag(self, db_session_with_containers): + def test_pagination_with_has_more_flag(self, db_session_with_containers: Session): """ Test that has_more flag is correctly set when there are more messages. @@ -463,7 +467,7 @@ class TestConversationServiceMessageCreation: assert len(result.data) == limit # Extra message should be removed assert result.has_more is True # Flag should be set - def test_pagination_with_ascending_order(self, db_session_with_containers): + def test_pagination_with_ascending_order(self, db_session_with_containers: Session): """ Test message pagination with ascending order. @@ -512,7 +516,7 @@ class TestConversationServiceSummarization: """ @patch("services.conversation_service.LLMGenerator.generate_conversation_name") - def test_auto_generate_name_success(self, mock_llm_generator, db_session_with_containers): + def test_auto_generate_name_success(self, mock_llm_generator, db_session_with_containers: Session): """ Test successful auto-generation of conversation name. 
@@ -552,7 +556,7 @@ class TestConversationServiceSummarization: app_model.tenant_id, first_message.query, conversation.id, app_model.id ) - def test_auto_generate_name_raises_error_when_no_message(self, db_session_with_containers): + def test_auto_generate_name_raises_error_when_no_message(self, db_session_with_containers: Session): """ Test that MessageNotExistsError is raised when conversation has no messages. @@ -571,7 +575,9 @@ class TestConversationServiceSummarization: ConversationService.auto_generate_name(app_model, conversation) @patch("services.conversation_service.LLMGenerator.generate_conversation_name") - def test_auto_generate_name_handles_llm_failure_gracefully(self, mock_llm_generator, db_session_with_containers): + def test_auto_generate_name_handles_llm_failure_gracefully( + self, mock_llm_generator, db_session_with_containers: Session + ): """ Test that LLM generation failures are suppressed and don't crash. @@ -604,7 +610,7 @@ class TestConversationServiceSummarization: assert conversation.name == original_name # Name remains unchanged @patch("services.conversation_service.naive_utc_now") - def test_rename_with_manual_name(self, mock_naive_utc_now, db_session_with_containers): + def test_rename_with_manual_name(self, mock_naive_utc_now, db_session_with_containers: Session): """ Test renaming conversation with manual name. @@ -638,7 +644,7 @@ class TestConversationServiceSummarization: assert conversation.updated_at == mock_time @patch("services.conversation_service.LLMGenerator.generate_conversation_name") - def test_rename_with_auto_generate(self, mock_llm_generator, db_session_with_containers): + def test_rename_with_auto_generate(self, mock_llm_generator, db_session_with_containers: Session): """ Test rename delegates to auto_generate_name when auto_generate is True. 
@@ -682,7 +688,9 @@ class TestConversationServiceMessageAnnotation: @patch("services.annotation_service.add_annotation_to_index_task") @patch("services.annotation_service.current_account_with_tenant") - def test_create_annotation_from_message(self, mock_current_account, mock_add_task, db_session_with_containers): + def test_create_annotation_from_message( + self, mock_current_account, mock_add_task, db_session_with_containers: Session + ): """ Test creating annotation from existing message. @@ -721,7 +729,9 @@ class TestConversationServiceMessageAnnotation: @patch("services.annotation_service.add_annotation_to_index_task") @patch("services.annotation_service.current_account_with_tenant") - def test_create_annotation_without_message(self, mock_current_account, mock_add_task, db_session_with_containers): + def test_create_annotation_without_message( + self, mock_current_account, mock_add_task, db_session_with_containers: Session + ): """ Test creating standalone annotation without message. @@ -753,7 +763,7 @@ class TestConversationServiceMessageAnnotation: @patch("services.annotation_service.add_annotation_to_index_task") @patch("services.annotation_service.current_account_with_tenant") - def test_update_existing_annotation(self, mock_current_account, mock_add_task, db_session_with_containers): + def test_update_existing_annotation(self, mock_current_account, mock_add_task, db_session_with_containers: Session): """ Test updating an existing annotation. @@ -800,7 +810,7 @@ class TestConversationServiceMessageAnnotation: mock_add_task.delay.assert_not_called() @patch("services.annotation_service.current_account_with_tenant") - def test_get_annotation_list(self, mock_current_account, db_session_with_containers): + def test_get_annotation_list(self, mock_current_account, db_session_with_containers: Session): """ Test retrieving paginated annotation list. 
@@ -836,7 +846,7 @@ class TestConversationServiceMessageAnnotation: assert result_total == 5 @patch("services.annotation_service.current_account_with_tenant") - def test_get_annotation_list_with_keyword_search(self, mock_current_account, db_session_with_containers): + def test_get_annotation_list_with_keyword_search(self, mock_current_account, db_session_with_containers: Session): """ Test retrieving annotations with keyword filtering. @@ -885,7 +895,7 @@ class TestConversationServiceMessageAnnotation: @patch("services.annotation_service.add_annotation_to_index_task") @patch("services.annotation_service.current_account_with_tenant") - def test_insert_annotation_directly(self, mock_current_account, mock_add_task, db_session_with_containers): + def test_insert_annotation_directly(self, mock_current_account, mock_add_task, db_session_with_containers: Session): """ Test direct annotation insertion without message reference. @@ -919,7 +929,7 @@ class TestConversationServiceExport: Tests retrieving conversation data for export purposes. 
""" - def test_get_conversation_success(self, db_session_with_containers): + def test_get_conversation_success(self, db_session_with_containers: Session): """Test successful retrieval of conversation.""" # Arrange app_model, user = ConversationServiceIntegrationTestDataFactory.create_app_and_account( @@ -937,7 +947,7 @@ class TestConversationServiceExport: # Assert assert result == conversation - def test_get_conversation_not_found(self, db_session_with_containers): + def test_get_conversation_not_found(self, db_session_with_containers: Session): """Test ConversationNotExistsError when conversation doesn't exist.""" # Arrange app_model, user = ConversationServiceIntegrationTestDataFactory.create_app_and_account( @@ -949,7 +959,7 @@ class TestConversationServiceExport: ConversationService.get_conversation(app_model=app_model, conversation_id=str(uuid4()), user=user) @patch("services.annotation_service.current_account_with_tenant") - def test_export_annotation_list(self, mock_current_account, db_session_with_containers): + def test_export_annotation_list(self, mock_current_account, db_session_with_containers: Session): """Test exporting all annotations for an app.""" # Arrange app_model, account = ConversationServiceIntegrationTestDataFactory.create_app_and_account( @@ -977,7 +987,7 @@ class TestConversationServiceExport: # Assert assert len(result) == 10 - def test_get_message_success(self, db_session_with_containers): + def test_get_message_success(self, db_session_with_containers: Session): """Test successful retrieval of a message.""" # Arrange app_model, user = ConversationServiceIntegrationTestDataFactory.create_app_and_account( @@ -1001,7 +1011,7 @@ class TestConversationServiceExport: # Assert assert result == message - def test_get_message_not_found(self, db_session_with_containers): + def test_get_message_not_found(self, db_session_with_containers: Session): """Test MessageNotExistsError when message doesn't exist.""" # Arrange app_model, user = 
ConversationServiceIntegrationTestDataFactory.create_app_and_account( @@ -1012,7 +1022,7 @@ class TestConversationServiceExport: with pytest.raises(MessageNotExistsError): MessageService.get_message(app_model=app_model, user=user, message_id=str(uuid4())) - def test_get_conversation_for_end_user(self, db_session_with_containers): + def test_get_conversation_for_end_user(self, db_session_with_containers: Session): """ Test retrieving conversation created by end user via API. @@ -1038,7 +1048,7 @@ class TestConversationServiceExport: assert result == conversation @patch("services.conversation_service.delete_conversation_related_data") - def test_delete_conversation(self, mock_delete_task, db_session_with_containers): + def test_delete_conversation(self, mock_delete_task, db_session_with_containers: Session): """ Test conversation deletion with async cleanup. @@ -1071,7 +1081,7 @@ class TestConversationServiceExport: mock_delete_task.delay.assert_called_once_with(conversation_id) @patch("services.conversation_service.delete_conversation_related_data") - def test_delete_conversation_not_owned_by_account(self, mock_delete_task, db_session_with_containers): + def test_delete_conversation_not_owned_by_account(self, mock_delete_task, db_session_with_containers: Session): """ Test deletion is denied when conversation belongs to a different account. """ @@ -1102,7 +1112,7 @@ class TestConversationServiceExport: mock_delete_task.delay.assert_not_called() @patch("services.conversation_service.delete_conversation_related_data") - def test_delete_handles_exception_and_rollback(self, mock_delete_task, db_session_with_containers): + def test_delete_handles_exception_and_rollback(self, mock_delete_task, db_session_with_containers: Session): """ Test that delete propagates exceptions and does not trigger the cleanup task. 
diff --git a/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py b/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py index 0b7bd9ca64..853630ad65 100644 --- a/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py +++ b/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py @@ -5,7 +5,8 @@ from unittest.mock import patch from uuid import uuid4 import pytest -from sqlalchemy.orm import sessionmaker +from flask import Flask +from sqlalchemy.orm import Session, sessionmaker from core.app.entities.app_invoke_entities import InvokeFrom from extensions.ext_database import db @@ -24,7 +25,7 @@ from services.errors.conversation import ( class ConversationServiceVariableIntegrationFactory: @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -149,7 +150,7 @@ class ConversationServiceVariableIntegrationFactory: @pytest.fixture -def real_conversation_service_session_factory(flask_app_with_containers): +def real_conversation_service_session_factory(flask_app_with_containers: Flask): del flask_app_with_containers real_session_maker = sessionmaker(bind=db.engine, expire_on_commit=False) @@ -162,7 +163,7 @@ def real_conversation_service_session_factory(flask_app_with_containers): class TestConversationServiceVariables: def test_get_conversational_variable_success( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -200,7 +201,7 @@ class TestConversationServiceVariables: assert result.has_more is False def 
test_get_conversational_variable_with_last_id( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -242,7 +243,7 @@ class TestConversationServiceVariables: assert result.has_more is False def test_get_conversational_variable_last_id_not_found_raises_error( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -259,7 +260,7 @@ class TestConversationServiceVariables: ) def test_get_conversational_variable_sets_has_more( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -287,7 +288,7 @@ class TestConversationServiceVariables: assert result.has_more is True def test_update_conversation_variable_success( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -320,7 +321,7 @@ class TestConversationServiceVariables: assert result["updated_at"] == updated_at def test_update_conversation_variable_not_found_raises_error( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -337,7 +338,7 @@ class 
TestConversationServiceVariables: ) def test_update_conversation_variable_type_mismatch_raises_error( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -360,7 +361,7 @@ class TestConversationServiceVariables: ) def test_update_conversation_variable_integer_number_compatibility( - self, db_session_with_containers, real_conversation_service_session_factory + self, db_session_with_containers: Session, real_conversation_service_session_factory ): del real_conversation_service_session_factory factory = ConversationServiceVariableIntegrationFactory @@ -390,7 +391,7 @@ class TestConversationServiceVariables: class TestConversationServicePaginationWithContainers: - def test_pagination_by_last_id_raises_error_when_last_id_missing(self, db_session_with_containers): + def test_pagination_by_last_id_raises_error_when_last_id_missing(self, db_session_with_containers: Session): factory = ConversationServiceVariableIntegrationFactory app, account = factory.create_app_and_account(db_session_with_containers) @@ -404,7 +405,7 @@ class TestConversationServicePaginationWithContainers: invoke_from=InvokeFrom.WEB_APP, ) - def test_pagination_by_last_id_with_default_desc_updated_at(self, db_session_with_containers): + def test_pagination_by_last_id_with_default_desc_updated_at(self, db_session_with_containers: Session): factory = ConversationServiceVariableIntegrationFactory app, account = factory.create_app_and_account(db_session_with_containers) base_time = datetime(2024, 1, 1, 8, 0, 0) @@ -442,7 +443,7 @@ class TestConversationServicePaginationWithContainers: assert newest.id != middle.id assert [conversation.id for conversation in result.data] == [oldest.id] - def test_pagination_by_last_id_with_name_sort(self, db_session_with_containers): + def 
test_pagination_by_last_id_with_name_sort(self, db_session_with_containers: Session): factory = ConversationServiceVariableIntegrationFactory app, account = factory.create_app_and_account(db_session_with_containers) alpha = factory.create_conversation(db_session_with_containers, app, account, name="Alpha") @@ -462,7 +463,7 @@ class TestConversationServicePaginationWithContainers: assert alpha.id != beta.id assert [conversation.id for conversation in result.data] == [gamma.id] - def test_pagination_filters_to_end_user_api_source(self, db_session_with_containers): + def test_pagination_filters_to_end_user_api_source(self, db_session_with_containers: Session): factory = ConversationServiceVariableIntegrationFactory app, account = factory.create_app_and_account(db_session_with_containers) end_user = factory.create_end_user(db_session_with_containers, app) @@ -493,7 +494,7 @@ class TestConversationServicePaginationWithContainers: assert account_conversation.id != end_user_conversation.id assert [conversation.id for conversation in result.data] == [end_user_conversation.id] - def test_pagination_filters_to_account_console_source(self, db_session_with_containers): + def test_pagination_filters_to_account_console_source(self, db_session_with_containers: Session): factory = ConversationServiceVariableIntegrationFactory app, account = factory.create_app_and_account(db_session_with_containers) end_user = factory.create_end_user(db_session_with_containers, app) diff --git a/api/tests/test_containers_integration_tests/services/test_conversation_variable_updater.py b/api/tests/test_containers_integration_tests/services/test_conversation_variable_updater.py index 02ab3f8314..638a962f18 100644 --- a/api/tests/test_containers_integration_tests/services/test_conversation_variable_updater.py +++ b/api/tests/test_containers_integration_tests/services/test_conversation_variable_updater.py @@ -3,7 +3,7 @@ from uuid import uuid4 import pytest -from sqlalchemy.orm import sessionmaker 
+from sqlalchemy.orm import Session, sessionmaker from extensions.ext_database import db from graphon.variables import StringVariable @@ -13,7 +13,12 @@ from services.conversation_variable_updater import ConversationVariableNotFoundE class TestConversationVariableUpdater: def _create_conversation_variable( - self, db_session_with_containers, *, conversation_id: str, variable: StringVariable, app_id: str | None = None + self, + db_session_with_containers: Session, + *, + conversation_id: str, + variable: StringVariable, + app_id: str | None = None, ) -> ConversationVariable: row = ConversationVariable( id=variable.id, @@ -25,7 +30,7 @@ class TestConversationVariableUpdater: db_session_with_containers.commit() return row - def test_should_update_conversation_variable_data_and_commit(self, db_session_with_containers): + def test_should_update_conversation_variable_data_and_commit(self, db_session_with_containers: Session): conversation_id = str(uuid4()) variable = StringVariable(id=str(uuid4()), name="topic", value="old value") self._create_conversation_variable( @@ -42,7 +47,7 @@ class TestConversationVariableUpdater: assert row is not None assert row.data == updated_variable.model_dump_json() - def test_should_raise_not_found_when_variable_missing(self, db_session_with_containers): + def test_should_raise_not_found_when_variable_missing(self, db_session_with_containers: Session): conversation_id = str(uuid4()) variable = StringVariable(id=str(uuid4()), name="topic", value="value") updater = ConversationVariableUpdater(sessionmaker(bind=db.engine)) @@ -50,7 +55,7 @@ class TestConversationVariableUpdater: with pytest.raises(ConversationVariableNotFoundError, match="conversation variable not found in the database"): updater.update(conversation_id=conversation_id, variable=variable) - def test_should_do_nothing_when_flush_is_called(self, db_session_with_containers): + def test_should_do_nothing_when_flush_is_called(self, db_session_with_containers: Session): updater = 
ConversationVariableUpdater(sessionmaker(bind=db.engine)) result = updater.flush() diff --git a/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py b/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py index 0f63d98642..07dc3a4e9e 100644 --- a/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py +++ b/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py @@ -3,6 +3,7 @@ from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from core.errors.error import QuotaExceededError from models import TenantCreditPool @@ -14,7 +15,7 @@ class TestCreditPoolService: def _create_tenant_id(self) -> str: return str(uuid4()) - def test_create_default_pool(self, db_session_with_containers): + def test_create_default_pool(self, db_session_with_containers: Session): tenant_id = self._create_tenant_id() pool = CreditPoolService.create_default_pool(tenant_id) @@ -25,7 +26,7 @@ class TestCreditPoolService: assert pool.quota_used == 0 assert pool.quota_limit > 0 - def test_get_pool_returns_pool_when_exists(self, db_session_with_containers): + def test_get_pool_returns_pool_when_exists(self, db_session_with_containers: Session): tenant_id = self._create_tenant_id() CreditPoolService.create_default_pool(tenant_id) @@ -35,17 +36,17 @@ class TestCreditPoolService: assert result.tenant_id == tenant_id assert result.pool_type == ProviderQuotaType.TRIAL - def test_get_pool_returns_none_when_not_exists(self, db_session_with_containers): + def test_get_pool_returns_none_when_not_exists(self, db_session_with_containers: Session): result = CreditPoolService.get_pool(tenant_id=self._create_tenant_id(), pool_type=ProviderQuotaType.TRIAL) assert result is None - def test_check_credits_available_returns_false_when_no_pool(self, db_session_with_containers): + def test_check_credits_available_returns_false_when_no_pool(self, db_session_with_containers: Session): result = 
CreditPoolService.check_credits_available(tenant_id=self._create_tenant_id(), credits_required=10) assert result is False - def test_check_credits_available_returns_true_when_sufficient(self, db_session_with_containers): + def test_check_credits_available_returns_true_when_sufficient(self, db_session_with_containers: Session): tenant_id = self._create_tenant_id() CreditPoolService.create_default_pool(tenant_id) @@ -53,7 +54,7 @@ class TestCreditPoolService: assert result is True - def test_check_credits_available_returns_false_when_insufficient(self, db_session_with_containers): + def test_check_credits_available_returns_false_when_insufficient(self, db_session_with_containers: Session): tenant_id = self._create_tenant_id() pool = CreditPoolService.create_default_pool(tenant_id) # Exhaust credits @@ -64,11 +65,11 @@ class TestCreditPoolService: assert result is False - def test_check_and_deduct_credits_raises_when_no_pool(self, db_session_with_containers): + def test_check_and_deduct_credits_raises_when_no_pool(self, db_session_with_containers: Session): with pytest.raises(QuotaExceededError, match="Credit pool not found"): CreditPoolService.check_and_deduct_credits(tenant_id=self._create_tenant_id(), credits_required=10) - def test_check_and_deduct_credits_raises_when_no_remaining(self, db_session_with_containers): + def test_check_and_deduct_credits_raises_when_no_remaining(self, db_session_with_containers: Session): tenant_id = self._create_tenant_id() pool = CreditPoolService.create_default_pool(tenant_id) pool.quota_used = pool.quota_limit @@ -77,7 +78,7 @@ class TestCreditPoolService: with pytest.raises(QuotaExceededError, match="No credits remaining"): CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=10) - def test_check_and_deduct_credits_deducts_required_amount(self, db_session_with_containers): + def test_check_and_deduct_credits_deducts_required_amount(self, db_session_with_containers: Session): tenant_id = 
self._create_tenant_id() CreditPoolService.create_default_pool(tenant_id) credits_required = 10 @@ -89,16 +90,34 @@ class TestCreditPoolService: pool = CreditPoolService.get_pool(tenant_id=tenant_id) assert pool.quota_used == credits_required - def test_check_and_deduct_credits_caps_at_remaining(self, db_session_with_containers): + def test_check_and_deduct_credits_raises_without_deducting_when_insufficient( + self, db_session_with_containers: Session + ): tenant_id = self._create_tenant_id() pool = CreditPoolService.create_default_pool(tenant_id) remaining = 5 pool.quota_used = pool.quota_limit - remaining + quota_used = pool.quota_used db_session_with_containers.commit() - result = CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=200) + with pytest.raises(QuotaExceededError, match="Insufficient credits remaining"): + CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=200) + + db_session_with_containers.expire_all() + updated_pool = CreditPoolService.get_pool(tenant_id=tenant_id) + assert updated_pool.quota_used == quota_used + + def test_deduct_credits_capped_depletes_available_balance(self, db_session_with_containers: Session): + tenant_id = self._create_tenant_id() + pool = CreditPoolService.create_default_pool(tenant_id) + remaining = 5 + pool.quota_used = pool.quota_limit - remaining + quota_limit = pool.quota_limit + db_session_with_containers.commit() + + result = CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=200) assert result == remaining db_session_with_containers.expire_all() updated_pool = CreditPoolService.get_pool(tenant_id=tenant_id) - assert updated_pool.quota_used == pool.quota_limit + assert updated_pool.quota_used == quota_limit diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_permission_service.py b/api/tests/test_containers_integration_tests/services/test_dataset_permission_service.py index 71c8874f79..f9898e2cfa 100644 
--- a/api/tests/test_containers_integration_tests/services/test_dataset_permission_service.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_permission_service.py @@ -8,6 +8,7 @@ checks with testcontainers-backed infrastructure instead of database-chain mocks from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexTechniqueType from extensions.ext_database import db @@ -107,7 +108,7 @@ class DatasetPermissionTestDataFactory: class TestDatasetPermissionServiceGetPartialMemberList: """Verify partial-member list reads against persisted DatasetPermission rows.""" - def test_get_dataset_partial_member_list_with_members(self, db_session_with_containers): + def test_get_dataset_partial_member_list_with_members(self, db_session_with_containers: Session): """ Test retrieving partial member list with multiple members. """ @@ -138,7 +139,7 @@ class TestDatasetPermissionServiceGetPartialMemberList: assert set(result) == set(expected_account_ids) assert len(result) == 3 - def test_get_dataset_partial_member_list_with_single_member(self, db_session_with_containers): + def test_get_dataset_partial_member_list_with_single_member(self, db_session_with_containers: Session): """ Test retrieving partial member list with single member. """ @@ -160,7 +161,7 @@ class TestDatasetPermissionServiceGetPartialMemberList: assert set(result) == set(expected_account_ids) assert len(result) == 1 - def test_get_dataset_partial_member_list_empty(self, db_session_with_containers): + def test_get_dataset_partial_member_list_empty(self, db_session_with_containers: Session): """ Test retrieving partial member list when no members exist. 
""" @@ -179,7 +180,7 @@ class TestDatasetPermissionServiceGetPartialMemberList: class TestDatasetPermissionServiceUpdatePartialMemberList: """Verify partial-member list updates against persisted DatasetPermission rows.""" - def test_update_partial_member_list_add_new_members(self, db_session_with_containers): + def test_update_partial_member_list_add_new_members(self, db_session_with_containers: Session): """ Test adding new partial members to a dataset. """ @@ -203,7 +204,7 @@ class TestDatasetPermissionServiceUpdatePartialMemberList: result = DatasetPermissionService.get_dataset_partial_member_list(dataset.id) assert set(result) == {member_1.id, member_2.id} - def test_update_partial_member_list_replace_existing(self, db_session_with_containers): + def test_update_partial_member_list_replace_existing(self, db_session_with_containers: Session): """ Test replacing existing partial members with new ones. """ @@ -239,7 +240,7 @@ class TestDatasetPermissionServiceUpdatePartialMemberList: result = DatasetPermissionService.get_dataset_partial_member_list(dataset.id) assert set(result) == {new_member_1.id, new_member_2.id} - def test_update_partial_member_list_empty_list(self, db_session_with_containers): + def test_update_partial_member_list_empty_list(self, db_session_with_containers: Session): """ Test updating with empty member list (clearing all members). """ @@ -264,7 +265,7 @@ class TestDatasetPermissionServiceUpdatePartialMemberList: result = DatasetPermissionService.get_dataset_partial_member_list(dataset.id) assert result == [] - def test_update_partial_member_list_database_error_rollback(self, db_session_with_containers): + def test_update_partial_member_list_database_error_rollback(self, db_session_with_containers: Session): """ Test error handling and rollback on database error. 
""" @@ -313,7 +314,7 @@ class TestDatasetPermissionServiceUpdatePartialMemberList: class TestDatasetPermissionServiceClearPartialMemberList: """Verify partial-member clearing against persisted DatasetPermission rows.""" - def test_clear_partial_member_list_success(self, db_session_with_containers): + def test_clear_partial_member_list_success(self, db_session_with_containers: Session): """ Test successful clearing of partial member list. """ @@ -338,7 +339,7 @@ class TestDatasetPermissionServiceClearPartialMemberList: result = DatasetPermissionService.get_dataset_partial_member_list(dataset.id) assert result == [] - def test_clear_partial_member_list_empty_list(self, db_session_with_containers): + def test_clear_partial_member_list_empty_list(self, db_session_with_containers: Session): """ Test clearing partial member list when no members exist. """ @@ -353,7 +354,7 @@ class TestDatasetPermissionServiceClearPartialMemberList: result = DatasetPermissionService.get_dataset_partial_member_list(dataset.id) assert result == [] - def test_clear_partial_member_list_database_error_rollback(self, db_session_with_containers): + def test_clear_partial_member_list_database_error_rollback(self, db_session_with_containers: Session): """ Test error handling and rollback on database error. 
""" @@ -398,7 +399,7 @@ class TestDatasetPermissionServiceClearPartialMemberList: class TestDatasetServiceCheckDatasetPermission: """Verify dataset access checks against persisted partial-member permissions.""" - def test_check_dataset_permission_different_tenant_should_fail(self, db_session_with_containers): + def test_check_dataset_permission_different_tenant_should_fail(self, db_session_with_containers: Session): """Test that users from different tenants cannot access dataset.""" owner, tenant = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.OWNER) other_user, _ = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.EDITOR) @@ -410,7 +411,7 @@ class TestDatasetServiceCheckDatasetPermission: with pytest.raises(NoPermissionError): DatasetService.check_dataset_permission(dataset, other_user) - def test_check_dataset_permission_owner_can_access_any_dataset(self, db_session_with_containers): + def test_check_dataset_permission_owner_can_access_any_dataset(self, db_session_with_containers: Session): """Test that tenant owners can access any dataset regardless of permission level.""" owner, tenant = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.OWNER) creator, _ = DatasetPermissionTestDataFactory.create_account_with_tenant( @@ -423,7 +424,7 @@ class TestDatasetServiceCheckDatasetPermission: DatasetService.check_dataset_permission(dataset, owner) - def test_check_dataset_permission_only_me_creator_can_access(self, db_session_with_containers): + def test_check_dataset_permission_only_me_creator_can_access(self, db_session_with_containers: Session): """Test ONLY_ME permission allows only the dataset creator to access.""" creator, tenant = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.EDITOR) @@ -433,7 +434,7 @@ class TestDatasetServiceCheckDatasetPermission: DatasetService.check_dataset_permission(dataset, creator) - def 
test_check_dataset_permission_only_me_others_cannot_access(self, db_session_with_containers): + def test_check_dataset_permission_only_me_others_cannot_access(self, db_session_with_containers: Session): """Test ONLY_ME permission denies access to non-creators.""" creator, tenant = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.NORMAL) other, _ = DatasetPermissionTestDataFactory.create_account_with_tenant( @@ -447,7 +448,7 @@ class TestDatasetServiceCheckDatasetPermission: with pytest.raises(NoPermissionError): DatasetService.check_dataset_permission(dataset, other) - def test_check_dataset_permission_all_team_allows_access(self, db_session_with_containers): + def test_check_dataset_permission_all_team_allows_access(self, db_session_with_containers: Session): """Test ALL_TEAM permission allows any team member to access the dataset.""" creator, tenant = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.NORMAL) member, _ = DatasetPermissionTestDataFactory.create_account_with_tenant( @@ -460,7 +461,9 @@ class TestDatasetServiceCheckDatasetPermission: DatasetService.check_dataset_permission(dataset, member) - def test_check_dataset_permission_partial_members_with_permission_success(self, db_session_with_containers): + def test_check_dataset_permission_partial_members_with_permission_success( + self, db_session_with_containers: Session + ): """ Test that user with explicit permission can access partial_members dataset. 
""" @@ -485,7 +488,9 @@ class TestDatasetServiceCheckDatasetPermission: permissions = DatasetPermissionService.get_dataset_partial_member_list(dataset.id) assert user.id in permissions - def test_check_dataset_permission_partial_members_without_permission_error(self, db_session_with_containers): + def test_check_dataset_permission_partial_members_without_permission_error( + self, db_session_with_containers: Session + ): """ Test error when user without permission tries to access partial_members dataset. """ @@ -506,7 +511,7 @@ class TestDatasetServiceCheckDatasetPermission: with pytest.raises(NoPermissionError, match="You do not have permission to access this dataset"): DatasetService.check_dataset_permission(dataset, user) - def test_check_dataset_permission_partial_team_creator_can_access(self, db_session_with_containers): + def test_check_dataset_permission_partial_team_creator_can_access(self, db_session_with_containers: Session): """Test PARTIAL_TEAM permission allows creator to access without explicit permission.""" creator, tenant = DatasetPermissionTestDataFactory.create_account_with_tenant(role=TenantAccountRole.EDITOR) diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_service.py b/api/tests/test_containers_integration_tests/services/test_dataset_service.py index 0de3c64c4f..e6ee896a52 100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_service.py @@ -712,7 +712,7 @@ class TestDatasetServiceRetrievalConfiguration: class TestDocumentServicePauseRecoverRetry: """Tests for pause/recover/retry orchestration using real DB and Redis.""" - def _create_indexing_document(self, db_session_with_containers, indexing_status="indexing"): + def _create_indexing_document(self, db_session_with_containers: Session, indexing_status="indexing"): factory = DatasetServiceIntegrationDataFactory account, tenant = 
factory.create_account_with_tenant(db_session_with_containers) dataset = factory.create_dataset(db_session_with_containers, tenant.id, account.id) @@ -721,7 +721,7 @@ class TestDocumentServicePauseRecoverRetry: db_session_with_containers.commit() return doc, account - def test_pause_document_success(self, db_session_with_containers): + def test_pause_document_success(self, db_session_with_containers: Session): from extensions.ext_redis import redis_client from services.dataset_service import DocumentService @@ -740,7 +740,7 @@ class TestDocumentServicePauseRecoverRetry: assert redis_client.get(cache_key) is not None redis_client.delete(cache_key) - def test_pause_document_invalid_status_error(self, db_session_with_containers): + def test_pause_document_invalid_status_error(self, db_session_with_containers: Session): from services.dataset_service import DocumentService from services.errors.document import DocumentIndexingError @@ -751,7 +751,7 @@ class TestDocumentServicePauseRecoverRetry: with pytest.raises(DocumentIndexingError): DocumentService.pause_document(doc) - def test_recover_document_success(self, db_session_with_containers): + def test_recover_document_success(self, db_session_with_containers: Session): from extensions.ext_redis import redis_client from services.dataset_service import DocumentService @@ -775,7 +775,7 @@ class TestDocumentServicePauseRecoverRetry: assert redis_client.get(cache_key) is None recover_task.delay.assert_called_once_with(doc.dataset_id, doc.id) - def test_retry_document_indexing_success(self, db_session_with_containers): + def test_retry_document_indexing_success(self, db_session_with_containers: Session): from extensions.ext_redis import redis_client from services.dataset_service import DocumentService diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_service_create_dataset.py b/api/tests/test_containers_integration_tests/services/test_dataset_service_create_dataset.py index c486ff5613..08de79f4b7 
100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service_create_dataset.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_service_create_dataset.py @@ -6,6 +6,7 @@ from unittest.mock import Mock, patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from models.account import Account, Tenant, TenantAccountJoin from services.dataset_service import DatasetService @@ -48,7 +49,7 @@ class TestDatasetServiceCreateRagPipelineDataset: permission="only_me", ) - def test_create_rag_pipeline_dataset_raises_when_current_user_id_is_none(self, db_session_with_containers): + def test_create_rag_pipeline_dataset_raises_when_current_user_id_is_none(self, db_session_with_containers: Session): tenant, _ = self._create_tenant_and_account(db_session_with_containers) mock_user = Mock(id=None) diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_service_delete_dataset.py b/api/tests/test_containers_integration_tests/services/test_dataset_service_delete_dataset.py index 3cac964d89..c43a5d5978 100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service_delete_dataset.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_service_delete_dataset.py @@ -3,6 +3,8 @@ from unittest.mock import patch from uuid import uuid4 +from sqlalchemy.orm import Session + from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole from models.dataset import Dataset, Document @@ -101,7 +103,7 @@ class DatasetDeleteIntegrationDataFactory: class TestDatasetServiceDeleteDataset: """Integration coverage for DatasetService.delete_dataset using testcontainers.""" - def test_delete_dataset_with_documents_success(self, db_session_with_containers): + def test_delete_dataset_with_documents_success(self, db_session_with_containers: Session): """Delete a 
dataset with documents and dispatch cleanup through the real signal handler.""" # Arrange owner, tenant = DatasetDeleteIntegrationDataFactory.create_account_with_tenant(db_session_with_containers) @@ -144,7 +146,7 @@ class TestDatasetServiceDeleteDataset: dataset.pipeline_id, ) - def test_delete_empty_dataset_success(self, db_session_with_containers): + def test_delete_empty_dataset_success(self, db_session_with_containers: Session): """Delete an empty dataset without scheduling cleanup when both gating fields are absent.""" # Arrange owner, tenant = DatasetDeleteIntegrationDataFactory.create_account_with_tenant(db_session_with_containers) @@ -172,7 +174,7 @@ class TestDatasetServiceDeleteDataset: assert db_session_with_containers.get(Dataset, dataset.id) is None clean_dataset_delay.assert_not_called() - def test_delete_dataset_with_partial_none_values(self, db_session_with_containers): + def test_delete_dataset_with_partial_none_values(self, db_session_with_containers: Session): """Delete a dataset without cleanup when indexing_technique is missing but doc_form resolves.""" # Arrange owner, tenant = DatasetDeleteIntegrationDataFactory.create_account_with_tenant(db_session_with_containers) @@ -200,7 +202,7 @@ class TestDatasetServiceDeleteDataset: assert db_session_with_containers.get(Dataset, dataset.id) is None clean_dataset_delay.assert_not_called() - def test_delete_dataset_with_doc_form_none_indexing_technique_exists(self, db_session_with_containers): + def test_delete_dataset_with_doc_form_none_indexing_technique_exists(self, db_session_with_containers: Session): """Delete a dataset without cleanup when indexing exists but doc_form resolves to None.""" # Arrange owner, tenant = DatasetDeleteIntegrationDataFactory.create_account_with_tenant(db_session_with_containers) @@ -228,7 +230,7 @@ class TestDatasetServiceDeleteDataset: assert db_session_with_containers.get(Dataset, dataset.id) is None clean_dataset_delay.assert_not_called() - def 
test_delete_dataset_not_found(self, db_session_with_containers): + def test_delete_dataset_not_found(self, db_session_with_containers: Session): """Return False without scheduling cleanup when the target dataset does not exist.""" # Arrange owner, _ = DatasetDeleteIntegrationDataFactory.create_account_with_tenant(db_session_with_containers) diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py b/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py index 2bec703f0c..0c089e506b 100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py @@ -6,6 +6,7 @@ from unittest.mock import create_autospec, patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from werkzeug.exceptions import Forbidden, NotFound from core.rag.index_processor.constant.index_type import IndexStructureType @@ -119,13 +120,13 @@ def current_user_mock(): yield current_user -def test_get_document_returns_none_when_document_id_is_missing(db_session_with_containers): +def test_get_document_returns_none_when_document_id_is_missing(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.get_document(dataset.id, None) is None -def test_get_document_queries_by_dataset_and_document_id(db_session_with_containers): +def test_get_document_queries_by_dataset_and_document_id(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset) @@ -135,7 +136,7 @@ def test_get_document_queries_by_dataset_and_document_id(db_session_with_contain assert result.id == document.id -def 
test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_containers): +def test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) result = DocumentService.get_documents_by_ids(dataset.id, []) @@ -143,7 +144,7 @@ def test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_cont assert result == [] -def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers): +def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) doc_a = DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset, name="a.txt") doc_b = DocumentServiceIntegrationFactory.create_document( @@ -158,13 +159,13 @@ def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers assert {document.id for document in result} == {doc_a.id, doc_b.id} -def test_update_documents_need_summary_returns_zero_for_empty_input(db_session_with_containers): +def test_update_documents_need_summary_returns_zero_for_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.update_documents_need_summary(dataset.id, []) == 0 -def test_update_documents_need_summary_updates_matching_non_qa_documents(db_session_with_containers): +def test_update_documents_need_summary_updates_matching_non_qa_documents(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) paragraph_doc = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -195,7 +196,7 @@ def test_update_documents_need_summary_updates_matching_non_qa_documents(db_sess assert refreshed_qa.need_summary is True -def 
test_get_document_download_url_uses_signed_url_helper(db_session_with_containers): +def test_get_document_download_url_uses_signed_url_helper(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -215,7 +216,7 @@ def test_get_document_download_url_uses_signed_url_helper(db_session_with_contai get_url.assert_called_once_with(upload_file_id=upload_file.id, as_attachment=True) -def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -232,7 +233,9 @@ def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type ) -def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file_id(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file_id( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -248,7 +251,7 @@ def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file ) -def test_get_upload_file_id_for_upload_file_document_returns_string_id(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_returns_string_id(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -265,7 +268,9 @@ def 
test_get_upload_file_id_for_upload_file_document_returns_string_id(db_sessio assert result == "99" -def test_get_upload_file_for_upload_file_document_raises_when_file_service_returns_nothing(db_session_with_containers): +def test_get_upload_file_for_upload_file_document_raises_when_file_service_returns_nothing( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -278,7 +283,7 @@ def test_get_upload_file_for_upload_file_document_raises_when_file_service_retur DocumentService._get_upload_file_for_upload_file_document(document) -def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session_with_containers): +def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -296,7 +301,9 @@ def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session assert result.id == upload_file.id -def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_documents(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_documents( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) with pytest.raises(NotFound, match="Document not found"): @@ -307,7 +314,9 @@ def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_doc ) -def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_access(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_access( + db_session_with_containers: Session, +): dataset = 
DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -329,7 +338,9 @@ def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_a ) -def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload_files(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload_files( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -345,7 +356,9 @@ def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload ) -def test_get_upload_files_by_document_id_for_zip_download_returns_document_keyed_mapping(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_returns_document_keyed_mapping( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file_a = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -395,7 +408,7 @@ def test_prepare_document_batch_download_zip_raises_not_found_for_missing_datase def test_prepare_document_batch_download_zip_translates_permission_error_to_forbidden( - db_session_with_containers, + db_session_with_containers: Session, current_user_mock, ): dataset = DocumentServiceIntegrationFactory.create_dataset( @@ -418,7 +431,7 @@ def test_prepare_document_batch_download_zip_translates_permission_error_to_forb def test_prepare_document_batch_download_zip_returns_upload_files_in_requested_order( - db_session_with_containers, + db_session_with_containers: Session, current_user_mock, ): dataset = DocumentServiceIntegrationFactory.create_dataset( @@ -461,7 +474,7 @@ def 
test_prepare_document_batch_download_zip_returns_upload_files_in_requested_o assert download_name.endswith(".zip") -def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_containers): +def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) enabled_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -480,7 +493,9 @@ def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_co assert [document.id for document in result] == [enabled_document.id] -def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchived_documents(db_session_with_containers): +def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchived_documents( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) available_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -501,7 +516,7 @@ def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchive assert [document.id for document in result] == [available_document.id] -def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db_session_with_containers): +def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) error_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -526,7 +541,7 @@ def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db assert {document.id for document in result} == {error_document.id, paused_document.id} -def test_get_batch_documents_filters_by_current_user_tenant(db_session_with_containers): +def 
test_get_batch_documents_filters_by_current_user_tenant(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) batch = f"batch-{uuid4()}" matching_document = DocumentServiceIntegrationFactory.create_document( @@ -549,7 +564,7 @@ def test_get_batch_documents_filters_by_current_user_tenant(db_session_with_cont assert [document.id for document in result] == [matching_document.id] -def test_get_document_file_detail_returns_upload_file(db_session_with_containers): +def test_get_document_file_detail_returns_upload_file(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -563,7 +578,7 @@ def test_get_document_file_detail_returns_upload_file(db_session_with_containers assert result.id == upload_file.id -def test_delete_document_emits_signal_and_commits(db_session_with_containers): +def test_delete_document_emits_signal_and_commits(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -588,7 +603,7 @@ def test_delete_document_emits_signal_and_commits(db_session_with_containers): ) -def test_delete_documents_ignores_empty_input(db_session_with_containers): +def test_delete_documents_ignores_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) with patch("services.dataset_service.batch_clean_document_task.delay") as delay: @@ -597,7 +612,7 @@ def test_delete_documents_ignores_empty_input(db_session_with_containers): delay.assert_not_called() -def test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_with_containers): +def 
test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) dataset.chunk_structure = IndexStructureType.PARAGRAPH_INDEX db_session_with_containers.commit() @@ -637,14 +652,14 @@ def test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_wi assert set(args[3]) == {upload_file_a.id, upload_file_b.id} -def test_get_documents_position_returns_next_position_when_documents_exist(db_session_with_containers): +def test_get_documents_position_returns_next_position_when_documents_exist(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset, position=3) assert DocumentService.get_documents_position(dataset.id) == 4 -def test_get_documents_position_defaults_to_one_when_dataset_is_empty(db_session_with_containers): +def test_get_documents_position_defaults_to_one_when_dataset_is_empty(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.get_documents_position(dataset.id) == 1 diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_service_permissions.py b/api/tests/test_containers_integration_tests/services/test_dataset_service_permissions.py index 1b4179c9c7..0603a1e27f 100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service_permissions.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_service_permissions.py @@ -6,6 +6,7 @@ from unittest.mock import patch from uuid import uuid4 import pytest +from flask import Flask from sqlalchemy.orm import Session from werkzeug.exceptions import NotFound @@ -363,7 +364,7 @@ class TestDatasetServicePermissionsAndLifecycle: 
DatasetService.check_dataset_operator_permission(user=operator, dataset=dataset) - def test_update_dataset_api_status_raises_not_found_for_missing_dataset(self, flask_app_with_containers): + def test_update_dataset_api_status_raises_not_found_for_missing_dataset(self, flask_app_with_containers: Flask): with flask_app_with_containers.app_context(): with pytest.raises(NotFound, match="Dataset not found"): DatasetService.update_dataset_api_status(str(uuid4()), True) @@ -473,7 +474,7 @@ class TestDatasetCollectionBindingServiceIntegration: assert persisted.type == "dataset" assert persisted.collection_name - def test_get_dataset_collection_binding_by_id_and_type_raises_when_missing(self, flask_app_with_containers): + def test_get_dataset_collection_binding_by_id_and_type_raises_when_missing(self, flask_app_with_containers: Flask): with flask_app_with_containers.app_context(): with pytest.raises(ValueError, match="Dataset collection binding not found"): DatasetCollectionBindingService.get_dataset_collection_binding_by_id_and_type(str(uuid4())) diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_service_retrieval.py b/api/tests/test_containers_integration_tests/services/test_dataset_service_retrieval.py index 2f90d16176..0c610311bb 100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service_retrieval.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_service_retrieval.py @@ -16,6 +16,7 @@ from uuid import uuid4 from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexTechniqueType +from models import AccountStatus, CreatorUserRole, TenantStatus from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole from models.dataset import ( AppDatasetJoin, @@ -25,7 +26,7 @@ from models.dataset import ( DatasetProcessRule, DatasetQuery, ) -from models.enums import DatasetQuerySource, DataSourceType, ProcessRuleMode +from models.enums import 
DatasetQuerySource, DataSourceType, ProcessRuleMode, TagType from models.model import Tag, TagBinding from services.dataset_service import DatasetService, DocumentService @@ -42,11 +43,11 @@ class DatasetRetrievalTestDataFactory: email=f"{uuid4()}@example.com", name=f"user-{uuid4()}", interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) tenant = Tenant( name=f"tenant-{uuid4()}", - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add_all([account, tenant]) db_session_with_containers.flush() @@ -72,7 +73,7 @@ class DatasetRetrievalTestDataFactory: email=f"{uuid4()}@example.com", name=f"user-{uuid4()}", interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.flush() @@ -130,7 +131,7 @@ class DatasetRetrievalTestDataFactory: @staticmethod def create_process_rule( - db_session_with_containers: Session, dataset_id: str, created_by: str, mode: str, rules: dict + db_session_with_containers: Session, dataset_id: str, created_by: str, mode: ProcessRuleMode, rules: dict ) -> DatasetProcessRule: """Create a dataset process rule.""" process_rule = DatasetProcessRule( @@ -153,7 +154,7 @@ class DatasetRetrievalTestDataFactory: content=content, source=DatasetQuerySource.APP, source_app_id=None, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=created_by, ) db_session_with_containers.add(dataset_query) @@ -176,7 +177,7 @@ class DatasetRetrievalTestDataFactory: """Create a knowledge tag and bind it to the target dataset.""" tag = Tag( tenant_id=tenant_id, - type="knowledge", + type=TagType.KNOWLEDGE, name=f"tag-{uuid4()}", created_by=created_by, ) diff --git a/api/tests/test_containers_integration_tests/services/test_delete_archived_workflow_run.py b/api/tests/test_containers_integration_tests/services/test_delete_archived_workflow_run.py index fe426ae516..69c39b8bfb 100644 --- 
a/api/tests/test_containers_integration_tests/services/test_delete_archived_workflow_run.py +++ b/api/tests/test_containers_integration_tests/services/test_delete_archived_workflow_run.py @@ -6,6 +6,7 @@ from datetime import UTC, datetime, timedelta from uuid import uuid4 from sqlalchemy import select +from sqlalchemy.orm import Session from graphon.enums import WorkflowExecutionStatus from models.enums import CreatorUserRole, WorkflowRunTriggeredFrom @@ -46,7 +47,7 @@ class TestArchivedWorkflowRunDeletion: db_session_with_containers.commit() return run - def _create_archive_log(self, db_session_with_containers, *, run: WorkflowRun) -> None: + def _create_archive_log(self, db_session_with_containers: Session, *, run: WorkflowRun) -> None: archive_log = WorkflowArchiveLog( tenant_id=run.tenant_id, app_id=run.app_id, @@ -72,7 +73,7 @@ class TestArchivedWorkflowRunDeletion: db_session_with_containers.add(archive_log) db_session_with_containers.commit() - def test_delete_by_run_id_returns_error_when_run_missing(self, db_session_with_containers): + def test_delete_by_run_id_returns_error_when_run_missing(self, db_session_with_containers: Session): deleter = ArchivedWorkflowRunDeletion() missing_run_id = str(uuid4()) @@ -81,7 +82,7 @@ class TestArchivedWorkflowRunDeletion: assert result.success is False assert result.error == f"Workflow run {missing_run_id} not found" - def test_delete_by_run_id_returns_error_when_not_archived(self, db_session_with_containers): + def test_delete_by_run_id_returns_error_when_not_archived(self, db_session_with_containers: Session): tenant_id = str(uuid4()) run = self._create_workflow_run( db_session_with_containers, @@ -95,7 +96,7 @@ class TestArchivedWorkflowRunDeletion: assert result.success is False assert result.error == f"Workflow run {run.id} is not archived" - def test_delete_batch_uses_repo(self, db_session_with_containers): + def test_delete_batch_uses_repo(self, db_session_with_containers: Session): tenant_id = str(uuid4()) 
base_time = datetime.now(UTC) run1 = self._create_workflow_run(db_session_with_containers, tenant_id=tenant_id, created_at=base_time) @@ -124,7 +125,7 @@ class TestArchivedWorkflowRunDeletion: ).all() assert remaining_runs == [] - def test_delete_run_calls_repo(self, db_session_with_containers): + def test_delete_run_calls_repo(self, db_session_with_containers: Session): tenant_id = str(uuid4()) run = self._create_workflow_run( db_session_with_containers, @@ -142,7 +143,7 @@ class TestArchivedWorkflowRunDeletion: deleted_run = db_session_with_containers.get(WorkflowRun, run_id) assert deleted_run is None - def test_delete_run_dry_run(self, db_session_with_containers): + def test_delete_run_dry_run(self, db_session_with_containers: Session): """Dry run should return success without actually deleting.""" tenant_id = str(uuid4()) run = self._create_workflow_run( @@ -161,7 +162,7 @@ class TestArchivedWorkflowRunDeletion: db_session_with_containers.expire_all() assert db_session_with_containers.get(WorkflowRun, run_id) is not None - def test_delete_run_exception_returns_error(self, db_session_with_containers): + def test_delete_run_exception_returns_error(self, db_session_with_containers: Session): """Exception during deletion should return failure result.""" from unittest.mock import MagicMock, patch @@ -183,7 +184,7 @@ class TestArchivedWorkflowRunDeletion: assert result.success is False assert result.error == "Database error" - def test_delete_by_run_id_success(self, db_session_with_containers): + def test_delete_by_run_id_success(self, db_session_with_containers: Session): """Successfully delete an archived workflow run by ID.""" tenant_id = str(uuid4()) base_time = datetime.now(UTC) @@ -202,7 +203,7 @@ class TestArchivedWorkflowRunDeletion: db_session_with_containers.expunge_all() assert db_session_with_containers.get(WorkflowRun, run_id) is None - def test_get_workflow_run_repo_caches_instance(self, db_session_with_containers): + def 
test_get_workflow_run_repo_caches_instance(self, db_session_with_containers: Session): """_get_workflow_run_repo should return a cached repo on subsequent calls.""" deleter = ArchivedWorkflowRunDeletion() diff --git a/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py b/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py index c0047df810..383a5f6374 100644 --- a/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py +++ b/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py @@ -2,6 +2,7 @@ import datetime from uuid import uuid4 from sqlalchemy import select +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType from models.dataset import Dataset, Document @@ -58,7 +59,7 @@ def _create_document( return document -def test_build_display_status_filters_available(db_session_with_containers): +def test_build_display_status_filters_available(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) available_doc = _create_document( db_session_with_containers, @@ -97,7 +98,7 @@ def test_build_display_status_filters_available(db_session_with_containers): assert [row.id for row in rows] == [available_doc.id] -def test_apply_display_status_filter_applies_when_status_present(db_session_with_containers): +def test_apply_display_status_filter_applies_when_status_present(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) waiting_doc = _create_document( db_session_with_containers, @@ -121,7 +122,7 @@ def test_apply_display_status_filter_applies_when_status_present(db_session_with assert [row.id for row in rows] == [waiting_doc.id] -def test_apply_display_status_filter_returns_same_when_invalid(db_session_with_containers): +def 
test_apply_display_status_filter_returns_same_when_invalid(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) doc1 = _create_document( db_session_with_containers, diff --git a/api/tests/test_containers_integration_tests/services/test_end_user_service.py b/api/tests/test_containers_integration_tests/services/test_end_user_service.py index cafabc939b..3f611d92f7 100644 --- a/api/tests/test_containers_integration_tests/services/test_end_user_service.py +++ b/api/tests/test_containers_integration_tests/services/test_end_user_service.py @@ -4,8 +4,10 @@ from unittest.mock import patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom +from models import TenantAccountRole from models.account import Account, Tenant, TenantAccountJoin from models.model import App, DefaultEndUserSessionID, EndUser from services.end_user_service import EndUserService @@ -15,7 +17,7 @@ class TestEndUserServiceFactory: """Factory class for creating test data and mock objects for end user service tests.""" @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -34,7 +36,7 @@ class TestEndUserServiceFactory: tenant_join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role="owner", + role=TenantAccountRole.OWNER, current=True, ) db_session_with_containers.add(tenant_join) @@ -102,7 +104,7 @@ class TestEndUserServiceGetOrCreateEndUser: """Provide test data factory.""" return TestEndUserServiceFactory() - def test_get_or_create_end_user_with_custom_user_id(self, db_session_with_containers, factory): + def test_get_or_create_end_user_with_custom_user_id(self, db_session_with_containers: Session, factory): """Test getting or creating end user with custom 
user_id.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -118,7 +120,7 @@ class TestEndUserServiceGetOrCreateEndUser: assert result.type == InvokeFrom.SERVICE_API assert result.is_anonymous is False - def test_get_or_create_end_user_without_user_id(self, db_session_with_containers, factory): + def test_get_or_create_end_user_without_user_id(self, db_session_with_containers: Session, factory): """Test getting or creating end user without user_id uses default session.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -131,7 +133,7 @@ class TestEndUserServiceGetOrCreateEndUser: # Verify _is_anonymous is set correctly (property always returns False) assert result._is_anonymous is True - def test_get_existing_end_user(self, db_session_with_containers, factory): + def test_get_existing_end_user(self, db_session_with_containers: Session, factory): """Test retrieving an existing end user.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -167,7 +169,7 @@ class TestEndUserServiceGetOrCreateEndUserByType: """Provide test data factory.""" return TestEndUserServiceFactory() - def test_create_end_user_service_api_type(self, db_session_with_containers, factory): + def test_create_end_user_service_api_type(self, db_session_with_containers: Session, factory): """Test creating new end user with SERVICE_API type.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -189,7 +191,7 @@ class TestEndUserServiceGetOrCreateEndUserByType: assert result.app_id == app_id assert result.session_id == user_id - def test_create_end_user_web_app_type(self, db_session_with_containers, factory): + def test_create_end_user_web_app_type(self, db_session_with_containers: Session, factory): """Test creating new end user with WEB_APP type.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -209,7 +211,7 @@ class 
TestEndUserServiceGetOrCreateEndUserByType: assert result.type == InvokeFrom.WEB_APP @patch("services.end_user_service.logger") - def test_upgrade_legacy_end_user_type(self, mock_logger, db_session_with_containers, factory): + def test_upgrade_legacy_end_user_type(self, mock_logger, db_session_with_containers: Session, factory): """Test upgrading legacy end user with different type.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -243,7 +245,7 @@ class TestEndUserServiceGetOrCreateEndUserByType: assert "Upgrading legacy EndUser" in log_call @patch("services.end_user_service.logger") - def test_get_existing_end_user_matching_type(self, mock_logger, db_session_with_containers, factory): + def test_get_existing_end_user_matching_type(self, mock_logger, db_session_with_containers: Session, factory): """Test retrieving existing end user with matching type.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -272,7 +274,7 @@ class TestEndUserServiceGetOrCreateEndUserByType: assert result.type == InvokeFrom.SERVICE_API mock_logger.info.assert_not_called() - def test_create_anonymous_user_with_default_session(self, db_session_with_containers, factory): + def test_create_anonymous_user_with_default_session(self, db_session_with_containers: Session, factory): """Test creating anonymous user when user_id is None.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -293,7 +295,7 @@ class TestEndUserServiceGetOrCreateEndUserByType: assert result._is_anonymous is True assert result.external_user_id == DefaultEndUserSessionID.DEFAULT_SESSION_ID - def test_query_ordering_prioritizes_matching_type(self, db_session_with_containers, factory): + def test_query_ordering_prioritizes_matching_type(self, db_session_with_containers: Session, factory): """Test that query ordering prioritizes records with matching type.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ 
-328,7 +330,7 @@ class TestEndUserServiceGetOrCreateEndUserByType: assert result.id == matching.id assert result.id != non_matching.id - def test_external_user_id_matches_session_id(self, db_session_with_containers, factory): + def test_external_user_id_matches_session_id(self, db_session_with_containers: Session, factory): """Test that external_user_id is set to match session_id.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -357,7 +359,9 @@ class TestEndUserServiceGetOrCreateEndUserByType: InvokeFrom.DEBUGGER, ], ) - def test_create_end_user_with_different_invoke_types(self, db_session_with_containers, invoke_type, factory): + def test_create_end_user_with_different_invoke_types( + self, db_session_with_containers: Session, invoke_type, factory + ): """Test creating end users with different InvokeFrom types.""" # Arrange app = factory.create_app_and_account(db_session_with_containers) @@ -385,7 +389,7 @@ class TestEndUserServiceGetEndUserById: """Provide test data factory.""" return TestEndUserServiceFactory() - def test_get_end_user_by_id_returns_end_user(self, db_session_with_containers, factory): + def test_get_end_user_by_id_returns_end_user(self, db_session_with_containers: Session, factory): app = factory.create_app_and_account(db_session_with_containers) existing_user = factory.create_end_user( db_session_with_containers, @@ -404,7 +408,7 @@ class TestEndUserServiceGetEndUserById: assert result is not None assert result.id == existing_user.id - def test_get_end_user_by_id_returns_none(self, db_session_with_containers, factory): + def test_get_end_user_by_id_returns_none(self, db_session_with_containers: Session, factory): app = factory.create_app_and_account(db_session_with_containers) result = EndUserService.get_end_user_by_id( @@ -423,7 +427,7 @@ class TestEndUserServiceCreateBatch: def factory(self): return TestEndUserServiceFactory() - def _create_multiple_apps(self, db_session_with_containers, factory, count: int = 
3): + def _create_multiple_apps(self, db_session_with_containers: Session, factory, count: int = 3): """Create multiple apps under the same tenant.""" first_app = factory.create_app_and_account(db_session_with_containers) tenant_id = first_app.tenant_id @@ -452,13 +456,13 @@ class TestEndUserServiceCreateBatch: all_apps = db_session_with_containers.query(App).filter(App.tenant_id == tenant_id).all() return tenant_id, all_apps - def test_create_batch_empty_app_ids(self, db_session_with_containers): + def test_create_batch_empty_app_ids(self, db_session_with_containers: Session): result = EndUserService.create_end_user_batch( type=InvokeFrom.SERVICE_API, tenant_id=str(uuid4()), app_ids=[], user_id="user-1" ) assert result == {} - def test_create_batch_creates_users_for_all_apps(self, db_session_with_containers, factory): + def test_create_batch_creates_users_for_all_apps(self, db_session_with_containers: Session, factory): tenant_id, apps = self._create_multiple_apps(db_session_with_containers, factory, count=3) app_ids = [a.id for a in apps] user_id = f"user-{uuid4()}" @@ -473,7 +477,7 @@ class TestEndUserServiceCreateBatch: assert result[app_id].session_id == user_id assert result[app_id].type == InvokeFrom.SERVICE_API - def test_create_batch_default_session_id(self, db_session_with_containers, factory): + def test_create_batch_default_session_id(self, db_session_with_containers: Session, factory): tenant_id, apps = self._create_multiple_apps(db_session_with_containers, factory, count=2) app_ids = [a.id for a in apps] @@ -486,7 +490,7 @@ class TestEndUserServiceCreateBatch: assert end_user.session_id == DefaultEndUserSessionID.DEFAULT_SESSION_ID assert end_user._is_anonymous is True - def test_create_batch_deduplicate_app_ids(self, db_session_with_containers, factory): + def test_create_batch_deduplicate_app_ids(self, db_session_with_containers: Session, factory): tenant_id, apps = self._create_multiple_apps(db_session_with_containers, factory, count=2) app_ids = 
[apps[0].id, apps[1].id, apps[0].id, apps[1].id] user_id = f"user-{uuid4()}" @@ -497,7 +501,7 @@ class TestEndUserServiceCreateBatch: assert len(result) == 2 - def test_create_batch_returns_existing_users(self, db_session_with_containers, factory): + def test_create_batch_returns_existing_users(self, db_session_with_containers: Session, factory): tenant_id, apps = self._create_multiple_apps(db_session_with_containers, factory, count=2) app_ids = [a.id for a in apps] user_id = f"user-{uuid4()}" @@ -516,7 +520,7 @@ class TestEndUserServiceCreateBatch: for app_id in app_ids: assert first_result[app_id].id == second_result[app_id].id - def test_create_batch_partial_existing_users(self, db_session_with_containers, factory): + def test_create_batch_partial_existing_users(self, db_session_with_containers: Session, factory): tenant_id, apps = self._create_multiple_apps(db_session_with_containers, factory, count=3) user_id = f"user-{uuid4()}" @@ -545,7 +549,7 @@ class TestEndUserServiceCreateBatch: "invoke_type", [InvokeFrom.SERVICE_API, InvokeFrom.WEB_APP, InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER], ) - def test_create_batch_all_invoke_types(self, db_session_with_containers, invoke_type, factory): + def test_create_batch_all_invoke_types(self, db_session_with_containers: Session, invoke_type, factory): tenant_id, apps = self._create_multiple_apps(db_session_with_containers, factory, count=1) user_id = f"user-{uuid4()}" diff --git a/api/tests/test_containers_integration_tests/services/test_feature_service.py b/api/tests/test_containers_integration_tests/services/test_feature_service.py index 315936d721..a678e37b41 100644 --- a/api/tests/test_containers_integration_tests/services/test_feature_service.py +++ b/api/tests/test_containers_integration_tests/services/test_feature_service.py @@ -2,6 +2,7 @@ from unittest.mock import patch import pytest from faker import Faker +from sqlalchemy.orm import Session from enums.cloud_plan import CloudPlan from services.feature_service 
import ( @@ -81,7 +82,7 @@ class TestFeatureService: fake = Faker() return fake.uuid4() - def test_get_features_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_success(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test successful feature retrieval with billing and enterprise enabled. @@ -156,7 +157,7 @@ class TestFeatureService: tenant_id ) - def test_get_features_sandbox_plan(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_sandbox_plan(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test feature retrieval for sandbox plan with specific limitations. @@ -222,7 +223,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) - def test_get_knowledge_rate_limit_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_knowledge_rate_limit_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test successful knowledge rate limit retrieval with billing enabled. @@ -255,7 +258,7 @@ class TestFeatureService: tenant_id ) - def test_get_system_features_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_system_features_success(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test successful system features retrieval with enterprise and marketplace enabled. 
@@ -332,7 +335,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() - def test_get_system_features_unauthenticated(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_system_features_unauthenticated( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test system features retrieval for an unauthenticated user. @@ -386,7 +391,9 @@ class TestFeatureService: # Marketplace should be visible assert result.enable_marketplace is True - def test_get_system_features_basic_config(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_system_features_basic_config( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test system features retrieval with basic configuration (no enterprise). @@ -436,7 +443,9 @@ class TestFeatureService: # Verify plugin package size (uses default value from dify_config) assert result.max_plugin_package_size == 15728640 - def test_get_features_billing_disabled(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_billing_disabled( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval when billing is disabled. @@ -492,7 +501,7 @@ class TestFeatureService: assert result.webapp_copyright_enabled is False def test_get_knowledge_rate_limit_billing_disabled( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test knowledge rate limit retrieval when billing is disabled. 
@@ -523,7 +532,9 @@ class TestFeatureService: # Verify no billing service calls mock_external_service_dependencies["billing_service"].get_knowledge_rate_limit.assert_not_called() - def test_get_features_enterprise_only(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_enterprise_only( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval with enterprise enabled but billing disabled. @@ -583,7 +594,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_not_called() def test_get_system_features_enterprise_disabled( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features retrieval when enterprise is disabled. @@ -633,14 +644,14 @@ class TestFeatureService: assert result.max_plugin_package_size == 15728640 # Verify default license status - assert result.license.status.value == "none" + assert result.license.status == "none" assert result.license.expired_at == "" assert result.license.workspaces.enabled is False # Verify no enterprise service calls mock_external_service_dependencies["enterprise_service"].get_info.assert_not_called() - def test_get_features_no_tenant_id(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_no_tenant_id(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test feature retrieval without tenant ID (billing disabled). 
@@ -686,7 +697,9 @@ class TestFeatureService: # Verify no billing service calls mock_external_service_dependencies["billing_service"].get_info.assert_not_called() - def test_get_features_partial_billing_info(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_partial_billing_info( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval with partial billing information. @@ -746,7 +759,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) - def test_get_features_edge_case_vector_space(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_edge_case_vector_space( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval with edge case vector space configuration. @@ -807,7 +822,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_get_system_features_edge_case_webapp_auth( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features retrieval with edge case webapp auth configuration. @@ -863,7 +878,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() - def test_get_features_edge_case_members_quota(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_edge_case_members_quota( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval with edge case members quota configuration. 
@@ -924,7 +941,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_plugin_installation_permission_scopes( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features retrieval with different plugin installation permission scopes. @@ -1023,7 +1040,7 @@ class TestFeatureService: assert result.plugin_installation_permission.restrict_to_marketplace_only is True def test_get_features_workspace_members_missing( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test feature retrieval when workspace members info is missing from enterprise. @@ -1064,7 +1081,9 @@ class TestFeatureService: tenant_id ) - def test_get_system_features_license_inactive(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_system_features_license_inactive( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test system features retrieval with inactive license. @@ -1117,7 +1136,7 @@ class TestFeatureService: mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() def test_get_system_features_partial_enterprise_info( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features retrieval with partial enterprise information. 
@@ -1186,7 +1205,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() - def test_get_features_edge_case_limits(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_edge_case_limits( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval with edge case limit values. @@ -1244,7 +1265,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_get_system_features_edge_case_protocols( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features retrieval with edge case protocol values. @@ -1297,7 +1318,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() - def test_get_features_edge_case_education(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_features_edge_case_education( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test feature retrieval with edge case education configuration. @@ -1353,7 +1376,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_license_limitation_model_is_available( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test LicenseLimitationModel.is_available method with various scenarios. 
@@ -1394,7 +1417,7 @@ class TestFeatureService: assert exact_limit.is_available(3) is True def test_get_features_workspace_members_disabled( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test feature retrieval when workspace members are disabled in enterprise. @@ -1433,7 +1456,9 @@ class TestFeatureService: # Verify mock interactions mock_external_service_dependencies["enterprise_service"].get_workspace_info.assert_called_once_with(tenant_id) - def test_get_system_features_license_expired(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_system_features_license_expired( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test system features retrieval with expired license. @@ -1486,7 +1511,7 @@ class TestFeatureService: mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() def test_get_features_edge_case_docs_processing( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test feature retrieval with edge case document processing configuration. @@ -1544,7 +1569,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_get_system_features_edge_case_branding( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features retrieval with edge case branding configuration. 
@@ -1606,7 +1631,7 @@ class TestFeatureService: mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() def test_get_features_edge_case_annotation_quota( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test feature retrieval with edge case annotation quota configuration. @@ -1668,7 +1693,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_get_features_edge_case_documents_upload( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test feature retrieval with edge case documents upload settings. @@ -1733,7 +1758,7 @@ class TestFeatureService: mock_external_service_dependencies["billing_service"].get_info.assert_called_once_with(tenant_id) def test_get_system_features_edge_case_license_lost( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test system features with lost license status. @@ -1784,7 +1809,7 @@ class TestFeatureService: mock_external_service_dependencies["enterprise_service"].get_info.assert_called_once() def test_get_features_edge_case_education_disabled( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test feature retrieval with education feature disabled. 
diff --git a/api/tests/test_containers_integration_tests/services/test_feedback_service.py b/api/tests/test_containers_integration_tests/services/test_feedback_service.py index 3dcd6586e2..a4663450d4 100644 --- a/api/tests/test_containers_integration_tests/services/test_feedback_service.py +++ b/api/tests/test_containers_integration_tests/services/test_feedback_service.py @@ -23,7 +23,7 @@ class TestFeedbackService: """Test FeedbackService methods.""" @pytest.fixture - def mock_db_session(self, monkeypatch): + def mock_db_session(self, monkeypatch: pytest.MonkeyPatch): """Mock database session.""" mock_session = mock.Mock() monkeypatch.setattr(db, "session", mock_session) diff --git a/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py b/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py index ed75363f3b..bfc2af6509 100644 --- a/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py +++ b/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py @@ -6,6 +6,7 @@ from uuid import uuid4 import pytest from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session from configs import dify_config from core.workflow.human_input_adapter import ( @@ -88,7 +89,7 @@ class TestDeliveryTestRegistry: with pytest.raises(DeliveryTestUnsupportedError, match="Delivery method does not support test send."): registry.dispatch(context=context, method=method) - def test_default(self, flask_app_with_containers, db_session_with_containers): + def test_default(self, flask_app_with_containers, db_session_with_containers: Session): registry = DeliveryTestRegistry.default() assert len(registry._handlers) == 1 assert isinstance(registry._handlers[0], EmailDeliveryTestHandler) @@ -121,7 +122,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestUnsupportedError): handler.send_test(context=MagicMock(), 
method=MagicMock()) - def test_send_test_feature_disabled(self, monkeypatch): + def test_send_test_feature_disabled(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -136,7 +137,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="Email delivery is not available"): handler.send_test(context=context, method=method) - def test_send_test_mail_not_inited(self, monkeypatch): + def test_send_test_mail_not_inited(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -153,7 +154,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="Mail client is not initialized."): handler.send_test(context=context, method=method) - def test_send_test_no_recipients(self, monkeypatch): + def test_send_test_no_recipients(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -172,7 +173,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="No recipients configured"): handler.send_test(context=context, method=method) - def test_send_test_success(self, monkeypatch): + def test_send_test_success(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -208,7 +209,7 @@ class TestEmailDeliveryTestHandler: assert kwargs["to"] == "test@example.com" assert "RENDERED_Subj" in kwargs["subject"] - def test_send_test_sanitizes_subject(self, monkeypatch): + def test_send_test_sanitizes_subject(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -260,7 +261,7 @@ class TestEmailDeliveryTestHandler: ) assert handler._resolve_recipients(tenant_id="t1", method=method) == ["ext@example.com"] - def test_resolve_recipients_member(self, flask_app_with_containers, db_session_with_containers): + def test_resolve_recipients_member(self, 
flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) account = Account(name="Test User", email="member@example.com") db_session_with_containers.add(account) @@ -282,7 +283,7 @@ class TestEmailDeliveryTestHandler: ) assert handler._resolve_recipients(tenant_id=tenant_id, method=method) == ["member@example.com"] - def test_resolve_recipients_whole_workspace(self, flask_app_with_containers, db_session_with_containers): + def test_resolve_recipients_whole_workspace(self, flask_app_with_containers, db_session_with_containers: Session): tenant_id = str(uuid4()) account1 = Account(name="User 1", email=f"u1-{uuid4()}@example.com") account2 = Account(name="User 2", email=f"u2-{uuid4()}@example.com") diff --git a/api/tests/test_containers_integration_tests/services/test_message_service.py b/api/tests/test_containers_integration_tests/services/test_message_service.py index bdf6d9b951..6d0d281c6b 100644 --- a/api/tests/test_containers_integration_tests/services/test_message_service.py +++ b/api/tests/test_containers_integration_tests/services/test_message_service.py @@ -6,7 +6,7 @@ from sqlalchemy.orm import Session from models.enums import ConversationFromSource, FeedbackRating, InvokeFrom from models.model import MessageFeedback -from services.app_service import AppService +from services.app_service import AppService, CreateAppParams from services.errors.message import ( FirstMessageNotExistsError, LastMessageNotExistsError, @@ -103,16 +103,16 @@ class TestMessageService: tenant = account.current_tenant # Setup app creation arguments - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "advanced-chat", # Use advanced-chat mode to use mocked workflow - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - } + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="advanced-chat", # Use advanced-chat 
mode to use mocked workflow, + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ) # Create app app_service = AppService() diff --git a/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py b/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py index 44e5a82868..52ebc0131f 100644 --- a/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py +++ b/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py @@ -1,6 +1,7 @@ from __future__ import annotations import pytest +from sqlalchemy.orm import Session from services.message_service import MessageService from tests.test_containers_integration_tests.helpers.execution_extra_content import ( @@ -9,7 +10,7 @@ from tests.test_containers_integration_tests.helpers.execution_extra_content imp @pytest.mark.usefixtures("flask_req_ctx_with_containers") -def test_pagination_returns_extra_contents(db_session_with_containers): +def test_pagination_returns_extra_contents(db_session_with_containers: Session): fixture = create_human_input_message_fixture(db_session_with_containers) pagination = MessageService.pagination_by_first_id( diff --git a/api/tests/test_containers_integration_tests/services/test_messages_clean_service.py b/api/tests/test_containers_integration_tests/services/test_messages_clean_service.py index cd63d3ad6c..1a1efe0337 100644 --- a/api/tests/test_containers_integration_tests/services/test_messages_clean_service.py +++ b/api/tests/test_containers_integration_tests/services/test_messages_clean_service.py @@ -165,7 +165,7 @@ class TestMessagesCleanServiceIntegration: return app - def _create_conversation(self, db_session_with_containers: Session, app): + def _create_conversation(self, db_session_with_containers: Session, app: App): """Helper to create a conversation.""" conversation = 
Conversation( app_id=app.id, diff --git a/api/tests/test_containers_integration_tests/services/test_metadata_partial_update.py b/api/tests/test_containers_integration_tests/services/test_metadata_partial_update.py index b55a19eaa9..fffa82bf5c 100644 --- a/api/tests/test_containers_integration_tests/services/test_metadata_partial_update.py +++ b/api/tests/test_containers_integration_tests/services/test_metadata_partial_update.py @@ -5,6 +5,7 @@ from uuid import uuid4 import pytest from sqlalchemy import select +from sqlalchemy.orm import Session from models.dataset import Dataset, DatasetMetadataBinding, Document from models.enums import DataSourceType, DocumentCreatedFrom @@ -65,7 +66,7 @@ class TestMetadataPartialUpdate: yield account def test_partial_update_merges_metadata( - self, flask_app_with_containers, db_session_with_containers, tenant_id, mock_current_account + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, mock_current_account ): dataset = _create_dataset(db_session_with_containers, tenant_id=tenant_id) document = _create_document( @@ -92,7 +93,7 @@ class TestMetadataPartialUpdate: assert updated_doc.doc_metadata["new_key"] == "new_value" def test_full_update_replaces_metadata( - self, flask_app_with_containers, db_session_with_containers, tenant_id, mock_current_account + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, mock_current_account ): dataset = _create_dataset(db_session_with_containers, tenant_id=tenant_id) document = _create_document( @@ -119,7 +120,7 @@ class TestMetadataPartialUpdate: assert "existing_key" not in updated_doc.doc_metadata def test_partial_update_skips_existing_binding( - self, flask_app_with_containers, db_session_with_containers, tenant_id, user_id, mock_current_account + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, user_id, mock_current_account ): dataset = _create_dataset(db_session_with_containers, tenant_id=tenant_id) 
document = _create_document( @@ -159,7 +160,7 @@ class TestMetadataPartialUpdate: assert len(bindings) == 1 def test_rollback_called_on_commit_failure( - self, flask_app_with_containers, db_session_with_containers, tenant_id, mock_current_account + self, flask_app_with_containers, db_session_with_containers: Session, tenant_id, mock_current_account ): dataset = _create_dataset(db_session_with_containers, tenant_id=tenant_id) document = _create_document( diff --git a/api/tests/test_containers_integration_tests/services/test_oauth_server_service.py b/api/tests/test_containers_integration_tests/services/test_oauth_server_service.py index c146a5924b..5fa5de6d80 100644 --- a/api/tests/test_containers_integration_tests/services/test_oauth_server_service.py +++ b/api/tests/test_containers_integration_tests/services/test_oauth_server_service.py @@ -8,6 +8,7 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from werkzeug.exceptions import BadRequest from models.model import OAuthProviderApp @@ -25,7 +26,7 @@ from services.oauth_server import ( class TestOAuthServerServiceGetProviderApp: """DB-backed tests for get_oauth_provider_app.""" - def _create_oauth_provider_app(self, db_session_with_containers, *, client_id: str) -> OAuthProviderApp: + def _create_oauth_provider_app(self, db_session_with_containers: Session, *, client_id: str) -> OAuthProviderApp: app = OAuthProviderApp( app_icon="icon.png", client_id=client_id, @@ -38,7 +39,7 @@ class TestOAuthServerServiceGetProviderApp: db_session_with_containers.commit() return app - def test_get_oauth_provider_app_returns_app_when_exists(self, db_session_with_containers): + def test_get_oauth_provider_app_returns_app_when_exists(self, db_session_with_containers: Session): client_id = f"client-{uuid4()}" created = self._create_oauth_provider_app(db_session_with_containers, client_id=client_id) @@ -48,7 +49,7 @@ class TestOAuthServerServiceGetProviderApp: assert 
result.client_id == client_id assert result.id == created.id - def test_get_oauth_provider_app_returns_none_when_not_exists(self, db_session_with_containers): + def test_get_oauth_provider_app_returns_none_when_not_exists(self, db_session_with_containers: Session): result = OAuthServerService.get_oauth_provider_app(f"nonexistent-{uuid4()}") assert result is None diff --git a/api/tests/test_containers_integration_tests/services/test_ops_service.py b/api/tests/test_containers_integration_tests/services/test_ops_service.py index e2e1a228b2..ff76bce416 100644 --- a/api/tests/test_containers_integration_tests/services/test_ops_service.py +++ b/api/tests/test_containers_integration_tests/services/test_ops_service.py @@ -11,7 +11,7 @@ from sqlalchemy.orm import Session from core.ops.entities.config_entity import TracingProviderEnum from models.model import TraceAppConfig from services.account_service import AccountService, TenantService -from services.app_service import AppService +from services.app_service import AppService, CreateAppParams from services.ops_service import OpsService from tests.test_containers_integration_tests.helpers import generate_valid_password @@ -57,14 +57,14 @@ class TestOpsService: app_service = AppService() app = app_service.create_app( tenant.id, - { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - }, + CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + ), account, ) return app, account diff --git a/api/tests/test_containers_integration_tests/services/test_restore_archived_workflow_run.py b/api/tests/test_containers_integration_tests/services/test_restore_archived_workflow_run.py index 7036524918..2f20949611 100644 --- a/api/tests/test_containers_integration_tests/services/test_restore_archived_workflow_run.py +++ 
b/api/tests/test_containers_integration_tests/services/test_restore_archived_workflow_run.py @@ -8,6 +8,7 @@ from datetime import datetime from uuid import uuid4 from sqlalchemy import select +from sqlalchemy.orm import Session from models.workflow import WorkflowPause, WorkflowRun from services.retention.workflow_run.restore_archived_workflow_run import WorkflowRunRestore @@ -39,7 +40,7 @@ class TestWorkflowRunRestore: assert result["created_at"].month == 1 assert result["name"] == "test" - def test_restore_table_records_returns_rowcount(self, db_session_with_containers): + def test_restore_table_records_returns_rowcount(self, db_session_with_containers: Session): """Restore should return inserted rowcount.""" restore = WorkflowRunRestore() record_id = str(uuid4()) @@ -65,7 +66,7 @@ class TestWorkflowRunRestore: restored_pause = db_session_with_containers.scalar(select(WorkflowPause).where(WorkflowPause.id == record_id)) assert restored_pause is not None - def test_restore_table_records_unknown_table(self, db_session_with_containers): + def test_restore_table_records_unknown_table(self, db_session_with_containers: Session): """Unknown table names should be ignored gracefully.""" restore = WorkflowRunRestore() diff --git a/api/tests/test_containers_integration_tests/services/test_saved_message_service.py b/api/tests/test_containers_integration_tests/services/test_saved_message_service.py index 70aa813142..7368ad4249 100644 --- a/api/tests/test_containers_integration_tests/services/test_saved_message_service.py +++ b/api/tests/test_containers_integration_tests/services/test_saved_message_service.py @@ -4,10 +4,11 @@ import pytest from faker import Faker from sqlalchemy.orm import Session +from models import App, CreatorUserRole from models.enums import ConversationFromSource from models.model import EndUser, Message from models.web import SavedMessage -from services.app_service import AppService +from services.app_service import AppService, CreateAppParams from 
services.saved_message_service import SavedMessageService from tests.test_containers_integration_tests.helpers import generate_valid_password @@ -72,23 +73,23 @@ class TestSavedMessageService: tenant = account.current_tenant # Create app with realistic data - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - } + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) return app, account - def _create_test_end_user(self, db_session_with_containers: Session, app): + def _create_test_end_user(self, db_session_with_containers: Session, app: App): """ Helper method to create a test end user for testing. @@ -116,7 +117,7 @@ class TestSavedMessageService: return end_user - def _create_test_message(self, db_session_with_containers: Session, app, user): + def _create_test_message(self, db_session_with_containers: Session, app: App, user): """ Helper method to create a test message for testing. 
@@ -199,13 +200,13 @@ class TestSavedMessageService: saved_message1 = SavedMessage( app_id=app.id, message_id=message1.id, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, ) saved_message2 = SavedMessage( app_id=app.id, message_id=message2.id, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, ) @@ -272,13 +273,13 @@ class TestSavedMessageService: saved_message1 = SavedMessage( app_id=app.id, message_id=message1.id, - created_by_role="end_user", + created_by_role=CreatorUserRole.END_USER, created_by=end_user.id, ) saved_message2 = SavedMessage( app_id=app.id, message_id=message2.id, - created_by_role="end_user", + created_by_role=CreatorUserRole.END_USER, created_by=end_user.id, ) @@ -449,7 +450,7 @@ class TestSavedMessageService: saved_message = SavedMessage( app_id=app.id, message_id=message.id, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id, ) @@ -540,7 +541,9 @@ class TestSavedMessageService: message = self._create_test_message(db_session_with_containers, app, account) # Pre-create a saved message - saved = SavedMessage(app_id=app.id, message_id=message.id, created_by_role="account", created_by=account.id) + saved = SavedMessage( + app_id=app.id, message_id=message.id, created_by_role=CreatorUserRole.ACCOUNT, created_by=account.id + ) db_session_with_containers.add(saved) db_session_with_containers.commit() @@ -571,7 +574,9 @@ class TestSavedMessageService: end_user = self._create_test_end_user(db_session_with_containers, app) message = self._create_test_message(db_session_with_containers, app, end_user) - saved = SavedMessage(app_id=app.id, message_id=message.id, created_by_role="end_user", created_by=end_user.id) + saved = SavedMessage( + app_id=app.id, message_id=message.id, created_by_role=CreatorUserRole.END_USER, created_by=end_user.id + ) db_session_with_containers.add(saved) db_session_with_containers.commit() @@ 
-596,10 +601,10 @@ class TestSavedMessageService: # Both users save the same message saved_account = SavedMessage( - app_id=app.id, message_id=message.id, created_by_role="account", created_by=account1.id + app_id=app.id, message_id=message.id, created_by_role=CreatorUserRole.ACCOUNT, created_by=account1.id ) saved_end_user = SavedMessage( - app_id=app.id, message_id=message.id, created_by_role="end_user", created_by=end_user.id + app_id=app.id, message_id=message.id, created_by_role=CreatorUserRole.END_USER, created_by=end_user.id ) db_session_with_containers.add_all([saved_account, saved_end_user]) db_session_with_containers.commit() diff --git a/api/tests/test_containers_integration_tests/services/test_tag_service.py b/api/tests/test_containers_integration_tests/services/test_tag_service.py index 5a6bf0466e..583b6128e6 100644 --- a/api/tests/test_containers_integration_tests/services/test_tag_service.py +++ b/api/tests/test_containers_integration_tests/services/test_tag_service.py @@ -1099,38 +1099,39 @@ class TestTagService: db_session_with_containers, mock_external_service_dependencies ) - # Create tag - tag = self._create_test_tags( - db_session_with_containers, mock_external_service_dependencies, tenant.id, "knowledge", 1 - )[0] + # Create tags + tags = self._create_test_tags( + db_session_with_containers, mock_external_service_dependencies, tenant.id, "knowledge", 2 + ) - # Create dataset and bind tag + # Create dataset and bind tags dataset = self._create_test_dataset(db_session_with_containers, mock_external_service_dependencies, tenant.id) self._create_test_tag_bindings( - db_session_with_containers, mock_external_service_dependencies, [tag], dataset.id, tenant.id + db_session_with_containers, mock_external_service_dependencies, tags, dataset.id, tenant.id ) - # Verify binding exists before deletion - - binding_before = ( + # Verify bindings exist before deletion + bindings_before = ( db_session_with_containers.query(TagBinding) - 
.where(TagBinding.tag_id == tag.id, TagBinding.target_id == dataset.id) - .first() + .where(TagBinding.tag_id.in_([tag.id for tag in tags]), TagBinding.target_id == dataset.id) + .all() ) - assert binding_before is not None + assert len(bindings_before) == 2 # Act: Execute the method under test - delete_payload = TagBindingDeletePayload(type="knowledge", target_id=dataset.id, tag_id=tag.id) + delete_payload = TagBindingDeletePayload( + type="knowledge", target_id=dataset.id, tag_ids=[tag.id for tag in tags] + ) TagService.delete_tag_binding(delete_payload) # Assert: Verify the expected outcomes - # Verify tag binding was deleted - binding_after = ( + # Verify tag bindings were deleted + bindings_after = ( db_session_with_containers.query(TagBinding) - .where(TagBinding.tag_id == tag.id, TagBinding.target_id == dataset.id) - .first() + .where(TagBinding.tag_id.in_([tag.id for tag in tags]), TagBinding.target_id == dataset.id) + .all() ) - assert binding_after is None + assert len(bindings_after) == 0 def test_delete_tag_binding_non_existent_binding( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -1156,7 +1157,7 @@ class TestTagService: app = self._create_test_app(db_session_with_containers, mock_external_service_dependencies, tenant.id) # Act: Try to delete non-existent binding - delete_payload = TagBindingDeletePayload(type="app", target_id=app.id, tag_id=tag.id) + delete_payload = TagBindingDeletePayload(type="app", target_id=app.id, tag_ids=[tag.id]) TagService.delete_tag_binding(delete_payload) # Assert: Verify the expected outcomes diff --git a/api/tests/test_containers_integration_tests/services/test_web_conversation_service.py b/api/tests/test_containers_integration_tests/services/test_web_conversation_service.py index f2307fbd7d..8e53a2d6cd 100644 --- a/api/tests/test_containers_integration_tests/services/test_web_conversation_service.py +++ 
b/api/tests/test_containers_integration_tests/services/test_web_conversation_service.py @@ -6,12 +6,12 @@ from sqlalchemy import select from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom -from models import Account +from models import Account, App from models.enums import ConversationFromSource from models.model import Conversation, EndUser from models.web import PinnedConversation from services.account_service import AccountService, TenantService -from services.app_service import AppService +from services.app_service import AppService, CreateAppParams from services.web_conversation_service import WebConversationService from tests.test_containers_integration_tests.helpers import generate_valid_password @@ -77,23 +77,23 @@ class TestWebConversationService: tenant = account.current_tenant # Create app with realistic data - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - } + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) return app, account - def _create_test_end_user(self, db_session_with_containers: Session, app): + def _create_test_end_user(self, db_session_with_containers: Session, app: App): """ Helper method to create a test end user for testing. 
diff --git a/api/tests/test_containers_integration_tests/services/test_webhook_service.py b/api/tests/test_containers_integration_tests/services/test_webhook_service.py index 970da98c55..52b1229302 100644 --- a/api/tests/test_containers_integration_tests/services/test_webhook_service.py +++ b/api/tests/test_containers_integration_tests/services/test_webhook_service.py @@ -5,6 +5,7 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker from flask import Flask +from sqlalchemy.orm import Session from werkzeug.datastructures import FileStorage from models.enums import AppTriggerStatus, AppTriggerType @@ -52,7 +53,7 @@ class TestWebhookService: } @pytest.fixture - def test_data(self, db_session_with_containers, mock_external_dependencies): + def test_data(self, db_session_with_containers: Session, mock_external_dependencies): """Create test data for webhook service tests.""" fake = Faker() @@ -160,7 +161,7 @@ class TestWebhookService: "app_trigger": app_trigger, } - def test_get_webhook_trigger_and_workflow_success(self, test_data, flask_app_with_containers): + def test_get_webhook_trigger_and_workflow_success(self, test_data, flask_app_with_containers: Flask): """Test successful retrieval of webhook trigger and workflow.""" webhook_id = test_data["webhook_id"] @@ -175,7 +176,7 @@ class TestWebhookService: assert node_config["id"] == "webhook_node" assert node_config["data"].title == "Test Webhook" - def test_get_webhook_trigger_and_workflow_not_found(self, flask_app_with_containers): + def test_get_webhook_trigger_and_workflow_not_found(self, flask_app_with_containers: Flask): """Test webhook trigger not found scenario.""" with flask_app_with_containers.app_context(): with pytest.raises(ValueError, match="Webhook not found"): @@ -421,7 +422,9 @@ class TestWebhookService: assert result["files"] == {} - def test_trigger_workflow_execution_success(self, test_data, mock_external_dependencies, flask_app_with_containers): + def 
test_trigger_workflow_execution_success( + self, test_data, mock_external_dependencies, flask_app_with_containers: Flask + ): """Test successful workflow execution trigger.""" webhook_data = { "method": "POST", @@ -452,7 +455,7 @@ class TestWebhookService: mock_external_dependencies["async_service"].trigger_workflow_async.assert_called_once() def test_trigger_workflow_execution_end_user_service_failure( - self, test_data, mock_external_dependencies, flask_app_with_containers + self, test_data, mock_external_dependencies, flask_app_with_containers: Flask ): """Test workflow execution trigger when EndUserService fails.""" webhook_data = {"method": "POST", "headers": {}, "query_params": {}, "body": {}, "files": {}} @@ -540,8 +543,8 @@ class TestWebhookService: "bad_file": MagicMock(filename="test.bad", content_type="text/plain"), } - files["good_file"].read.return_value = b"content" - files["bad_file"].read.side_effect = Exception("Read error") + files["good_file"].stream.read.return_value = b"content" + files["bad_file"].stream.read.side_effect = Exception("Read error") webhook_trigger = MagicMock() webhook_trigger.tenant_id = "test_tenant" diff --git a/api/tests/test_containers_integration_tests/services/test_webhook_service_relationships.py b/api/tests/test_containers_integration_tests/services/test_webhook_service_relationships.py index 85ce3a6ba6..69cde847f8 100644 --- a/api/tests/test_containers_integration_tests/services/test_webhook_service_relationships.py +++ b/api/tests/test_containers_integration_tests/services/test_webhook_service_relationships.py @@ -6,6 +6,7 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from flask import Flask from sqlalchemy import select from sqlalchemy.orm import Session @@ -165,7 +166,7 @@ class WebhookServiceRelationshipFactory: class TestWebhookServiceLookupWithContainers: def test_get_webhook_trigger_and_workflow_raises_when_app_trigger_missing( - self, db_session_with_containers: Session, 
flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -182,7 +183,7 @@ class TestWebhookServiceLookupWithContainers: WebhookService.get_webhook_trigger_and_workflow(webhook_trigger.webhook_id) def test_get_webhook_trigger_and_workflow_raises_when_app_trigger_rate_limited( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -202,7 +203,7 @@ class TestWebhookServiceLookupWithContainers: WebhookService.get_webhook_trigger_and_workflow(webhook_trigger.webhook_id) def test_get_webhook_trigger_and_workflow_raises_when_app_trigger_disabled( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -222,7 +223,7 @@ class TestWebhookServiceLookupWithContainers: WebhookService.get_webhook_trigger_and_workflow(webhook_trigger.webhook_id) def test_get_webhook_trigger_and_workflow_raises_when_workflow_missing( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -239,7 +240,7 @@ class TestWebhookServiceLookupWithContainers: WebhookService.get_webhook_trigger_and_workflow(webhook_trigger.webhook_id) def test_get_webhook_trigger_and_workflow_returns_debug_draft_workflow( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -275,7 +276,7 @@ class 
TestWebhookServiceLookupWithContainers: class TestWebhookServiceTriggerExecutionWithContainers: def test_trigger_workflow_execution_triggers_async_workflow_successfully( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -318,7 +319,7 @@ class TestWebhookServiceTriggerExecutionWithContainers: assert trigger_args[2].root_node_id == webhook_trigger.node_id def test_trigger_workflow_execution_marks_tenant_rate_limited_when_quota_exceeded( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -354,7 +355,7 @@ class TestWebhookServiceTriggerExecutionWithContainers: mock_mark_rate_limited.assert_called_once_with(tenant.id) def test_trigger_workflow_execution_logs_and_reraises_unexpected_errors( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -386,7 +387,7 @@ class TestWebhookServiceTriggerExecutionWithContainers: class TestWebhookServiceRelationshipSyncWithContainers: def test_sync_webhook_relationships_raises_when_workflow_exceeds_node_limit( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -401,7 +402,7 @@ class TestWebhookServiceRelationshipSyncWithContainers: WebhookService.sync_webhook_relationships(app, workflow) def test_sync_webhook_relationships_raises_when_lock_not_acquired( - self, db_session_with_containers: Session, flask_app_with_containers + self, 
db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -418,7 +419,7 @@ class TestWebhookServiceRelationshipSyncWithContainers: WebhookService.sync_webhook_relationships(app, workflow) def test_sync_webhook_relationships_creates_missing_records_and_deletes_stale_records( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -455,7 +456,7 @@ class TestWebhookServiceRelationshipSyncWithContainers: assert db_session_with_containers.get(WorkflowWebhookTrigger, stale_trigger_id) is None def test_sync_webhook_relationships_sets_redis_cache_for_new_record( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory @@ -481,7 +482,7 @@ class TestWebhookServiceRelationshipSyncWithContainers: assert cached_payload["webhook_id"] == "cache-webhook-id-00001" def test_sync_webhook_relationships_logs_when_lock_release_fails( - self, db_session_with_containers: Session, flask_app_with_containers + self, db_session_with_containers: Session, flask_app_with_containers: Flask ): del flask_app_with_containers factory = WebhookServiceRelationshipFactory diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_app_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_app_service.py index 1e57b5603d..07a49130d0 100644 --- a/api/tests/test_containers_integration_tests/services/test_workflow_app_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workflow_app_service.py @@ -17,7 +17,7 @@ from models.workflow import WorkflowAppLogCreatedFrom from services.account_service import AccountService, 
TenantService # Delay import of AppService to avoid circular dependency -# from services.app_service import AppService +# from services.app_service import AppService, CreateAppParams from services.workflow_app_service import LogView, WorkflowAppService from tests.test_containers_integration_tests.helpers import generate_valid_password @@ -82,20 +82,20 @@ class TestWorkflowAppService: TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) tenant = account.current_tenant - # Create app with realistic data - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "workflow", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - } - # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams + + # Create app with realistic data + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="workflow", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) @@ -146,20 +146,20 @@ class TestWorkflowAppService: """ fake = Faker() - # Create app with realistic data - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "workflow", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - } - # Import here to avoid circular dependency - from services.app_service import AppService + from services.app_service import AppService, CreateAppParams + + # Create app with realistic data + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="workflow", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ) app_service = AppService() app = 
app_service.create_app(tenant.id, app_args, account) @@ -1530,7 +1530,7 @@ class TestWorkflowAppService: assert result_cross_tenant["total"] == 0 def test_get_paginate_workflow_app_logs_raises_when_account_filter_email_not_found( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) service = WorkflowAppService() @@ -1543,7 +1543,7 @@ class TestWorkflowAppService: ) def test_get_paginate_workflow_app_logs_filters_by_account( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) service = WorkflowAppService() @@ -1558,7 +1558,9 @@ class TestWorkflowAppService: assert result["total"] >= 0 assert isinstance(result["data"], list) - def test_get_paginate_workflow_archive_logs(self, db_session_with_containers, mock_external_service_dependencies): + def test_get_paginate_workflow_archive_logs( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) service = WorkflowAppService() diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py index 86cf2327c7..82fe391b08 100644 --- a/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workflow_draft_variable_service.py @@ -45,7 +45,9 @@ class TestWorkflowDraftVariableService: # WorkflowDraftVariableService doesn't have external 
dependencies that need mocking return {} - def _create_test_app(self, db_session_with_containers: Session, mock_external_service_dependencies, fake=None): + def _create_test_app( + self, db_session_with_containers: Session, mock_external_service_dependencies, fake: Faker | None = None + ): """ Helper method to create a test app with realistic data for testing. @@ -80,7 +82,7 @@ class TestWorkflowDraftVariableService: db_session_with_containers.commit() return app - def _create_test_workflow(self, db_session_with_containers: Session, app, fake=None): + def _create_test_workflow(self, db_session_with_containers: Session, app, fake: Faker | None = None): """ Helper method to create a test workflow associated with an app. diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_run_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_run_service.py index d02a078281..09fe1570bc 100644 --- a/api/tests/test_containers_integration_tests/services/test_workflow_run_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workflow_run_service.py @@ -13,7 +13,7 @@ from models.model import ( ) from models.workflow import WorkflowRun from services.account_service import AccountService, TenantService -from services.app_service import AppService +from services.app_service import AppService, CreateAppParams from services.workflow_run_service import WorkflowRunService from tests.test_containers_integration_tests.helpers import generate_valid_password @@ -79,16 +79,16 @@ class TestWorkflowRunService: tenant = account.current_tenant # Create app with realistic data - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "chat", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - } + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="chat", + icon_type="emoji", + icon="🤖", 
+ icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) @@ -535,13 +535,13 @@ class TestWorkflowRunService: tenant = account.current_tenant # Create app - app_args = { - "name": "Test App", - "mode": "chat", - "icon_type": "emoji", - "icon": "🚀", - "icon_background": "#4ECDC4", - } + app_args = CreateAppParams( + name="Test App", + mode="chat", + icon_type="emoji", + icon="🚀", + icon_background="#4ECDC4", + ) app = app_service.create_app(tenant.id, app_args, account) # Create workflow run without node executions @@ -586,13 +586,13 @@ class TestWorkflowRunService: tenant = account.current_tenant # Create app - app_args = { - "name": "Test App", - "mode": "chat", - "icon_type": "emoji", - "icon": "🚀", - "icon_background": "#4ECDC4", - } + app_args = CreateAppParams( + name="Test App", + mode="chat", + icon_type="emoji", + icon="🚀", + icon_background="#4ECDC4", + ) app = app_service.create_app(tenant.id, app_args, account) # Use invalid workflow run ID @@ -637,13 +637,13 @@ class TestWorkflowRunService: tenant = account.current_tenant # Create app - app_args = { - "name": "Test App", - "mode": "chat", - "icon_type": "emoji", - "icon": "🚀", - "icon_background": "#4ECDC4", - } + app_args = CreateAppParams( + name="Test App", + mode="chat", + icon_type="emoji", + icon="🚀", + icon_background="#4ECDC4", + ) app = app_service.create_app(tenant.id, app_args, account) # Create workflow run diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_service.py index b5ce8a53de..9ba1fda08b 100644 --- a/api/tests/test_containers_integration_tests/services/test_workflow_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workflow_service.py @@ -12,7 +12,7 @@ import pytest from faker import Faker from sqlalchemy.orm import Session -from models import Account, App, 
Workflow +from models import Account, AccountStatus, App, TenantStatus, Workflow from models.model import AppMode from models.workflow import WorkflowType from services.workflow_service import WorkflowService @@ -33,7 +33,7 @@ class TestWorkflowService: and realistic testing environment with actual database interactions. """ - def _create_test_account(self, db_session_with_containers: Session, fake=None): + def _create_test_account(self, db_session_with_containers: Session, fake: Faker | None = None): """ Helper method to create a test account with realistic data. @@ -49,7 +49,7 @@ class TestWorkflowService: email=fake.email(), name=fake.name(), avatar=fake.url(), - status="active", + status=AccountStatus.ACTIVE, interface_language="en-US", # Set interface language for Site creation ) account.created_at = fake.date_time_this_year() @@ -62,7 +62,7 @@ class TestWorkflowService: tenant = Tenant( name=f"Test Tenant {fake.company()}", plan="basic", - status="normal", + status=TenantStatus.NORMAL, ) tenant.id = account.current_tenant_id tenant.created_at = fake.date_time_this_year() @@ -77,7 +77,7 @@ class TestWorkflowService: return account - def _create_test_app(self, db_session_with_containers: Session, fake=None): + def _create_test_app(self, db_session_with_containers: Session, fake: Faker | None = None): """ Helper method to create a test app with realistic data. @@ -109,7 +109,7 @@ class TestWorkflowService: db_session_with_containers.commit() return app - def _create_test_workflow(self, db_session_with_containers: Session, app, account, fake=None): + def _create_test_workflow(self, db_session_with_containers: Session, app, account, fake: Faker | None = None): """ Helper method to create a test workflow associated with an app. 
diff --git a/api/tests/test_containers_integration_tests/services/tools/test_workflow_tools_manage_service.py b/api/tests/test_containers_integration_tests/services/tools/test_workflow_tools_manage_service.py index 21a1975879..9b574fe2df 100644 --- a/api/tests/test_containers_integration_tests/services/tools/test_workflow_tools_manage_service.py +++ b/api/tests/test_containers_integration_tests/services/tools/test_workflow_tools_manage_service.py @@ -11,7 +11,7 @@ from core.tools.errors import WorkflowToolHumanInputNotSupportedError from models.tools import WorkflowToolProvider from models.workflow import Workflow as WorkflowModel from services.account_service import AccountService, TenantService -from services.app_service import AppService +from services.app_service import AppService, CreateAppParams from services.tools.workflow_tools_manage_service import WorkflowToolManageService from tests.test_containers_integration_tests.helpers import generate_valid_password @@ -94,16 +94,16 @@ class TestWorkflowToolManageService: tenant = account.current_tenant # Create app with realistic data - app_args = { - "name": fake.company(), - "description": fake.text(max_nb_chars=100), - "mode": "workflow", - "icon_type": "emoji", - "icon": "🤖", - "icon_background": "#FF6B6B", - "api_rph": 100, - "api_rpm": 10, - } + app_args = CreateAppParams( + name=fake.company(), + description=fake.text(max_nb_chars=100), + mode="workflow", + icon_type="emoji", + icon="🤖", + icon_background="#FF6B6B", + api_rph=100, + api_rpm=10, + ) app_service = AppService() app = app_service.create_app(tenant.id, app_args, account) diff --git a/api/tests/test_containers_integration_tests/services/workflow/test_workflow_deletion.py b/api/tests/test_containers_integration_tests/services/workflow/test_workflow_deletion.py index 29e1e240b4..afc4908c15 100644 --- a/api/tests/test_containers_integration_tests/services/workflow/test_workflow_deletion.py +++ 
b/api/tests/test_containers_integration_tests/services/workflow/test_workflow_deletion.py @@ -100,7 +100,7 @@ class TestWorkflowDeletion: session.flush() return provider - def test_delete_workflow_success(self, db_session_with_containers): + def test_delete_workflow_success(self, db_session_with_containers: Session): tenant, account = self._create_tenant_and_account(db_session_with_containers) app = self._create_app(db_session_with_containers, tenant=tenant, account=account) workflow = self._create_workflow( @@ -118,7 +118,7 @@ class TestWorkflowDeletion: db_session_with_containers.expire_all() assert db_session_with_containers.get(Workflow, workflow_id) is None - def test_delete_draft_workflow_raises_error(self, db_session_with_containers): + def test_delete_draft_workflow_raises_error(self, db_session_with_containers: Session): tenant, account = self._create_tenant_and_account(db_session_with_containers) app = self._create_app(db_session_with_containers, tenant=tenant, account=account) workflow = self._create_workflow( @@ -130,7 +130,7 @@ class TestWorkflowDeletion: with pytest.raises(DraftWorkflowDeletionError): service.delete_workflow(session=db_session_with_containers, workflow_id=workflow.id, tenant_id=tenant.id) - def test_delete_workflow_in_use_by_app_raises_error(self, db_session_with_containers): + def test_delete_workflow_in_use_by_app_raises_error(self, db_session_with_containers: Session): tenant, account = self._create_tenant_and_account(db_session_with_containers) app = self._create_app(db_session_with_containers, tenant=tenant, account=account) workflow = self._create_workflow( @@ -144,7 +144,7 @@ class TestWorkflowDeletion: with pytest.raises(WorkflowInUseError, match="currently in use by app"): service.delete_workflow(session=db_session_with_containers, workflow_id=workflow.id, tenant_id=tenant.id) - def test_delete_workflow_published_as_tool_raises_error(self, db_session_with_containers): + def 
test_delete_workflow_published_as_tool_raises_error(self, db_session_with_containers: Session): tenant, account = self._create_tenant_and_account(db_session_with_containers) app = self._create_app(db_session_with_containers, tenant=tenant, account=account) workflow = self._create_workflow( diff --git a/api/tests/test_containers_integration_tests/services/workflow/test_workflow_node_execution_service_repository.py b/api/tests/test_containers_integration_tests/services/workflow/test_workflow_node_execution_service_repository.py index 4dab895135..32b76c3469 100644 --- a/api/tests/test_containers_integration_tests/services/workflow/test_workflow_node_execution_service_repository.py +++ b/api/tests/test_containers_integration_tests/services/workflow/test_workflow_node_execution_service_repository.py @@ -64,7 +64,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: db_session_with_containers.commit() return execution - def test_get_node_last_execution_found(self, db_session_with_containers): + def test_get_node_last_execution_found(self, db_session_with_containers: Session): """Test getting the last execution for a node when it exists.""" # Arrange tenant_id = str(uuid4()) @@ -110,7 +110,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: assert result.id == expected.id assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED - def test_get_node_last_execution_not_found(self, db_session_with_containers): + def test_get_node_last_execution_not_found(self, db_session_with_containers: Session): """Test getting the last execution for a node when it doesn't exist.""" # Arrange tenant_id = str(uuid4()) @@ -129,7 +129,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: # Assert assert result is None - def test_get_executions_by_workflow_run_empty(self, db_session_with_containers): + def test_get_executions_by_workflow_run_empty(self, db_session_with_containers: Session): """Test getting executions for a workflow run when none exist.""" 
# Arrange tenant_id = str(uuid4()) @@ -147,7 +147,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: # Assert assert result == [] - def test_get_execution_by_id_found(self, db_session_with_containers): + def test_get_execution_by_id_found(self, db_session_with_containers: Session): """Test getting execution by ID when it exists.""" # Arrange execution = self._create_execution( @@ -170,7 +170,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: assert result is not None assert result.id == execution.id - def test_get_execution_by_id_not_found(self, db_session_with_containers): + def test_get_execution_by_id_not_found(self, db_session_with_containers: Session): """Test getting execution by ID when it doesn't exist.""" # Arrange repository = self._create_repository(db_session_with_containers) @@ -182,7 +182,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: # Assert assert result is None - def test_delete_expired_executions(self, db_session_with_containers): + def test_delete_expired_executions(self, db_session_with_containers: Session): """Test deleting expired executions.""" # Arrange tenant_id = str(uuid4()) @@ -248,7 +248,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: assert old_execution_2_id not in remaining_ids assert kept_execution_id in remaining_ids - def test_delete_executions_by_app(self, db_session_with_containers): + def test_delete_executions_by_app(self, db_session_with_containers: Session): """Test deleting executions by app.""" # Arrange tenant_id = str(uuid4()) @@ -313,7 +313,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: assert deleted_2_id not in remaining_ids assert kept_id in remaining_ids - def test_get_expired_executions_batch(self, db_session_with_containers): + def test_get_expired_executions_batch(self, db_session_with_containers: Session): """Test getting expired executions batch for backup.""" # Arrange tenant_id = str(uuid4()) @@ -370,7 +370,7 @@ class 
TestSQLAlchemyWorkflowNodeExecutionServiceRepository: assert old_execution_1.id in result_ids assert old_execution_2.id in result_ids - def test_delete_executions_by_ids(self, db_session_with_containers): + def test_delete_executions_by_ids(self, db_session_with_containers: Session): """Test deleting executions by IDs.""" # Arrange tenant_id = str(uuid4()) @@ -424,7 +424,7 @@ class TestSQLAlchemyWorkflowNodeExecutionServiceRepository: ).all() assert remaining == [] - def test_delete_executions_by_ids_empty_list(self, db_session_with_containers): + def test_delete_executions_by_ids_empty_list(self, db_session_with_containers: Session): """Test deleting executions with empty ID list.""" # Arrange repository = self._create_repository(db_session_with_containers) diff --git a/api/tests/test_containers_integration_tests/tasks/test_clean_notion_document_task.py b/api/tests/test_containers_integration_tests/tasks/test_clean_notion_document_task.py index 7e5c374b5d..1c8d5969e0 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_clean_notion_document_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_clean_notion_document_task.py @@ -71,7 +71,7 @@ class TestCleanNotionDocumentTask: yield mock_factory def test_clean_notion_document_task_success( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test successful cleanup of Notion documents with proper database operations. @@ -176,7 +176,7 @@ class TestCleanNotionDocumentTask: # 5. 
The task completes without errors def test_clean_notion_document_task_dataset_not_found( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task behavior when dataset is not found. @@ -196,7 +196,7 @@ class TestCleanNotionDocumentTask: mock_index_processor_factory.return_value.init_index_processor.assert_not_called() def test_clean_notion_document_task_empty_document_list( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task behavior with empty document list. @@ -240,7 +240,7 @@ class TestCleanNotionDocumentTask: assert args[1] == [] def test_clean_notion_document_task_with_different_index_types( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with different dataset index types. @@ -328,7 +328,7 @@ class TestCleanNotionDocumentTask: mock_index_processor_factory.reset_mock() def test_clean_notion_document_task_with_segments_no_index_node_ids( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with segments that have no index_node_ids. @@ -411,7 +411,7 @@ class TestCleanNotionDocumentTask: # are properly deleted from the database. 
def test_clean_notion_document_task_partial_document_cleanup( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with partial document cleanup scenario. @@ -513,7 +513,7 @@ class TestCleanNotionDocumentTask: # The database operations work correctly, isolating only the specified documents. def test_clean_notion_document_task_with_mixed_segment_statuses( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with segments in different statuses. @@ -603,7 +603,7 @@ class TestCleanNotionDocumentTask: # IndexProcessor verification would require more sophisticated mocking. def test_clean_notion_document_task_continues_when_index_processor_fails( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Index processor failure (e.g. transient billing API error propagated via @@ -707,7 +707,7 @@ class TestCleanNotionDocumentTask: assert _count_segments(db_session_with_containers, DocumentSegment.document_id == document.id) == 0 def test_clean_notion_document_task_with_large_number_of_documents( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with a large number of documents and segments. @@ -806,7 +806,7 @@ class TestCleanNotionDocumentTask: # The database efficiently handles large-scale deletions. 
def test_clean_notion_document_task_with_documents_from_different_tenants( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with documents from different tenants. @@ -918,7 +918,7 @@ class TestCleanNotionDocumentTask: # Only documents from the target dataset are affected, maintaining tenant separation. def test_clean_notion_document_task_with_documents_in_different_states( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with documents in different indexing states. @@ -1024,7 +1024,7 @@ class TestCleanNotionDocumentTask: # All documents are deleted regardless of their indexing status. def test_clean_notion_document_task_with_documents_having_metadata( - self, db_session_with_containers, mock_index_processor_factory, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_index_processor_factory, mock_external_service_dependencies ): """ Test cleanup task with documents that have rich metadata. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py index 9084667c31..a8d295e6a9 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py @@ -12,10 +12,11 @@ from uuid import uuid4 import pytest from faker import Faker from sqlalchemy import delete +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType from extensions.ext_redis import redis_client -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Dataset, Document, DocumentSegment from models.enums import DataSourceType, DocumentCreatedFrom, IndexingStatus, SegmentStatus from tasks.create_segment_to_index_task import create_segment_to_index_task @@ -25,7 +26,7 @@ class TestCreateSegmentToIndexTask: """Integration tests for create_segment_to_index_task using testcontainers.""" @pytest.fixture(autouse=True) - def cleanup_database(self, db_session_with_containers): + def cleanup_database(self, db_session_with_containers: Session): """Clean up database and Redis before each test to ensure isolation.""" # Clear all test data using fixture session @@ -55,7 +56,7 @@ class TestCreateSegmentToIndexTask: "index_processor": mock_processor, } - def _create_test_account_and_tenant(self, db_session_with_containers): + def _create_test_account_and_tenant(self, db_session_with_containers: Session): """ Helper method to create a test account and tenant for testing. 
@@ -72,7 +73,7 @@ class TestCreateSegmentToIndexTask: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) @@ -81,7 +82,7 @@ class TestCreateSegmentToIndexTask: # Create tenant tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, plan="basic", ) db_session_with_containers.add(tenant) @@ -102,7 +103,7 @@ class TestCreateSegmentToIndexTask: return account, tenant - def _create_test_dataset_and_document(self, db_session_with_containers, tenant_id, account_id): + def _create_test_dataset_and_document(self, db_session_with_containers: Session, tenant_id, account_id): """ Helper method to create a test dataset and document for testing. @@ -151,7 +152,13 @@ class TestCreateSegmentToIndexTask: return dataset, document def _create_test_segment( - self, db_session_with_containers, dataset_id, document_id, tenant_id, account_id, status=SegmentStatus.WAITING + self, + db_session_with_containers: Session, + dataset_id, + document_id, + tenant_id, + account_id, + status=SegmentStatus.WAITING, ): """ Helper method to create a test document segment for testing. @@ -189,7 +196,9 @@ class TestCreateSegmentToIndexTask: return segment - def test_create_segment_to_index_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_create_segment_to_index_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test successful creation of segment to index. @@ -225,7 +234,7 @@ class TestCreateSegmentToIndexTask: assert redis_client.exists(cache_key) == 0 def test_create_segment_to_index_segment_not_found( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of non-existent segment ID. 
@@ -246,7 +255,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor_factory"].assert_not_called() def test_create_segment_to_index_invalid_status( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of segment with invalid status. @@ -277,7 +286,9 @@ class TestCreateSegmentToIndexTask: # Verify no index processor calls were made mock_external_service_dependencies["index_processor_factory"].assert_not_called() - def test_create_segment_to_index_no_dataset(self, db_session_with_containers, mock_external_service_dependencies): + def test_create_segment_to_index_no_dataset( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test handling of segment without associated dataset. @@ -330,7 +341,9 @@ class TestCreateSegmentToIndexTask: # Verify no index processor calls were made mock_external_service_dependencies["index_processor_factory"].assert_not_called() - def test_create_segment_to_index_no_document(self, db_session_with_containers, mock_external_service_dependencies): + def test_create_segment_to_index_no_document( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test handling of segment without associated document. @@ -367,7 +380,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor_factory"].assert_not_called() def test_create_segment_to_index_document_disabled( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of segment with disabled document. 
@@ -403,7 +416,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor_factory"].assert_not_called() def test_create_segment_to_index_document_archived( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of segment with archived document. @@ -439,7 +452,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor_factory"].assert_not_called() def test_create_segment_to_index_document_indexing_incomplete( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of segment with document that has incomplete indexing. @@ -475,7 +488,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor_factory"].assert_not_called() def test_create_segment_to_index_processor_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of index processor exceptions. @@ -511,7 +524,7 @@ class TestCreateSegmentToIndexTask: assert redis_client.exists(cache_key) == 0 def test_create_segment_to_index_with_keywords( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with custom keywords. @@ -543,7 +556,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor"].load.assert_called_once() def test_create_segment_to_index_different_doc_forms( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with different document forms. 
@@ -586,7 +599,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor_factory"].assert_called_with(doc_form) def test_create_segment_to_index_performance_timing( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing performance and timing. @@ -617,7 +630,7 @@ class TestCreateSegmentToIndexTask: assert segment.status == SegmentStatus.COMPLETED def test_create_segment_to_index_concurrent_execution( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test concurrent execution of segment indexing tasks. @@ -654,7 +667,7 @@ class TestCreateSegmentToIndexTask: assert mock_external_service_dependencies["index_processor_factory"].call_count == 3 def test_create_segment_to_index_large_content( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with large content. @@ -703,7 +716,7 @@ class TestCreateSegmentToIndexTask: assert segment.completed_at is not None def test_create_segment_to_index_redis_failure( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing when Redis operations fail. @@ -743,7 +756,7 @@ class TestCreateSegmentToIndexTask: assert redis_client.exists(cache_key) == 1 def test_create_segment_to_index_database_transaction_rollback( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with database transaction handling. 
@@ -775,7 +788,7 @@ class TestCreateSegmentToIndexTask: assert segment.error is not None def test_create_segment_to_index_metadata_validation( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with metadata validation. @@ -817,7 +830,7 @@ class TestCreateSegmentToIndexTask: assert doc is not None def test_create_segment_to_index_status_transition_flow( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test complete status transition flow during indexing. @@ -852,7 +865,7 @@ class TestCreateSegmentToIndexTask: assert segment.indexing_at <= segment.completed_at def test_create_segment_to_index_with_empty_content( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with empty or minimal content. @@ -894,7 +907,7 @@ class TestCreateSegmentToIndexTask: assert segment.completed_at is not None def test_create_segment_to_index_with_special_characters( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with special characters and unicode content. @@ -940,7 +953,7 @@ class TestCreateSegmentToIndexTask: assert segment.completed_at is not None def test_create_segment_to_index_with_long_keywords( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with long keyword lists. 
@@ -974,7 +987,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor"].load.assert_called_once() def test_create_segment_to_index_tenant_isolation( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with proper tenant isolation. @@ -1017,7 +1030,7 @@ class TestCreateSegmentToIndexTask: assert segment1.tenant_id != segment2.tenant_id def test_create_segment_to_index_with_none_keywords( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test segment indexing with None keywords parameter. @@ -1048,7 +1061,7 @@ class TestCreateSegmentToIndexTask: mock_external_service_dependencies["index_processor"].load.assert_called_once() def test_create_segment_to_index_comprehensive_integration( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Comprehensive integration test covering multiple scenarios. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py b/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py index 684097851b..5287cd06db 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py @@ -7,11 +7,12 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker from sqlalchemy import select +from sqlalchemy.orm import Session from core.indexing_runner import DocumentIsPausedError from core.rag.index_processor.constant.index_type import IndexTechniqueType from enums.cloud_plan import CloudPlan -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Dataset, Document from models.enums import DataSourceType, DocumentCreatedFrom, IndexingStatus from tasks.document_indexing_task import ( @@ -53,7 +54,7 @@ class _TrackedSessionContext: @pytest.fixture(autouse=True) -def _ensure_testcontainers_db(db_session_with_containers): +def _ensure_testcontainers_db(db_session_with_containers: Session): """Ensure this suite always runs on testcontainers infrastructure.""" return db_session_with_containers @@ -120,12 +121,12 @@ class TestDatasetIndexingTaskIntegration: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.flush() - tenant = Tenant(name=fake.company(), status="normal") + tenant = Tenant(name=fake.company(), status=TenantStatus.NORMAL) db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -174,11 +175,11 @@ class TestDatasetIndexingTaskIntegration: return dataset, documents - def _query_document(self, db_session_with_containers, document_id: str) -> 
Document | None: + def _query_document(self, db_session_with_containers: Session, document_id: str) -> Document | None: """Return the latest persisted document state.""" return db_session_with_containers.scalar(select(Document).where(Document.id == document_id).limit(1)) - def _assert_documents_parsing(self, db_session_with_containers, document_ids: Sequence[str]) -> None: + def _assert_documents_parsing(self, db_session_with_containers: Session, document_ids: Sequence[str]) -> None: """Assert all target documents are persisted in parsing status.""" db_session_with_containers.expire_all() for document_id in document_ids: @@ -212,7 +213,9 @@ class TestDatasetIndexingTaskIntegration: assert len(opened) >= 2 assert opened_ids <= closed_ids - def test_legacy_document_indexing_task_still_works(self, db_session_with_containers, patched_external_dependencies): + def test_legacy_document_indexing_task_still_works( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Ensure the legacy task entrypoint still updates parsing status.""" # Arrange dataset, documents = self._create_test_dataset_and_documents(db_session_with_containers, document_count=2) @@ -225,7 +228,9 @@ class TestDatasetIndexingTaskIntegration: patched_external_dependencies["indexing_runner_instance"].run.assert_called_once() self._assert_documents_parsing(db_session_with_containers, document_ids) - def test_batch_processing_multiple_documents(self, db_session_with_containers, patched_external_dependencies): + def test_batch_processing_multiple_documents( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Process multiple documents in one batch.""" # Arrange dataset, documents = self._create_test_dataset_and_documents(db_session_with_containers, document_count=3) @@ -240,7 +245,9 @@ class TestDatasetIndexingTaskIntegration: assert len(run_args) == len(document_ids) self._assert_documents_parsing(db_session_with_containers, document_ids) - def 
test_batch_processing_with_limit_check(self, db_session_with_containers, patched_external_dependencies): + def test_batch_processing_with_limit_check( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Reject batches larger than configured upload limit. This test patches config only to force a deterministic limit branch while keeping SQL writes real. @@ -263,7 +270,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_documents_error_contains(db_session_with_containers, document_ids, "batch upload limit") def test_batch_processing_sandbox_plan_single_document_only( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Reject multi-document upload under sandbox plan.""" # Arrange @@ -280,7 +287,9 @@ class TestDatasetIndexingTaskIntegration: patched_external_dependencies["indexing_runner_instance"].run.assert_not_called() self._assert_documents_error_contains(db_session_with_containers, document_ids, "does not support batch upload") - def test_batch_processing_empty_document_list(self, db_session_with_containers, patched_external_dependencies): + def test_batch_processing_empty_document_list( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Handle empty list input without failing.""" # Arrange dataset, _ = self._create_test_dataset_and_documents(db_session_with_containers, document_count=0) @@ -292,7 +301,7 @@ class TestDatasetIndexingTaskIntegration: patched_external_dependencies["indexing_runner_instance"].run.assert_called_once_with([]) def test_tenant_queue_dispatches_next_task_after_completion( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Dispatch the next queued task after current tenant task completes. 
@@ -337,7 +346,7 @@ class TestDatasetIndexingTaskIntegration: delete_key_spy.assert_not_called() def test_tenant_queue_deletes_running_key_when_no_follow_up_tasks( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Delete tenant running flag when queue has no pending tasks. @@ -362,7 +371,7 @@ class TestDatasetIndexingTaskIntegration: delete_key_spy.assert_called_once() def test_validation_failure_sets_error_status_when_vector_space_at_limit( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Set error status when vector space validation fails before runner phase.""" # Arrange @@ -382,7 +391,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_documents_error_contains(db_session_with_containers, document_ids, "over the limit") def test_runner_exception_does_not_crash_indexing_task( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Catch generic runner exceptions without crashing the task.""" # Arrange @@ -397,7 +406,7 @@ class TestDatasetIndexingTaskIntegration: patched_external_dependencies["indexing_runner_instance"].run.assert_called_once() self._assert_documents_parsing(db_session_with_containers, document_ids) - def test_document_paused_error_handling(self, db_session_with_containers, patched_external_dependencies): + def test_document_paused_error_handling(self, db_session_with_containers: Session, patched_external_dependencies): """Handle DocumentIsPausedError and keep persisted state consistent.""" # Arrange dataset, documents = self._create_test_dataset_and_documents(db_session_with_containers, document_count=2) @@ -424,7 +433,7 @@ class TestDatasetIndexingTaskIntegration: patched_external_dependencies["indexing_runner_instance"].run.assert_not_called() def 
test_tenant_queue_error_handling_still_processes_next_task( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Even on current task failure, enqueue the next waiting tenant task. @@ -491,7 +500,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_all_opened_sessions_closed(session_close_tracker) def test_multiple_documents_with_mixed_success_and_failure( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Process only existing documents when request includes missing ids.""" # Arrange @@ -508,7 +517,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_documents_parsing(db_session_with_containers, existing_ids) def test_tenant_queue_dispatches_up_to_concurrency_limit( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies ): """Dispatch only up to configured concurrency under queued backlog burst. @@ -543,7 +552,7 @@ class TestDatasetIndexingTaskIntegration: assert task_dispatch_spy.apply_async.call_count == concurrency_limit assert set_waiting_spy.call_count == concurrency_limit - def test_task_queue_fifo_ordering(self, db_session_with_containers, patched_external_dependencies): + def test_task_queue_fifo_ordering(self, db_session_with_containers: Session, patched_external_dependencies): """Keep FIFO ordering when dispatching next queued tasks. Queue APIs are patched to isolate dispatch side effects while preserving DB assertions. 
@@ -576,7 +585,9 @@ class TestDatasetIndexingTaskIntegration: call_kwargs = task_dispatch_spy.apply_async.call_args_list[index].kwargs.get("kwargs", {}) assert call_kwargs.get("document_ids") == expected_task["document_ids"] - def test_billing_disabled_skips_limit_checks(self, db_session_with_containers, patched_external_dependencies): + def test_billing_disabled_skips_limit_checks( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Skip limit checks when billing feature is disabled.""" # Arrange large_document_ids = [str(uuid.uuid4()) for _ in range(100)] @@ -595,7 +606,7 @@ class TestDatasetIndexingTaskIntegration: assert len(run_args) == 100 self._assert_documents_parsing(db_session_with_containers, large_document_ids) - def test_complete_workflow_normal_task(self, db_session_with_containers, patched_external_dependencies): + def test_complete_workflow_normal_task(self, db_session_with_containers: Session, patched_external_dependencies): """Run end-to-end normal queue workflow with tenant queue cleanup. Queue APIs are patched to isolate dispatch side effects while preserving DB assertions. @@ -618,7 +629,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_documents_parsing(db_session_with_containers, document_ids) delete_key_spy.assert_called_once() - def test_complete_workflow_priority_task(self, db_session_with_containers, patched_external_dependencies): + def test_complete_workflow_priority_task(self, db_session_with_containers: Session, patched_external_dependencies): """Run end-to-end priority queue workflow with tenant queue cleanup. Queue APIs are patched to isolate dispatch side effects while preserving DB assertions. 
@@ -641,7 +652,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_documents_parsing(db_session_with_containers, document_ids) delete_key_spy.assert_called_once() - def test_single_document_processing(self, db_session_with_containers, patched_external_dependencies): + def test_single_document_processing(self, db_session_with_containers: Session, patched_external_dependencies): """Process the minimum batch size (single document).""" # Arrange dataset, documents = self._create_test_dataset_and_documents(db_session_with_containers, document_count=1) @@ -655,7 +666,9 @@ class TestDatasetIndexingTaskIntegration: assert len(run_args) == 1 self._assert_documents_parsing(db_session_with_containers, [document_id]) - def test_document_with_special_characters_in_id(self, db_session_with_containers, patched_external_dependencies): + def test_document_with_special_characters_in_id( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Handle standard UUID ids with hyphen characters safely.""" # Arrange special_document_id = str(uuid.uuid4()) @@ -670,7 +683,9 @@ class TestDatasetIndexingTaskIntegration: # Assert self._assert_documents_parsing(db_session_with_containers, [special_document_id]) - def test_zero_vector_space_limit_allows_unlimited(self, db_session_with_containers, patched_external_dependencies): + def test_zero_vector_space_limit_allows_unlimited( + self, db_session_with_containers: Session, patched_external_dependencies + ): """Treat vector limit 0 as unlimited and continue indexing.""" # Arrange dataset, documents = self._create_test_dataset_and_documents(db_session_with_containers, document_count=3) @@ -689,7 +704,7 @@ class TestDatasetIndexingTaskIntegration: self._assert_documents_parsing(db_session_with_containers, document_ids) def test_negative_vector_space_values_handled_gracefully( - self, db_session_with_containers, patched_external_dependencies + self, db_session_with_containers: Session, patched_external_dependencies 
): """Treat negative vector limits as non-blocking and continue indexing.""" # Arrange @@ -708,7 +723,7 @@ class TestDatasetIndexingTaskIntegration: patched_external_dependencies["indexing_runner_instance"].run.assert_called_once() self._assert_documents_parsing(db_session_with_containers, document_ids) - def test_large_document_batch_processing(self, db_session_with_containers, patched_external_dependencies): + def test_large_document_batch_processing(self, db_session_with_containers: Session, patched_external_dependencies): """Process a batch exactly at configured upload limit. This test patches config only to force a deterministic limit branch while keeping SQL writes real. diff --git a/api/tests/test_containers_integration_tests/tasks/test_deal_dataset_vector_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_deal_dataset_vector_index_task.py index 48fec441c5..e4cbb9e589 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_deal_dataset_vector_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_deal_dataset_vector_index_task.py @@ -12,6 +12,7 @@ from unittest.mock import ANY, Mock, patch import pytest from faker import Faker from sqlalchemy import select +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType from models.dataset import Dataset, Document, DocumentSegment @@ -55,7 +56,7 @@ class TestDealDatasetVectorIndexTask: yield mock_factory @pytest.fixture - def account_and_tenant(self, db_session_with_containers, mock_external_service_dependencies): + def account_and_tenant(self, db_session_with_containers: Session, mock_external_service_dependencies): """Create an account with an owner tenant for testing. Returns a tuple of (account, tenant) where tenant is guaranteed to be non-None. 
@@ -73,7 +74,7 @@ class TestDealDatasetVectorIndexTask: return account, tenant def test_deal_dataset_vector_index_task_remove_action_success( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test successful removal of dataset vector index. @@ -131,7 +132,7 @@ class TestDealDatasetVectorIndexTask: assert mock_processor.clean.call_count >= 0 # For now, just check it doesn't fail def test_deal_dataset_vector_index_task_add_action_success( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test successful addition of dataset vector index. @@ -233,7 +234,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_called_once() def test_deal_dataset_vector_index_task_update_action_success( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test successful update of dataset vector index. @@ -337,7 +338,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_called_once() def test_deal_dataset_vector_index_task_dataset_not_found_error( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task behavior when dataset is not found. 
@@ -357,7 +358,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_not_called() def test_deal_dataset_vector_index_task_add_action_no_documents( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test add action when no documents exist for the dataset. @@ -389,7 +390,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_not_called() def test_deal_dataset_vector_index_task_add_action_no_segments( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test add action when documents exist but have no segments. @@ -447,7 +448,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_not_called() def test_deal_dataset_vector_index_task_update_action_no_documents( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test update action when no documents exist for the dataset. @@ -480,7 +481,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_not_called() def test_deal_dataset_vector_index_task_add_action_with_exception_handling( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test add action with exception handling during processing. 
@@ -578,7 +579,7 @@ class TestDealDatasetVectorIndexTask: assert "Test exception during indexing" in updated_document.error def test_deal_dataset_vector_index_task_with_custom_index_type( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task behavior with custom index type (QA_INDEX). @@ -656,7 +657,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_called_once() def test_deal_dataset_vector_index_task_with_default_index_type( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task behavior with default index type (PARAGRAPH_INDEX). @@ -734,7 +735,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_called_once() def test_deal_dataset_vector_index_task_multiple_documents_processing( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task processing with multiple documents and segments. @@ -839,7 +840,7 @@ class TestDealDatasetVectorIndexTask: assert mock_processor.load.call_count == 3 def test_deal_dataset_vector_index_task_document_status_transitions( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test document status transitions during task execution. 
@@ -938,7 +939,7 @@ class TestDealDatasetVectorIndexTask: assert updated_document.indexing_status == IndexingStatus.COMPLETED def test_deal_dataset_vector_index_task_with_disabled_documents( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task behavior with disabled documents. @@ -1061,7 +1062,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_called_once() def test_deal_dataset_vector_index_task_with_archived_documents( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task behavior with archived documents. @@ -1184,7 +1185,7 @@ class TestDealDatasetVectorIndexTask: mock_processor.load.assert_called_once() def test_deal_dataset_vector_index_task_with_incomplete_documents( - self, db_session_with_containers, mock_index_processor_factory, account_and_tenant + self, db_session_with_containers: Session, mock_index_processor_factory, account_and_tenant ): """ Test task behavior with documents that have incomplete indexing status. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py index 8a69707b38..f4a71040c1 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_delete_segment_from_index_task.py @@ -11,9 +11,19 @@ import logging from unittest.mock import MagicMock, patch from faker import Faker +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType -from models import Account, Dataset, Document, DocumentSegment, Tenant +from models import ( + Account, + AccountStatus, + Dataset, + DatasetPermissionEnum, + Document, + DocumentSegment, + Tenant, + TenantStatus, +) from models.enums import DataSourceType, DocumentCreatedFrom, DocumentDocType, IndexingStatus, SegmentStatus from tasks.delete_segment_from_index_task import delete_segment_from_index_task @@ -37,7 +47,7 @@ class TestDeleteSegmentFromIndexTask: and realistic testing environment with actual database interactions. """ - def _create_test_tenant(self, db_session_with_containers, fake=None): + def _create_test_tenant(self, db_session_with_containers: Session, fake: Faker | None = None): """ Helper method to create a test tenant with realistic data. 
@@ -49,7 +59,7 @@ class TestDeleteSegmentFromIndexTask: Tenant: Created test tenant instance """ fake = fake or Faker() - tenant = Tenant(name=f"Test Tenant {fake.company()}", plan="basic", status="normal") + tenant = Tenant(name=f"Test Tenant {fake.company()}", plan="basic", status=TenantStatus.NORMAL) tenant.id = fake.uuid4() tenant.created_at = fake.date_time_this_year() tenant.updated_at = tenant.created_at @@ -58,7 +68,7 @@ class TestDeleteSegmentFromIndexTask: db_session_with_containers.commit() return tenant - def _create_test_account(self, db_session_with_containers, tenant, fake=None): + def _create_test_account(self, db_session_with_containers: Session, tenant, fake: Faker | None = None): """ Helper method to create a test account with realistic data. @@ -75,7 +85,7 @@ class TestDeleteSegmentFromIndexTask: name=fake.name(), email=fake.email(), avatar=fake.url(), - status="active", + status=AccountStatus.ACTIVE, interface_language="en-US", ) account.id = fake.uuid4() @@ -86,7 +96,9 @@ class TestDeleteSegmentFromIndexTask: db_session_with_containers.commit() return account - def _create_test_dataset(self, db_session_with_containers, tenant, account, fake=None): + def _create_test_dataset( + self, db_session_with_containers: Session, tenant: Tenant, account: Account, fake: Faker | None = None + ): """ Helper method to create a test dataset with realistic data. 
@@ -106,7 +118,7 @@ class TestDeleteSegmentFromIndexTask: dataset.name = f"Test Dataset {fake.word()}" dataset.description = fake.text(max_nb_chars=200) dataset.provider = "vendor" - dataset.permission = "only_me" + dataset.permission = DatasetPermissionEnum.ONLY_ME dataset.data_source_type = DataSourceType.UPLOAD_FILE dataset.indexing_technique = IndexTechniqueType.HIGH_QUALITY dataset.index_struct = '{"type": "paragraph"}' @@ -122,7 +134,7 @@ class TestDeleteSegmentFromIndexTask: db_session_with_containers.commit() return dataset - def _create_test_document(self, db_session_with_containers, dataset, account, fake=None, **kwargs): + def _create_test_document(self, db_session_with_containers: Session, dataset, account, fake=None, **kwargs): """ Helper method to create a test document with realistic data. @@ -172,7 +184,14 @@ class TestDeleteSegmentFromIndexTask: db_session_with_containers.commit() return document - def _create_test_document_segments(self, db_session_with_containers, document, account, count=3, fake=None): + def _create_test_document_segments( + self, + db_session_with_containers: Session, + document: Document, + account: Account, + count: int = 3, + fake: Faker | None = None, + ): """ Helper method to create test document segments with realistic data. @@ -218,7 +237,9 @@ class TestDeleteSegmentFromIndexTask: return segments @patch("tasks.delete_segment_from_index_task.IndexProcessorFactory", autospec=True) - def test_delete_segment_from_index_task_success(self, mock_index_processor_factory, db_session_with_containers): + def test_delete_segment_from_index_task_success( + self, mock_index_processor_factory, db_session_with_containers: Session + ): """ Test successful segment deletion from index with comprehensive verification. 
@@ -267,7 +288,7 @@ class TestDeleteSegmentFromIndexTask: assert call_args[1]["with_keywords"] is True assert call_args[1]["delete_child_chunks"] is True - def test_delete_segment_from_index_task_dataset_not_found(self, db_session_with_containers): + def test_delete_segment_from_index_task_dataset_not_found(self, db_session_with_containers: Session): """ Test task behavior when dataset is not found. @@ -288,7 +309,7 @@ class TestDeleteSegmentFromIndexTask: # Verify the task completed without exceptions assert result is None # Task should return None when dataset not found - def test_delete_segment_from_index_task_document_not_found(self, db_session_with_containers): + def test_delete_segment_from_index_task_document_not_found(self, db_session_with_containers: Session): """ Test task behavior when document is not found. @@ -314,7 +335,7 @@ class TestDeleteSegmentFromIndexTask: # Verify the task completed without exceptions assert result is None # Task should return None when document not found - def test_delete_segment_from_index_task_document_disabled(self, db_session_with_containers): + def test_delete_segment_from_index_task_document_disabled(self, db_session_with_containers: Session): """ Test task behavior when document is disabled. @@ -342,7 +363,7 @@ class TestDeleteSegmentFromIndexTask: # Verify the task completed without exceptions assert result is None # Task should return None when document is disabled - def test_delete_segment_from_index_task_document_archived(self, db_session_with_containers): + def test_delete_segment_from_index_task_document_archived(self, db_session_with_containers: Session): """ Test task behavior when document is archived. 
@@ -370,7 +391,7 @@ class TestDeleteSegmentFromIndexTask: # Verify the task completed without exceptions assert result is None # Task should return None when document is archived - def test_delete_segment_from_index_task_document_not_completed(self, db_session_with_containers): + def test_delete_segment_from_index_task_document_not_completed(self, db_session_with_containers: Session): """ Test task behavior when document indexing is not completed. diff --git a/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py index 6e03bd9351..6a95bfc425 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_disable_segments_from_index_task.py @@ -7,13 +7,14 @@ The task is responsible for removing document segments from the search index whe """ from unittest.mock import MagicMock, patch +from uuid import uuid4 from faker import Faker from sqlalchemy import select from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType -from models import Account, Dataset, DocumentSegment +from models import Account, AccountStatus, Dataset, DocumentSegment, TenantAccountRole, TenantStatus from models import Document as DatasetDocument from models.dataset import DatasetProcessRule from models.enums import DataSourceType, DocumentCreatedFrom, ProcessRuleMode, SegmentStatus @@ -35,7 +36,7 @@ class TestDisableSegmentsFromIndexTask: and realistic testing environment with actual database interactions. """ - def _create_test_account(self, db_session_with_containers: Session, fake=None): + def _create_test_account(self, db_session_with_containers: Session, fake: Faker | None = None): """ Helper method to create a test account with realistic data. 
@@ -51,24 +52,23 @@ class TestDisableSegmentsFromIndexTask: email=fake.email(), name=fake.name(), avatar=fake.url(), - status="active", + status=AccountStatus.ACTIVE, interface_language="en-US", ) - account.id = fake.uuid4() # monkey-patch attributes for test setup + account.updated_at = fake.date_time_this_year() + account.created_at = fake.date_time_this_year() + account.role = TenantAccountRole.OWNER + account.id = fake.uuid4() account.tenant_id = fake.uuid4() account.type = "normal" - account.role = "owner" - account.created_at = fake.date_time_this_year() - account.updated_at = account.created_at - # Create a tenant for the account from models.account import Tenant tenant = Tenant( name=f"Test Tenant {fake.company()}", plan="basic", - status="normal", + status=TenantStatus.NORMAL, ) tenant.id = account.tenant_id tenant.created_at = fake.date_time_this_year() @@ -83,7 +83,7 @@ class TestDisableSegmentsFromIndexTask: return account - def _create_test_dataset(self, db_session_with_containers: Session, account, fake=None): + def _create_test_dataset(self, db_session_with_containers: Session, account: Account, fake: Faker | None = None): """ Helper method to create a test dataset with realistic data. @@ -117,7 +117,9 @@ class TestDisableSegmentsFromIndexTask: return dataset - def _create_test_document(self, db_session_with_containers: Session, dataset, account, fake=None): + def _create_test_document( + self, db_session_with_containers: Session, dataset: Dataset, account: Account, fake: Faker | None = None + ): """ Helper method to create a test document with realistic data. @@ -163,7 +165,7 @@ class TestDisableSegmentsFromIndexTask: return document def _create_test_segments( - self, db_session_with_containers: Session, document, dataset, account, count=3, fake=None + self, db_session_with_containers: Session, document, dataset: Dataset, account: Account, count=3, fake=None ): """ Helper method to create test document segments with realistic data. 
@@ -216,7 +218,9 @@ class TestDisableSegmentsFromIndexTask: return segments - def _create_dataset_process_rule(self, db_session_with_containers: Session, dataset, fake=None): + def _create_dataset_process_rule( + self, db_session_with_containers: Session, dataset: Dataset, fake: Faker | None = None + ): """ Helper method to create a dataset process rule. @@ -229,21 +233,19 @@ class TestDisableSegmentsFromIndexTask: DatasetProcessRule: Created process rule instance """ fake = fake or Faker() - process_rule = DatasetProcessRule() - process_rule.id = fake.uuid4() - process_rule.tenant_id = dataset.tenant_id - process_rule.dataset_id = dataset.id - process_rule.mode = ProcessRuleMode.AUTOMATIC - process_rule.rules = ( - "{" - '"mode": "automatic", ' - '"rules": {' - '"pre_processing_rules": [], "segmentation": ' - '{"separator": "\\n\\n", "max_tokens": 1000, "chunk_overlap": 50}}' - "}" + process_rule = DatasetProcessRule( + dataset_id=dataset.id, + mode=ProcessRuleMode.AUTOMATIC, + rules=( + "{" + '"mode": "automatic", ' + '"rules": {' + '"pre_processing_rules": [], "segmentation": ' + '{"separator": "\\n\\n", "max_tokens": 1000, "chunk_overlap": 50}}' + "}" + ), + created_by=str(uuid4()), ) - process_rule.created_by = dataset.created_by - process_rule.updated_by = dataset.updated_by db_session_with_containers.add(process_rule) db_session_with_containers.commit() diff --git a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_sync_task.py b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_sync_task.py index b6e7e6e5c9..77cd259833 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_sync_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_sync_task.py @@ -13,6 +13,7 @@ from uuid import uuid4 import pytest from sqlalchemy import delete, func, select, update +from sqlalchemy.orm import Session from core.indexing_runner import DocumentIsPausedError, 
IndexingRunner from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType @@ -162,7 +163,7 @@ class TestDocumentIndexingSyncTask: "indexing_runner": indexing_runner, } - def _create_notion_sync_context(self, db_session_with_containers, *, data_source_info: dict | None = None): + def _create_notion_sync_context(self, db_session_with_containers: Session, *, data_source_info: dict | None = None): account, tenant = DocumentIndexingSyncTaskTestDataFactory.create_account_with_tenant(db_session_with_containers) dataset = DocumentIndexingSyncTaskTestDataFactory.create_dataset( db_session_with_containers, @@ -206,7 +207,7 @@ class TestDocumentIndexingSyncTask: "notion_info": notion_info, } - def test_document_not_found(self, db_session_with_containers, mock_external_dependencies): + def test_document_not_found(self, db_session_with_containers: Session, mock_external_dependencies): """Test that task handles missing document gracefully.""" # Arrange dataset_id = str(uuid4()) @@ -219,7 +220,7 @@ class TestDocumentIndexingSyncTask: mock_external_dependencies["datasource_service"].get_datasource_credentials.assert_not_called() mock_external_dependencies["indexing_runner"].run.assert_not_called() - def test_missing_notion_workspace_id(self, db_session_with_containers, mock_external_dependencies): + def test_missing_notion_workspace_id(self, db_session_with_containers: Session, mock_external_dependencies): """Test that task raises error when notion_workspace_id is missing.""" # Arrange context = self._create_notion_sync_context( @@ -235,7 +236,7 @@ class TestDocumentIndexingSyncTask: with pytest.raises(ValueError, match="no notion page found"): document_indexing_sync_task(context["dataset"].id, context["document"].id) - def test_missing_notion_page_id(self, db_session_with_containers, mock_external_dependencies): + def test_missing_notion_page_id(self, db_session_with_containers: Session, mock_external_dependencies): """Test that task raises 
error when notion_page_id is missing.""" # Arrange context = self._create_notion_sync_context( @@ -251,7 +252,7 @@ class TestDocumentIndexingSyncTask: with pytest.raises(ValueError, match="no notion page found"): document_indexing_sync_task(context["dataset"].id, context["document"].id) - def test_empty_data_source_info(self, db_session_with_containers, mock_external_dependencies): + def test_empty_data_source_info(self, db_session_with_containers: Session, mock_external_dependencies): """Test that task raises error when data_source_info is empty.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers, data_source_info=None) @@ -264,7 +265,7 @@ class TestDocumentIndexingSyncTask: with pytest.raises(ValueError, match="no notion page found"): document_indexing_sync_task(context["dataset"].id, context["document"].id) - def test_credential_not_found(self, db_session_with_containers, mock_external_dependencies): + def test_credential_not_found(self, db_session_with_containers: Session, mock_external_dependencies): """Test that task sets document error state when credential is missing.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers) @@ -284,7 +285,7 @@ class TestDocumentIndexingSyncTask: assert updated_document.stopped_at is not None mock_external_dependencies["indexing_runner"].run.assert_not_called() - def test_page_not_updated(self, db_session_with_containers, mock_external_dependencies): + def test_page_not_updated(self, db_session_with_containers: Session, mock_external_dependencies): """Test that task exits early when notion page is unchanged.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers) @@ -310,7 +311,7 @@ class TestDocumentIndexingSyncTask: mock_external_dependencies["index_processor"].clean.assert_not_called() mock_external_dependencies["indexing_runner"].run.assert_not_called() - def test_successful_sync_when_page_updated(self, db_session_with_containers, 
mock_external_dependencies): + def test_successful_sync_when_page_updated(self, db_session_with_containers: Session, mock_external_dependencies): """Test full successful sync flow with SQL state updates and side effects.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers) @@ -349,7 +350,7 @@ class TestDocumentIndexingSyncTask: assert len(run_documents) == 1 assert getattr(run_documents[0], "id", None) == context["document"].id - def test_dataset_not_found_during_cleaning(self, db_session_with_containers, mock_external_dependencies): + def test_dataset_not_found_during_cleaning(self, db_session_with_containers: Session, mock_external_dependencies): """Test that task still updates document and reindexes if dataset vanishes before clean.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers) @@ -376,7 +377,9 @@ class TestDocumentIndexingSyncTask: mock_external_dependencies["index_processor"].clean.assert_not_called() mock_external_dependencies["indexing_runner"].run.assert_called_once() - def test_cleaning_error_continues_to_indexing(self, db_session_with_containers, mock_external_dependencies): + def test_cleaning_error_continues_to_indexing( + self, db_session_with_containers: Session, mock_external_dependencies + ): """Test that indexing continues when index cleanup fails.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers) @@ -400,7 +403,9 @@ class TestDocumentIndexingSyncTask: assert remaining_segments == 0 mock_external_dependencies["indexing_runner"].run.assert_called_once() - def test_indexing_runner_document_paused_error(self, db_session_with_containers, mock_external_dependencies): + def test_indexing_runner_document_paused_error( + self, db_session_with_containers: Session, mock_external_dependencies + ): """Test that DocumentIsPausedError does not flip document into error state.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers) @@ 
-418,7 +423,7 @@ class TestDocumentIndexingSyncTask: assert updated_document.indexing_status == IndexingStatus.PARSING assert updated_document.error is None - def test_indexing_runner_general_error(self, db_session_with_containers, mock_external_dependencies): + def test_indexing_runner_general_error(self, db_session_with_containers: Session, mock_external_dependencies): """Test that indexing errors are persisted to document state.""" # Arrange context = self._create_notion_sync_context(db_session_with_containers) diff --git a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_task.py b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_task.py index cf1a8666f3..6c1454b6d8 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_task.py @@ -3,11 +3,12 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker +from sqlalchemy.orm import Session from core.entities.document_task import DocumentTask from core.rag.index_processor.constant.index_type import IndexTechniqueType from enums.cloud_plan import CloudPlan -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Dataset, Document from models.enums import DataSourceType, DocumentCreatedFrom, IndexingStatus from tasks.document_indexing_task import ( @@ -51,7 +52,7 @@ class TestDocumentIndexingTasks: } def _create_test_dataset_and_documents( - self, db_session_with_containers, mock_external_service_dependencies, document_count=3 + self, db_session_with_containers: Session, mock_external_service_dependencies, document_count=3 ): """ Helper method to create a test dataset and documents for testing. 
@@ -71,14 +72,14 @@ class TestDocumentIndexingTasks: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.commit() tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) db_session_with_containers.commit() @@ -133,7 +134,7 @@ class TestDocumentIndexingTasks: return dataset, documents def _create_test_dataset_with_billing_features( - self, db_session_with_containers, mock_external_service_dependencies, billing_enabled=True + self, db_session_with_containers: Session, mock_external_service_dependencies, billing_enabled=True ): """ Helper method to create a test dataset with billing features configured. @@ -153,14 +154,14 @@ class TestDocumentIndexingTasks: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.commit() tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) db_session_with_containers.commit() @@ -221,7 +222,9 @@ class TestDocumentIndexingTasks: return dataset, documents - def test_document_indexing_task_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_document_indexing_task_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test successful document indexing with multiple documents. @@ -262,7 +265,7 @@ class TestDocumentIndexingTasks: assert len(processed_documents) == 3 def test_document_indexing_task_dataset_not_found( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of non-existent dataset. 
@@ -286,7 +289,7 @@ class TestDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_not_called() def test_document_indexing_task_document_not_found_in_dataset( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling when some documents don't exist in the dataset. @@ -332,7 +335,7 @@ class TestDocumentIndexingTasks: assert len(processed_documents) == 2 # Only existing documents def test_document_indexing_task_indexing_runner_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of IndexingRunner exceptions. @@ -373,7 +376,7 @@ class TestDocumentIndexingTasks: assert updated_document.processing_started_at is not None def test_document_indexing_task_mixed_document_states( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test processing documents with mixed initial states. @@ -456,7 +459,7 @@ class TestDocumentIndexingTasks: assert len(processed_documents) == 4 def test_document_indexing_task_billing_sandbox_plan_batch_limit( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test billing validation for sandbox plan batch upload limit. @@ -518,7 +521,7 @@ class TestDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner"].assert_not_called() def test_document_indexing_task_billing_disabled_success( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful processing when billing is disabled. 
@@ -554,7 +557,7 @@ class TestDocumentIndexingTasks: assert updated_document.processing_started_at is not None def test_document_indexing_task_document_is_paused_error( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of DocumentIsPausedError from IndexingRunner. @@ -597,7 +600,9 @@ class TestDocumentIndexingTasks: assert updated_document.processing_started_at is not None # ==================== NEW TESTS FOR REFACTORED FUNCTIONS ==================== - def test_old_document_indexing_task_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_old_document_indexing_task_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test document_indexing_task basic functionality. @@ -619,7 +624,7 @@ class TestDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once() def test_normal_document_indexing_task_success( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test normal_document_indexing_task basic functionality. @@ -643,7 +648,7 @@ class TestDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once() def test_priority_document_indexing_task_success( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test priority_document_indexing_task basic functionality. 
@@ -667,7 +672,7 @@ class TestDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once() def test_document_indexing_with_tenant_queue_success( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test _document_indexing_with_tenant_queue function with no waiting tasks. @@ -717,7 +722,7 @@ class TestDocumentIndexingTasks: mock_task_func.delay.assert_not_called() def test_document_indexing_with_tenant_queue_with_waiting_tasks( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test _document_indexing_with_tenant_queue function with waiting tasks in queue using real Redis. @@ -776,7 +781,7 @@ class TestDocumentIndexingTasks: assert len(remaining_tasks) == 1 def test_document_indexing_with_tenant_queue_error_handling( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test error handling in _document_indexing_with_tenant_queue using real Redis. @@ -848,7 +853,7 @@ class TestDocumentIndexingTasks: assert len(remaining_tasks) == 0 def test_document_indexing_with_tenant_queue_tenant_isolation( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test tenant isolation in _document_indexing_with_tenant_queue using real Redis. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_update_task.py b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_update_task.py index a9a8c0f30c..208fc1aa1d 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_document_indexing_update_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_document_indexing_update_task.py @@ -3,9 +3,10 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker from sqlalchemy import func, select +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Dataset, Document, DocumentSegment from models.enums import DataSourceType, DocumentCreatedFrom, IndexingStatus, SegmentStatus from tasks.document_indexing_update_task import document_indexing_update_task @@ -33,7 +34,7 @@ class TestDocumentIndexingUpdateTask: "runner_instance": runner_instance, } - def _create_dataset_document_with_segments(self, db_session_with_containers, *, segment_count: int = 2): + def _create_dataset_document_with_segments(self, db_session_with_containers: Session, *, segment_count: int = 2): fake = Faker() # Account and tenant @@ -41,12 +42,12 @@ class TestDocumentIndexingUpdateTask: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.commit() - tenant = Tenant(name=fake.company(), status="normal") + tenant = Tenant(name=fake.company(), status=TenantStatus.NORMAL) db_session_with_containers.add(tenant) db_session_with_containers.commit() @@ -114,7 +115,7 @@ class TestDocumentIndexingUpdateTask: return dataset, document, node_ids - def 
test_cleans_segments_and_reindexes(self, db_session_with_containers, mock_external_dependencies): + def test_cleans_segments_and_reindexes(self, db_session_with_containers: Session, mock_external_dependencies): dataset, document, node_ids = self._create_dataset_document_with_segments(db_session_with_containers) # Act @@ -153,7 +154,9 @@ class TestDocumentIndexingUpdateTask: first = run_docs[0] assert getattr(first, "id", None) == document.id - def test_clean_error_is_logged_and_indexing_continues(self, db_session_with_containers, mock_external_dependencies): + def test_clean_error_is_logged_and_indexing_continues( + self, db_session_with_containers: Session, mock_external_dependencies + ): dataset, document, node_ids = self._create_dataset_document_with_segments(db_session_with_containers) # Force clean to raise; task should continue to indexing @@ -173,7 +176,7 @@ class TestDocumentIndexingUpdateTask: ) assert remaining > 0 - def test_document_not_found_noop(self, db_session_with_containers, mock_external_dependencies): + def test_document_not_found_noop(self, db_session_with_containers: Session, mock_external_dependencies): fake = Faker() # Act with non-existent document id document_indexing_update_task(dataset_id=fake.uuid4(), document_id=fake.uuid4()) diff --git a/api/tests/test_containers_integration_tests/tasks/test_duplicate_document_indexing_task.py b/api/tests/test_containers_integration_tests/tasks/test_duplicate_document_indexing_task.py index 39c58987fd..12440f3e6b 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_duplicate_document_indexing_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_duplicate_document_indexing_task.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker from sqlalchemy import select +from sqlalchemy.orm import Session from core.indexing_runner import DocumentIsPausedError from core.rag.index_processor.constant.index_type import IndexStructureType, 
IndexTechniqueType @@ -62,7 +63,7 @@ class TestDuplicateDocumentIndexingTasks: } def _create_test_dataset_and_documents( - self, db_session_with_containers, mock_external_service_dependencies, document_count=3 + self, db_session_with_containers: Session, mock_external_service_dependencies, document_count=3 ): """ Helper method to create a test dataset and documents for testing. @@ -145,7 +146,11 @@ class TestDuplicateDocumentIndexingTasks: return dataset, documents def _create_test_dataset_with_segments( - self, db_session_with_containers, mock_external_service_dependencies, document_count=3, segments_per_doc=2 + self, + db_session_with_containers: Session, + mock_external_service_dependencies, + document_count=3, + segments_per_doc=2, ): """ Helper method to create a test dataset with documents and segments. @@ -197,7 +202,7 @@ class TestDuplicateDocumentIndexingTasks: return dataset, documents, segments def _create_test_dataset_with_billing_features( - self, db_session_with_containers, mock_external_service_dependencies, billing_enabled=True + self, db_session_with_containers: Session, mock_external_service_dependencies, billing_enabled=True ): """ Helper method to create a test dataset with billing features configured. @@ -287,7 +292,7 @@ class TestDuplicateDocumentIndexingTasks: return dataset, documents def _test_duplicate_document_indexing_task_success( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful duplicate document indexing with multiple documents. 
@@ -329,7 +334,7 @@ class TestDuplicateDocumentIndexingTasks: assert len(processed_documents) == 3 def _test_duplicate_document_indexing_task_with_segment_cleanup( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test duplicate document indexing with existing segments that need cleanup. @@ -379,7 +384,7 @@ class TestDuplicateDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once() def _test_duplicate_document_indexing_task_dataset_not_found( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of non-existent dataset. @@ -404,7 +409,7 @@ class TestDuplicateDocumentIndexingTasks: mock_external_service_dependencies["index_processor"].clean.assert_not_called() def test_duplicate_document_indexing_task_document_not_found_in_dataset( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling when some documents don't exist in the dataset. @@ -450,7 +455,7 @@ class TestDuplicateDocumentIndexingTasks: assert len(processed_documents) == 2 # Only existing documents def _test_duplicate_document_indexing_task_indexing_runner_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of IndexingRunner exceptions. 
@@ -491,7 +496,7 @@ class TestDuplicateDocumentIndexingTasks: assert updated_document.processing_started_at is not None def _test_duplicate_document_indexing_task_billing_sandbox_plan_batch_limit( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test billing validation for sandbox plan batch upload limit. @@ -554,7 +559,7 @@ class TestDuplicateDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_not_called() def _test_duplicate_document_indexing_task_billing_vector_space_limit_exceeded( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test billing validation for vector space limit. @@ -596,7 +601,7 @@ class TestDuplicateDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_not_called() def test_duplicate_document_indexing_task_with_empty_document_list( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test handling of empty document list. @@ -622,7 +627,7 @@ class TestDuplicateDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once_with([]) def test_deprecated_duplicate_document_indexing_task_delegates_to_core( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test that deprecated duplicate_document_indexing_task delegates to core function. 
@@ -655,7 +660,7 @@ class TestDuplicateDocumentIndexingTasks: @patch("tasks.duplicate_document_indexing_task.TenantIsolatedTaskQueue", autospec=True) def test_normal_duplicate_document_indexing_task_with_tenant_queue( - self, mock_queue_class, db_session_with_containers, mock_external_service_dependencies + self, mock_queue_class, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test normal_duplicate_document_indexing_task with tenant isolation queue. @@ -698,7 +703,7 @@ class TestDuplicateDocumentIndexingTasks: @patch("tasks.duplicate_document_indexing_task.TenantIsolatedTaskQueue", autospec=True) def test_priority_duplicate_document_indexing_task_with_tenant_queue( - self, mock_queue_class, db_session_with_containers, mock_external_service_dependencies + self, mock_queue_class, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test priority_duplicate_document_indexing_task with tenant isolation queue. @@ -742,7 +747,7 @@ class TestDuplicateDocumentIndexingTasks: @patch("tasks.duplicate_document_indexing_task.TenantIsolatedTaskQueue", autospec=True) def test_tenant_queue_wrapper_processes_next_tasks( - self, mock_queue_class, db_session_with_containers, mock_external_service_dependencies + self, mock_queue_class, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test tenant queue wrapper processes next queued tasks. 
@@ -789,7 +794,7 @@ class TestDuplicateDocumentIndexingTasks: mock_queue.delete_task_key.assert_not_called() def test_successful_duplicate_document_indexing( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test successful duplicate document indexing flow.""" self._test_duplicate_document_indexing_task_success( @@ -797,7 +802,7 @@ class TestDuplicateDocumentIndexingTasks: ) def test_duplicate_document_indexing_dataset_not_found( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test duplicate document indexing when dataset is not found.""" self._test_duplicate_document_indexing_task_dataset_not_found( @@ -805,7 +810,7 @@ class TestDuplicateDocumentIndexingTasks: ) def test_duplicate_document_indexing_with_billing_enabled_sandbox_plan( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test duplicate document indexing with billing enabled and sandbox plan.""" self._test_duplicate_document_indexing_task_billing_sandbox_plan_batch_limit( @@ -813,7 +818,7 @@ class TestDuplicateDocumentIndexingTasks: ) def test_duplicate_document_indexing_with_billing_limit_exceeded( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test duplicate document indexing when billing limit is exceeded.""" self._test_duplicate_document_indexing_task_billing_vector_space_limit_exceeded( @@ -821,7 +826,7 @@ class TestDuplicateDocumentIndexingTasks: ) def test_duplicate_document_indexing_runner_error( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test duplicate 
document indexing when IndexingRunner raises an error.""" self._test_duplicate_document_indexing_task_indexing_runner_exception( @@ -829,7 +834,7 @@ class TestDuplicateDocumentIndexingTasks: ) def _test_duplicate_document_indexing_task_document_is_paused( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test duplicate document indexing when document is paused.""" # Arrange @@ -860,7 +865,7 @@ class TestDuplicateDocumentIndexingTasks: mock_external_service_dependencies["indexing_runner_instance"].run.assert_called_once() def test_duplicate_document_indexing_document_is_paused( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test duplicate document indexing when document is paused.""" self._test_duplicate_document_indexing_task_document_is_paused( @@ -868,7 +873,7 @@ class TestDuplicateDocumentIndexingTasks: ) def test_duplicate_document_indexing_cleans_old_segments( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """Test that duplicate document indexing cleans old segments.""" self._test_duplicate_document_indexing_task_with_segment_cleanup( diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py index ff72232d12..c4895839c9 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py @@ -5,6 +5,7 @@ from faker import Faker from sqlalchemy.orm import Session from libs.email_i18n import EmailType +from models import TenantStatus from models.account import Account, Tenant, TenantAccountJoin, 
TenantAccountRole from tasks.mail_account_deletion_task import send_account_deletion_verification_code, send_deletion_success_task @@ -55,7 +56,7 @@ class TestMailAccountDeletionTask: # Create tenant tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) db_session_with_containers.commit() diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_change_mail_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_change_mail_task.py index 177af266fb..a697878bb6 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_change_mail_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_change_mail_task.py @@ -2,6 +2,7 @@ from unittest.mock import patch import pytest from faker import Faker +from sqlalchemy.orm import Session from libs.email_i18n import EmailType from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole @@ -29,7 +30,7 @@ class TestMailChangeMailTask: "get_email_i18n_service": mock_get_email_i18n_service, } - def _create_test_account(self, db_session_with_containers): + def _create_test_account(self, db_session_with_containers: Session): """ Helper method to create a test account for testing. @@ -72,7 +73,7 @@ class TestMailChangeMailTask: return account def test_send_change_mail_task_success_old_email_phase( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful change email task execution for old_email phase. @@ -103,7 +104,7 @@ class TestMailChangeMailTask: ) def test_send_change_mail_task_success_new_email_phase( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful change email task execution for new_email phase. 
@@ -134,7 +135,7 @@ class TestMailChangeMailTask: ) def test_send_change_mail_task_mail_not_initialized( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test change email task when mail service is not initialized. @@ -159,7 +160,7 @@ class TestMailChangeMailTask: mock_external_service_dependencies["email_i18n_service"].send_change_email.assert_not_called() def test_send_change_mail_task_email_service_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test change email task when email service raises an exception. @@ -191,7 +192,7 @@ class TestMailChangeMailTask: ) def test_send_change_mail_completed_notification_task_success( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful change email completed notification task execution. @@ -224,7 +225,7 @@ class TestMailChangeMailTask: ) def test_send_change_mail_completed_notification_task_mail_not_initialized( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test change email completed notification task when mail service is not initialized. @@ -247,7 +248,7 @@ class TestMailChangeMailTask: mock_external_service_dependencies["email_i18n_service"].send_email.assert_not_called() def test_send_change_mail_completed_notification_task_email_service_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test change email completed notification task when email service raises an exception. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py index 8343711998..0eec166fe2 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py @@ -15,8 +15,10 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker from sqlalchemy import delete +from sqlalchemy.orm import Session from libs.email_i18n import EmailType +from models import AccountStatus, TenantStatus from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole from tasks.mail_email_code_login import send_email_code_login_mail_task @@ -37,7 +39,7 @@ class TestSendEmailCodeLoginMailTask: """ @pytest.fixture(autouse=True) - def cleanup_database(self, db_session_with_containers): + def cleanup_database(self, db_session_with_containers: Session): """Clean up database before each test to ensure isolation.""" from extensions.ext_redis import redis_client @@ -71,7 +73,7 @@ class TestSendEmailCodeLoginMailTask: "email_service_instance": mock_email_service_instance, } - def _create_test_account(self, db_session_with_containers, fake=None): + def _create_test_account(self, db_session_with_containers: Session, fake: Faker | None = None): """ Helper method to create a test account for testing. 
@@ -90,7 +92,7 @@ class TestSendEmailCodeLoginMailTask: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) @@ -98,7 +100,7 @@ class TestSendEmailCodeLoginMailTask: return account - def _create_test_tenant_and_account(self, db_session_with_containers, fake=None): + def _create_test_tenant_and_account(self, db_session_with_containers: Session, fake: Faker | None = None): """ Helper method to create a test tenant and account for testing. @@ -119,7 +121,7 @@ class TestSendEmailCodeLoginMailTask: tenant = Tenant( name=fake.company(), plan="basic", - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) @@ -138,7 +140,7 @@ class TestSendEmailCodeLoginMailTask: return account, tenant def test_send_email_code_login_mail_task_success_english( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful email code login mail sending in English. @@ -182,7 +184,7 @@ class TestSendEmailCodeLoginMailTask: ) def test_send_email_code_login_mail_task_success_chinese( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful email code login mail sending in Chinese. @@ -221,7 +223,7 @@ class TestSendEmailCodeLoginMailTask: ) def test_send_email_code_login_mail_task_success_multiple_languages( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test successful email code login mail sending with multiple languages. 
@@ -261,7 +263,7 @@ class TestSendEmailCodeLoginMailTask: assert call_args[1]["template_context"]["code"] == test_codes[i] def test_send_email_code_login_mail_task_mail_not_initialized( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email code login mail task when mail service is not initialized. @@ -299,7 +301,7 @@ class TestSendEmailCodeLoginMailTask: mock_email_service_instance.send_email.assert_not_called() def test_send_email_code_login_mail_task_email_service_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email code login mail task when email service raises an exception. @@ -346,7 +348,7 @@ class TestSendEmailCodeLoginMailTask: ) def test_send_email_code_login_mail_task_invalid_parameters( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email code login mail task with invalid parameters. @@ -388,7 +390,7 @@ class TestSendEmailCodeLoginMailTask: mock_email_service_instance.send_email.assert_called_once() def test_send_email_code_login_mail_task_edge_cases( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email code login mail task with edge cases and boundary conditions. @@ -451,7 +453,7 @@ class TestSendEmailCodeLoginMailTask: ) def test_send_email_code_login_mail_task_database_integration( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email code login mail task with database integration. 
@@ -497,7 +499,7 @@ class TestSendEmailCodeLoginMailTask: assert account.status == "active" def test_send_email_code_login_mail_task_redis_integration( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email code login mail task with Redis integration. @@ -541,7 +543,7 @@ class TestSendEmailCodeLoginMailTask: redis_client.delete(cache_key) def test_send_email_code_login_mail_task_error_handling_comprehensive( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test comprehensive error handling for email code login mail task. diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py index 95a867dbb5..a452bee9f8 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py @@ -4,6 +4,7 @@ from unittest.mock import patch import pytest from sqlalchemy import delete +from sqlalchemy.orm import Session from configs import dify_config from core.app.app_config.entities import WorkflowUIBasedAppConfig @@ -30,7 +31,7 @@ from tasks.mail_human_input_delivery_task import dispatch_human_input_email_task @pytest.fixture(autouse=True) -def cleanup_database(db_session_with_containers): +def cleanup_database(db_session_with_containers: Session): db_session_with_containers.execute(delete(HumanInputFormRecipient)) db_session_with_containers.execute(delete(HumanInputDelivery)) db_session_with_containers.execute(delete(HumanInputForm)) @@ -42,7 +43,7 @@ def cleanup_database(db_session_with_containers): db_session_with_containers.commit() -def _create_workspace_member(db_session_with_containers): +def 
_create_workspace_member(db_session_with_containers: Session): account = Account( email="owner@example.com", name="Owner", @@ -172,7 +173,9 @@ def _create_workflow_pause_state( db_session_with_containers.commit() -def test_dispatch_human_input_email_task_integration(monkeypatch: pytest.MonkeyPatch, db_session_with_containers): +def test_dispatch_human_input_email_task_integration( + monkeypatch: pytest.MonkeyPatch, db_session_with_containers: Session +): tenant, account = _create_workspace_member(db_session_with_containers) workflow_run_id = str(uuid.uuid4()) workflow_id = str(uuid.uuid4()) diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_inner_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_inner_task.py index 1a20b6deec..f8e54ea9e6 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_inner_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_inner_task.py @@ -2,6 +2,7 @@ from unittest.mock import patch import pytest from faker import Faker +from sqlalchemy.orm import Session from tasks.mail_inner_task import send_inner_email_task @@ -51,7 +52,7 @@ class TestMailInnerTask: }, } - def test_send_inner_email_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_send_inner_email_success(self, db_session_with_containers: Session, mock_external_service_dependencies): """ Test successful email sending with valid data. @@ -90,7 +91,9 @@ class TestMailInnerTask: html_content="Test email content", ) - def test_send_inner_email_single_recipient(self, db_session_with_containers, mock_external_service_dependencies): + def test_send_inner_email_single_recipient( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test email sending with single recipient. 
@@ -126,7 +129,9 @@ class TestMailInnerTask: html_content="Test email content", ) - def test_send_inner_email_empty_substitutions(self, db_session_with_containers, mock_external_service_dependencies): + def test_send_inner_email_empty_substitutions( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test email sending with empty substitutions. @@ -163,7 +168,7 @@ class TestMailInnerTask: ) def test_send_inner_email_mail_not_initialized( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email sending when mail service is not initialized. @@ -193,7 +198,7 @@ class TestMailInnerTask: mock_external_service_dependencies["email_service"].send_raw_email.assert_not_called() def test_send_inner_email_template_rendering_error( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email sending when template rendering fails. @@ -222,7 +227,9 @@ class TestMailInnerTask: # Verify no email service calls due to exception mock_external_service_dependencies["email_service"].send_raw_email.assert_not_called() - def test_send_inner_email_service_error(self, db_session_with_containers, mock_external_service_dependencies): + def test_send_inner_email_service_error( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test email sending when email service fails. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py index d34828c4b1..c8c7a4d961 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_invite_member_task.py @@ -18,6 +18,7 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker from sqlalchemy import delete, select +from sqlalchemy.orm import Session from extensions.ext_redis import redis_client from libs.email_i18n import EmailType @@ -42,7 +43,7 @@ class TestMailInviteMemberTask: """ @pytest.fixture(autouse=True) - def cleanup_database(self, db_session_with_containers): + def cleanup_database(self, db_session_with_containers: Session): """Clean up database before each test to ensure isolation.""" # Clear all test data db_session_with_containers.execute(delete(TenantAccountJoin)) @@ -78,7 +79,7 @@ class TestMailInviteMemberTask: "config": mock_config, } - def _create_test_account_and_tenant(self, db_session_with_containers): + def _create_test_account_and_tenant(self, db_session_with_containers: Session): """ Helper method to create a test account and tenant for testing. @@ -147,7 +148,7 @@ class TestMailInviteMemberTask: redis_client.setex(cache_key, 24 * 60 * 60, json.dumps(invitation_data)) # 24 hours return token - def _create_pending_account_for_invitation(self, db_session_with_containers, email, tenant): + def _create_pending_account_for_invitation(self, db_session_with_containers: Session, email, tenant): """ Helper method to create a pending account for invitation testing. 
@@ -185,7 +186,9 @@ class TestMailInviteMemberTask: return account - def test_send_invite_member_mail_success(self, db_session_with_containers, mock_external_service_dependencies): + def test_send_invite_member_mail_success( + self, db_session_with_containers: Session, mock_external_service_dependencies + ): """ Test successful invitation email sending with all parameters. @@ -231,7 +234,7 @@ class TestMailInviteMemberTask: assert template_context["url"] == f"https://console.dify.ai/activate?token={token}" def test_send_invite_member_mail_different_languages( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test invitation email sending with different language codes. @@ -263,7 +266,7 @@ class TestMailInviteMemberTask: assert call_args[1]["language_code"] == language def test_send_invite_member_mail_mail_not_initialized( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test behavior when mail service is not initialized. @@ -292,7 +295,7 @@ class TestMailInviteMemberTask: mock_email_service.send_email.assert_not_called() def test_send_invite_member_mail_email_service_exception( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test error handling when email service raises an exception. @@ -322,7 +325,7 @@ class TestMailInviteMemberTask: assert "Send invite member mail to %s failed" in error_call def test_send_invite_member_mail_template_context_validation( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test template context contains all required fields for email rendering. 
@@ -368,7 +371,7 @@ class TestMailInviteMemberTask: assert template_context["url"] == f"https://console.dify.ai/activate?token={token}" def test_send_invite_member_mail_integration_with_redis_token( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test integration with Redis token validation. @@ -407,7 +410,7 @@ class TestMailInviteMemberTask: assert invitation_data["workspace_id"] == tenant.id def test_send_invite_member_mail_with_special_characters( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test email sending with special characters in names and workspace names. @@ -449,7 +452,7 @@ class TestMailInviteMemberTask: assert template_context["workspace_name"] == workspace_name def test_send_invite_member_mail_real_database_integration( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test real database integration with actual invitation flow. @@ -501,7 +504,7 @@ class TestMailInviteMemberTask: assert tenant_join.role == TenantAccountRole.NORMAL def test_send_invite_member_mail_token_lifecycle_management( - self, db_session_with_containers, mock_external_service_dependencies + self, db_session_with_containers: Session, mock_external_service_dependencies ): """ Test token lifecycle management and validation. 
diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_owner_transfer_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_owner_transfer_task.py index e08b099480..176645a4ab 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_owner_transfer_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_owner_transfer_task.py @@ -11,6 +11,7 @@ from unittest.mock import patch import pytest from faker import Faker +from sqlalchemy.orm import Session from libs.email_i18n import EmailType from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole @@ -44,7 +45,7 @@ class TestMailOwnerTransferTask: "get_email_service": mock_get_email_service, } - def _create_test_account_and_tenant(self, db_session_with_containers): + def _create_test_account_and_tenant(self, db_session_with_containers: Session): """ Helper method to create test account and tenant for testing. @@ -86,7 +87,9 @@ class TestMailOwnerTransferTask: return account, tenant - def test_send_owner_transfer_confirm_task_success(self, db_session_with_containers, mock_mail_dependencies): + def test_send_owner_transfer_confirm_task_success( + self, db_session_with_containers: Session, mock_mail_dependencies + ): """ Test successful owner transfer confirmation email sending. @@ -127,7 +130,7 @@ class TestMailOwnerTransferTask: assert call_args[1]["template_context"]["WorkspaceName"] == test_workspace def test_send_owner_transfer_confirm_task_mail_not_initialized( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test owner transfer confirmation email when mail service is not initialized. 
@@ -158,7 +161,7 @@ class TestMailOwnerTransferTask: mock_mail_dependencies["email_service"].send_email.assert_not_called() def test_send_owner_transfer_confirm_task_exception_handling( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test exception handling in owner transfer confirmation email. @@ -192,7 +195,7 @@ class TestMailOwnerTransferTask: mock_mail_dependencies["email_service"].send_email.assert_called_once() def test_send_old_owner_transfer_notify_email_task_success( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test successful old owner transfer notification email sending. @@ -234,7 +237,7 @@ class TestMailOwnerTransferTask: assert call_args[1]["template_context"]["NewOwnerEmail"] == test_new_owner_email def test_send_old_owner_transfer_notify_email_task_mail_not_initialized( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test old owner transfer notification email when mail service is not initialized. @@ -265,7 +268,7 @@ class TestMailOwnerTransferTask: mock_mail_dependencies["email_service"].send_email.assert_not_called() def test_send_old_owner_transfer_notify_email_task_exception_handling( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test exception handling in old owner transfer notification email. @@ -299,7 +302,7 @@ class TestMailOwnerTransferTask: mock_mail_dependencies["email_service"].send_email.assert_called_once() def test_send_new_owner_transfer_notify_email_task_success( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test successful new owner transfer notification email sending. 
@@ -338,7 +341,7 @@ class TestMailOwnerTransferTask: assert call_args[1]["template_context"]["WorkspaceName"] == test_workspace def test_send_new_owner_transfer_notify_email_task_mail_not_initialized( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test new owner transfer notification email when mail service is not initialized. @@ -367,7 +370,7 @@ class TestMailOwnerTransferTask: mock_mail_dependencies["email_service"].send_email.assert_not_called() def test_send_new_owner_transfer_notify_email_task_exception_handling( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """ Test exception handling in new owner transfer notification email. diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_register_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_register_task.py index cced6f7780..071971f324 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_register_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_register_task.py @@ -9,6 +9,7 @@ from unittest.mock import patch import pytest from faker import Faker +from sqlalchemy.orm import Session from libs.email_i18n import EmailType from tasks.mail_register_task import send_email_register_mail_task, send_email_register_mail_task_when_account_exist @@ -35,7 +36,7 @@ class TestMailRegisterTask: "get_email_service": mock_get_email_service, } - def test_send_email_register_mail_task_success(self, db_session_with_containers, mock_mail_dependencies): + def test_send_email_register_mail_task_success(self, db_session_with_containers: Session, mock_mail_dependencies): """Test successful email registration mail sending.""" fake = Faker() language = "en-US" @@ -56,7 +57,7 @@ class TestMailRegisterTask: ) def test_send_email_register_mail_task_mail_not_initialized( - self, 
db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """Test email registration task when mail service is not initialized.""" mock_mail_dependencies["mail"].is_inited.return_value = False @@ -66,7 +67,9 @@ class TestMailRegisterTask: mock_mail_dependencies["get_email_service"].assert_not_called() mock_mail_dependencies["email_service"].send_email.assert_not_called() - def test_send_email_register_mail_task_exception_handling(self, db_session_with_containers, mock_mail_dependencies): + def test_send_email_register_mail_task_exception_handling( + self, db_session_with_containers: Session, mock_mail_dependencies + ): """Test email registration task exception handling.""" mock_mail_dependencies["email_service"].send_email.side_effect = Exception("Email service error") @@ -79,7 +82,7 @@ class TestMailRegisterTask: mock_logger.exception.assert_called_once_with("Send email register mail to %s failed", to_email) def test_send_email_register_mail_task_when_account_exist_success( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """Test successful email registration mail sending when account exists.""" fake = Faker() @@ -105,7 +108,7 @@ class TestMailRegisterTask: ) def test_send_email_register_mail_task_when_account_exist_mail_not_initialized( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): """Test account exist email task when mail service is not initialized.""" mock_mail_dependencies["mail"].is_inited.return_value = False @@ -118,7 +121,7 @@ class TestMailRegisterTask: mock_mail_dependencies["email_service"].send_email.assert_not_called() def test_send_email_register_mail_task_when_account_exist_exception_handling( - self, db_session_with_containers, mock_mail_dependencies + self, db_session_with_containers: Session, mock_mail_dependencies ): 
"""Test account exist email task exception handling.""" mock_mail_dependencies["email_service"].send_email.side_effect = Exception("Email service error") diff --git a/api/tests/test_containers_integration_tests/tasks/test_rag_pipeline_run_tasks.py b/api/tests/test_containers_integration_tests/tasks/test_rag_pipeline_run_tasks.py index f01fcc1742..5eea985fdc 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_rag_pipeline_run_tasks.py +++ b/api/tests/test_containers_integration_tests/tasks/test_rag_pipeline_run_tasks.py @@ -4,12 +4,13 @@ from unittest.mock import MagicMock, patch import pytest from faker import Faker +from flask import Flask from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom, RagPipelineGenerateEntity from core.app.entities.rag_pipeline_invoke_entities import RagPipelineInvokeEntity from core.rag.pipeline.queue import TenantIsolatedTaskQueue -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Pipeline from models.workflow import Workflow from tasks.rag_pipeline.priority_rag_pipeline_run_task import ( @@ -69,14 +70,14 @@ class TestRagPipelineRunTasks: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.commit() tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) db_session_with_containers.commit() @@ -725,7 +726,7 @@ class TestRagPipelineRunTasks: assert queue1._task_key != queue2._task_key def test_run_single_rag_pipeline_task_success( - self, db_session_with_containers: Session, mock_pipeline_generator, flask_app_with_containers + self, db_session_with_containers: Session, mock_pipeline_generator, flask_app_with_containers: Flask ): 
""" Test successful run_single_rag_pipeline_task execution. @@ -760,7 +761,7 @@ class TestRagPipelineRunTasks: assert isinstance(call_kwargs["application_generate_entity"], RagPipelineGenerateEntity) def test_run_single_rag_pipeline_task_entity_validation_error( - self, db_session_with_containers: Session, mock_pipeline_generator, flask_app_with_containers + self, db_session_with_containers: Session, mock_pipeline_generator, flask_app_with_containers: Flask ): """ Test run_single_rag_pipeline_task with invalid entity data. @@ -805,7 +806,7 @@ class TestRagPipelineRunTasks: mock_pipeline_generator.assert_not_called() def test_run_single_rag_pipeline_task_database_entity_not_found( - self, db_session_with_containers: Session, mock_pipeline_generator, flask_app_with_containers + self, db_session_with_containers: Session, mock_pipeline_generator, flask_app_with_containers: Flask ): """ Test run_single_rag_pipeline_task with non-existent database entities. diff --git a/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py b/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py index b43b622870..204f533978 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py @@ -3,6 +3,7 @@ from unittest.mock import ANY, call, patch import pytest from sqlalchemy import delete, func, select +from sqlalchemy.orm import Session from core.db.session_factory import session_factory from extensions.storage.storage_type import StorageType @@ -20,7 +21,7 @@ from tasks.remove_app_and_related_data_task import ( @pytest.fixture(autouse=True) -def cleanup_database(db_session_with_containers): +def cleanup_database(db_session_with_containers: Session): db_session_with_containers.execute(delete(WorkflowDraftVariable)) 
db_session_with_containers.execute(delete(WorkflowDraftVariableFile)) db_session_with_containers.execute(delete(UploadFile)) @@ -29,7 +30,7 @@ def cleanup_database(db_session_with_containers): db_session_with_containers.commit() -def _create_tenant_and_app(db_session_with_containers): +def _create_tenant_and_app(db_session_with_containers: Session): tenant = Tenant(name=f"test_tenant_{uuid.uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -117,7 +118,7 @@ def _create_offload_data(db_session_with_containers, *, tenant_id: str, app_id: class TestDeleteDraftVariablesBatch: - def test_delete_draft_variables_batch_success(self, db_session_with_containers): + def test_delete_draft_variables_batch_success(self, db_session_with_containers: Session): """Test successful deletion of draft variables in batches.""" _, app1 = _create_tenant_and_app(db_session_with_containers) _, app2 = _create_tenant_and_app(db_session_with_containers) @@ -137,7 +138,7 @@ class TestDeleteDraftVariablesBatch: assert app1_remaining_count == 0 assert app2_remaining_count == 100 - def test_delete_draft_variables_batch_empty_result(self, db_session_with_containers): + def test_delete_draft_variables_batch_empty_result(self, db_session_with_containers: Session): """Test deletion when no draft variables exist for the app.""" result = delete_draft_variables_batch(str(uuid.uuid4()), 1000) @@ -176,7 +177,7 @@ class TestDeleteDraftVariableOffloadData: """Test the Offload data cleanup functionality.""" @patch("extensions.ext_storage.storage") - def test_delete_draft_variable_offload_data_success(self, mock_storage, db_session_with_containers): + def test_delete_draft_variable_offload_data_success(self, mock_storage, db_session_with_containers: Session): """Test successful deletion of offload data.""" tenant, app = _create_tenant_and_app(db_session_with_containers) offload_data = _create_offload_data(db_session_with_containers, tenant_id=tenant.id, app_id=app.id, 
count=3) diff --git a/api/tests/test_containers_integration_tests/test_opendal_fs_default_root.py b/api/tests/test_containers_integration_tests/test_opendal_fs_default_root.py index 34a1941c39..6365207661 100644 --- a/api/tests/test_containers_integration_tests/test_opendal_fs_default_root.py +++ b/api/tests/test_containers_integration_tests/test_opendal_fs_default_root.py @@ -1,12 +1,14 @@ from pathlib import Path +import pytest + from extensions.storage.opendal_storage import OpenDALStorage class TestOpenDALFsDefaultRoot: """Test that OpenDALStorage with scheme='fs' works correctly when no root is provided.""" - def test_fs_without_root_uses_default(self, tmp_path, monkeypatch): + def test_fs_without_root_uses_default(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch): """When no root is specified, the default 'storage' should be used and passed to the Operator.""" # Change to tmp_path so the default "storage" dir is created there monkeypatch.chdir(tmp_path) @@ -25,7 +27,7 @@ class TestOpenDALFsDefaultRoot: # Cleanup storage.delete("test_default_root.txt") - def test_fs_with_explicit_root(self, tmp_path): + def test_fs_with_explicit_root(self, tmp_path: Path): """When root is explicitly provided, it should be used.""" custom_root = str(tmp_path / "custom_storage") storage = OpenDALStorage(scheme="fs", root=custom_root) @@ -38,7 +40,7 @@ class TestOpenDALFsDefaultRoot: # Cleanup storage.delete("test_explicit_root.txt") - def test_fs_with_env_var_root(self, tmp_path, monkeypatch): + def test_fs_with_env_var_root(self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch): """When OPENDAL_FS_ROOT env var is set, it should be picked up via _get_opendal_kwargs.""" env_root = str(tmp_path / "env_storage") monkeypatch.setenv("OPENDAL_FS_ROOT", env_root) diff --git a/api/tests/test_containers_integration_tests/test_workflow_pause_integration.py b/api/tests/test_containers_integration_tests/test_workflow_pause_integration.py index b00d827e37..6402e7da2b 100644 --- 
a/api/tests/test_containers_integration_tests/test_workflow_pause_integration.py +++ b/api/tests/test_containers_integration_tests/test_workflow_pause_integration.py @@ -175,7 +175,7 @@ class TestWorkflowPauseIntegration: """Comprehensive integration tests for workflow pause functionality.""" @pytest.fixture(autouse=True) - def setup_test_data(self, db_session_with_containers): + def setup_test_data(self, db_session_with_containers: Session): """Set up test data for each test method using TestContainers.""" # Create test tenant and account diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_javascript.py b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_javascript.py index 19a41b6186..a5086b4c5d 100644 --- a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_javascript.py +++ b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_javascript.py @@ -1,12 +1,14 @@ from textwrap import dedent +from flask import Flask + from .test_utils import CodeExecutorTestMixin class TestJavaScriptCodeExecutor(CodeExecutorTestMixin): """Test class for JavaScript code executor functionality.""" - def test_javascript_plain(self, flask_app_with_containers): + def test_javascript_plain(self, flask_app_with_containers: Flask): """Test basic JavaScript code execution with console.log output""" CodeExecutor, CodeLanguage = self.code_executor_imports @@ -14,7 +16,7 @@ class TestJavaScriptCodeExecutor(CodeExecutorTestMixin): result_message = CodeExecutor.execute_code(language=CodeLanguage.JAVASCRIPT, preload="", code=code) assert result_message == "Hello World\n" - def test_javascript_json(self, flask_app_with_containers): + def test_javascript_json(self, flask_app_with_containers: Flask): """Test JavaScript code execution with JSON output""" CodeExecutor, CodeLanguage = self.code_executor_imports @@ -25,7 +27,7 @@ class 
TestJavaScriptCodeExecutor(CodeExecutorTestMixin): result = CodeExecutor.execute_code(language=CodeLanguage.JAVASCRIPT, preload="", code=code) assert result == '{"Hello":"World"}\n' - def test_javascript_with_code_template(self, flask_app_with_containers): + def test_javascript_with_code_template(self, flask_app_with_containers: Flask): """Test JavaScript workflow code template execution with inputs""" CodeExecutor, CodeLanguage = self.code_executor_imports JavascriptCodeProvider, _ = self.javascript_imports @@ -37,7 +39,7 @@ class TestJavaScriptCodeExecutor(CodeExecutorTestMixin): ) assert result == {"result": "HelloWorld"} - def test_javascript_get_runner_script(self, flask_app_with_containers): + def test_javascript_get_runner_script(self, flask_app_with_containers: Flask): """Test JavaScript template transformer runner script generation""" _, NodeJsTemplateTransformer = self.javascript_imports diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py index ddb079f00c..8b4c3c3d4a 100644 --- a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py +++ b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py @@ -1,12 +1,14 @@ import base64 +from flask import Flask + from .test_utils import CodeExecutorTestMixin class TestJinja2CodeExecutor(CodeExecutorTestMixin): """Test class for Jinja2 code executor functionality.""" - def test_jinja2(self, flask_app_with_containers): + def test_jinja2(self, flask_app_with_containers: Flask): """Test basic Jinja2 template execution with variable substitution""" CodeExecutor, CodeLanguage = self.code_executor_imports _, Jinja2TemplateTransformer = self.jinja2_imports @@ -25,7 +27,7 @@ class TestJinja2CodeExecutor(CodeExecutorTestMixin): ) assert result == "<>Hello World<>\n" - def 
test_jinja2_with_code_template(self, flask_app_with_containers): + def test_jinja2_with_code_template(self, flask_app_with_containers: Flask): """Test Jinja2 workflow code template execution with inputs""" CodeExecutor, CodeLanguage = self.code_executor_imports @@ -34,7 +36,7 @@ class TestJinja2CodeExecutor(CodeExecutorTestMixin): ) assert result == {"result": "Hello World"} - def test_jinja2_get_runner_script(self, flask_app_with_containers): + def test_jinja2_get_runner_script(self, flask_app_with_containers: Flask): """Test Jinja2 template transformer runner script generation""" _, Jinja2TemplateTransformer = self.jinja2_imports @@ -43,7 +45,7 @@ class TestJinja2CodeExecutor(CodeExecutorTestMixin): assert runner_script.count(Jinja2TemplateTransformer._inputs_placeholder) == 1 assert runner_script.count(Jinja2TemplateTransformer._result_tag) == 2 - def test_jinja2_template_with_special_characters(self, flask_app_with_containers): + def test_jinja2_template_with_special_characters(self, flask_app_with_containers: Flask): """ Test that templates with special characters (quotes, newlines) render correctly. 
This is a regression test for issue #26818 where textarea pre-fill values diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_python3.py b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_python3.py index 6d93df2472..0de41e1312 100644 --- a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_python3.py +++ b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_python3.py @@ -1,12 +1,14 @@ from textwrap import dedent +from flask import Flask + from .test_utils import CodeExecutorTestMixin class TestPython3CodeExecutor(CodeExecutorTestMixin): """Test class for Python3 code executor functionality.""" - def test_python3_plain(self, flask_app_with_containers): + def test_python3_plain(self, flask_app_with_containers: Flask): """Test basic Python3 code execution with print output""" CodeExecutor, CodeLanguage = self.code_executor_imports @@ -14,7 +16,7 @@ class TestPython3CodeExecutor(CodeExecutorTestMixin): result = CodeExecutor.execute_code(language=CodeLanguage.PYTHON3, preload="", code=code) assert result == "Hello World\n" - def test_python3_json(self, flask_app_with_containers): + def test_python3_json(self, flask_app_with_containers: Flask): """Test Python3 code execution with JSON output""" CodeExecutor, CodeLanguage = self.code_executor_imports @@ -25,7 +27,7 @@ class TestPython3CodeExecutor(CodeExecutorTestMixin): result = CodeExecutor.execute_code(language=CodeLanguage.PYTHON3, preload="", code=code) assert result == '{"Hello": "World"}\n' - def test_python3_with_code_template(self, flask_app_with_containers): + def test_python3_with_code_template(self, flask_app_with_containers: Flask): """Test Python3 workflow code template execution with inputs""" CodeExecutor, CodeLanguage = self.code_executor_imports Python3CodeProvider, _ = self.python3_imports @@ -37,7 +39,7 @@ class TestPython3CodeExecutor(CodeExecutorTestMixin): ) 
assert result == {"result": "HelloWorld"} - def test_python3_get_runner_script(self, flask_app_with_containers): + def test_python3_get_runner_script(self, flask_app_with_containers: Flask): """Test Python3 template transformer runner script generation""" _, Python3TemplateTransformer = self.python3_imports diff --git a/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py b/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py new file mode 100644 index 0000000000..62d3d79cf1 --- /dev/null +++ b/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py @@ -0,0 +1,103 @@ +"""Unit tests for the Markdown API docs generator.""" + +import importlib.util +import sys +from pathlib import Path + + +def _load_generate_swagger_markdown_docs_module(): + api_dir = Path(__file__).resolve().parents[3] + script_path = api_dir / "dev" / "generate_swagger_markdown_docs.py" + + spec = importlib.util.spec_from_file_location("generate_swagger_markdown_docs", script_path) + assert spec + assert spec.loader + + module = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) # type: ignore[attr-defined] + return module + + +def test_generate_markdown_docs_keeps_split_docs_and_merges_fastopenapi_into_console(tmp_path, monkeypatch): + module = _load_generate_swagger_markdown_docs_module() + swagger_dir = tmp_path / "openapi" + markdown_dir = tmp_path / "markdown" + stale_combined_doc = markdown_dir / "api-reference.md" + markdown_dir.mkdir() + stale_combined_doc.write_text("stale", encoding="utf-8") + + def write_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + paths = [] + for target in module.SPEC_TARGETS: + path = output_dir / target.filename + path.write_text("{}", encoding="utf-8") + paths.append(path) + return paths + + def write_fastopenapi_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + path = output_dir / 
module.FASTOPENAPI_SPEC_TARGETS[0].filename + path.write_text("{}", encoding="utf-8") + return [path] + + def convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + markdown_path.write_text(f"# {spec_path.stem}\n\n## Routes\n", encoding="utf-8") + + monkeypatch.setattr(module, "generate_specs", write_specs) + monkeypatch.setattr(module, "generate_fastopenapi_specs", write_fastopenapi_specs) + monkeypatch.setattr(module, "_convert_spec_to_markdown", convert_spec_to_markdown) + + written_paths = module.generate_markdown_docs(swagger_dir, markdown_dir) + + assert [path.name for path in written_paths] == [ + "console-swagger.md", + "web-swagger.md", + "service-swagger.md", + ] + assert not stale_combined_doc.exists() + assert not list(swagger_dir.glob("*.json")) + + console_markdown = (markdown_dir / "console-swagger.md").read_text(encoding="utf-8") + assert "## FastOpenAPI Preview (OpenAPI 3.0)" in console_markdown + assert "### fastopenapi-console-openapi" in console_markdown + assert "#### Routes" in console_markdown + assert "FastOpenAPI Preview" not in (markdown_dir / "web-swagger.md").read_text(encoding="utf-8") + assert "FastOpenAPI Preview" not in (markdown_dir / "service-swagger.md").read_text(encoding="utf-8") + + +def test_generate_markdown_docs_only_removes_generated_specs_from_separate_swagger_dir(tmp_path, monkeypatch): + module = _load_generate_swagger_markdown_docs_module() + swagger_dir = tmp_path / "swagger" + markdown_dir = tmp_path / "markdown" + swagger_dir.mkdir() + existing_file = swagger_dir / "existing.txt" + existing_file.write_text("keep me", encoding="utf-8") + + def write_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + paths = [] + for target in module.SPEC_TARGETS: + path = output_dir / target.filename + path.write_text("{}", encoding="utf-8") + paths.append(path) + return paths + + def write_fastopenapi_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, 
exist_ok=True) + path = output_dir / module.FASTOPENAPI_SPEC_TARGETS[0].filename + path.write_text("{}", encoding="utf-8") + return [path] + + def convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + markdown_path.write_text(f"# {spec_path.stem}\n", encoding="utf-8") + + monkeypatch.setattr(module, "generate_specs", write_specs) + monkeypatch.setattr(module, "generate_fastopenapi_specs", write_fastopenapi_specs) + monkeypatch.setattr(module, "_convert_spec_to_markdown", convert_spec_to_markdown) + + module.generate_markdown_docs(swagger_dir, markdown_dir) + + assert existing_file.read_text(encoding="utf-8") == "keep me" + assert not list(swagger_dir.glob("*.json")) diff --git a/api/tests/unit_tests/commands/test_generate_swagger_specs.py b/api/tests/unit_tests/commands/test_generate_swagger_specs.py index e77e875081..79a577087d 100644 --- a/api/tests/unit_tests/commands/test_generate_swagger_specs.py +++ b/api/tests/unit_tests/commands/test_generate_swagger_specs.py @@ -6,6 +6,16 @@ import sys from pathlib import Path +def _walk_values(value): + yield value + if isinstance(value, dict): + for child in value.values(): + yield from _walk_values(child) + elif isinstance(value, list): + for child in value: + yield from _walk_values(child) + + def _load_generate_swagger_specs_module(): api_dir = Path(__file__).resolve().parents[3] script_path = api_dir / "dev" / "generate_swagger_specs.py" @@ -35,3 +45,32 @@ def test_generate_specs_writes_console_web_and_service_swagger_files(tmp_path): payload = json.loads(path.read_text(encoding="utf-8")) assert payload["swagger"] == "2.0" assert "paths" in payload + + +def test_generate_specs_writes_swagger_with_resolvable_references_and_no_nulls(tmp_path): + module = _load_generate_swagger_specs_module() + + written_paths = module.generate_specs(tmp_path) + + for path in written_paths: + payload = json.loads(path.read_text(encoding="utf-8")) + definitions = payload["definitions"] + refs = { + 
item["$ref"].removeprefix("#/definitions/") + for item in _walk_values(payload) + if isinstance(item, dict) and isinstance(item.get("$ref"), str) + } + + assert refs <= set(definitions) + assert all(value is not None for value in _walk_values(payload)) + + +def test_generate_specs_is_idempotent(tmp_path): + module = _load_generate_swagger_specs_module() + + first_paths = module.generate_specs(tmp_path / "first") + second_paths = module.generate_specs(tmp_path / "second") + + assert [path.name for path in first_paths] == [path.name for path in second_paths] + for first_path, second_path in zip(first_paths, second_paths): + assert first_path.read_text(encoding="utf-8") == second_path.read_text(encoding="utf-8") diff --git a/api/tests/unit_tests/configs/test_dify_config.py b/api/tests/unit_tests/configs/test_dify_config.py index bad246a4bb..57dbf453de 100644 --- a/api/tests/unit_tests/configs/test_dify_config.py +++ b/api/tests/unit_tests/configs/test_dify_config.py @@ -114,8 +114,8 @@ def test_flask_configs(monkeypatch: pytest.MonkeyPatch): "pool_recycle": 3600, "pool_size": 30, "pool_use_lifo": False, - "pool_reset_on_return": None, "pool_timeout": 30, + "pool_reset_on_return": "rollback", } assert config["CONSOLE_WEB_URL"] == "https://example.com" diff --git a/api/tests/unit_tests/controllers/common/test_helpers.py b/api/tests/unit_tests/controllers/common/test_helpers.py index 59c463177c..376a7a90c5 100644 --- a/api/tests/unit_tests/controllers/common/test_helpers.py +++ b/api/tests/unit_tests/controllers/common/test_helpers.py @@ -57,7 +57,7 @@ class TestGuessFileInfoFromResponse: (False, "bin"), ], ) - def test_generated_filename_when_missing(self, monkeypatch, magic_available, expected_ext): + def test_generated_filename_when_missing(self, monkeypatch: pytest.MonkeyPatch, magic_available, expected_ext): if magic_available: if helpers.magic is None: pytest.skip("python-magic is not installed, cannot run 'magic_available=True' test variant") @@ -155,7 +155,7 @@ 
class TestMagicImportWarnings: ) def test_magic_import_warning_per_platform( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, platform_name, expected_message, ): diff --git a/api/tests/unit_tests/controllers/common/test_schema.py b/api/tests/unit_tests/controllers/common/test_schema.py index 56c8160f02..7cabafba0e 100644 --- a/api/tests/unit_tests/controllers/common/test_schema.py +++ b/api/tests/unit_tests/controllers/common/test_schema.py @@ -1,10 +1,11 @@ import sys from enum import StrEnum +from typing import Literal from unittest.mock import MagicMock, patch import pytest from flask_restx import Namespace -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict, Field class UserModel(BaseModel): @@ -17,6 +18,39 @@ class ProductModel(BaseModel): price: float +class ChildModel(BaseModel): + value: str + + +class ParentModel(BaseModel): + child: ChildModel + + +class StatusEnum(StrEnum): + ACTIVE = "active" + INACTIVE = "inactive" + + +class PriorityEnum(StrEnum): + HIGH = "high" + LOW = "low" + + +class QueryModel(BaseModel): + model_config = ConfigDict(populate_by_name=True) + + page: int = Field(default=1, ge=1, le=100, description="Page number") + keyword: str | None = Field(default=None, min_length=1, max_length=50, description="Search keyword") + status: Literal["active", "inactive"] | None = Field(default=None, description="Status filter") + app_id: str = Field(..., alias="appId", description="Application ID") + tag_ids: list[str] = Field(default_factory=list, min_length=1, max_length=3, description="Tag IDs") + ambiguous: int | str | None = Field(default=None, description="Ambiguous query parameter") + + +class ResponseAliasModel(BaseModel): + public_name: str = Field(validation_alias="internal_name") + + @pytest.fixture(autouse=True) def mock_console_ns(): """Mock the console_ns to avoid circular imports during test collection.""" @@ -64,6 +98,22 @@ def test_register_schema_model_passes_schema_from_pydantic(): assert schema == 
expected_schema +def test_register_schema_model_promotes_nested_pydantic_definitions(): + from controllers.common.schema import DEFAULT_REF_TEMPLATE_SWAGGER_2_0, register_schema_model + + namespace = MagicMock(spec=Namespace) + + register_schema_model(namespace, ParentModel) + + called_schemas = {call.args[0]: call.args[1] for call in namespace.schema_model.call_args_list} + parent_schema = ParentModel.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) + + assert set(called_schemas) == {"ParentModel", "ChildModel"} + assert "$defs" not in called_schemas["ParentModel"] + assert called_schemas["ParentModel"]["properties"]["child"]["$ref"] == "#/definitions/ChildModel" + assert called_schemas["ChildModel"] == parent_schema["$defs"]["ChildModel"] + + def test_register_schema_models_registers_multiple_models(): from controllers.common.schema import register_schema_models @@ -77,7 +127,7 @@ def test_register_schema_models_registers_multiple_models(): assert called_names == ["UserModel", "ProductModel"] -def test_register_schema_models_calls_register_schema_model(monkeypatch): +def test_register_schema_models_calls_register_schema_model(monkeypatch: pytest.MonkeyPatch): from controllers.common.schema import register_schema_models namespace = MagicMock(spec=Namespace) @@ -100,14 +150,18 @@ def test_register_schema_models_calls_register_schema_model(monkeypatch): ] -class StatusEnum(StrEnum): - ACTIVE = "active" - INACTIVE = "inactive" +def test_register_response_schema_model_uses_serialized_field_names(): + from controllers.common.schema import register_response_schema_model + namespace = MagicMock(spec=Namespace) -class PriorityEnum(StrEnum): - HIGH = "high" - LOW = "low" + register_response_schema_model(namespace, ResponseAliasModel) + + model_name, schema = namespace.schema_model.call_args.args + + assert model_name == "ResponseAliasModel" + assert "public_name" in schema["properties"] + assert "internal_name" not in schema["properties"] def 
test_get_or_create_model_returns_existing_model(mock_console_ns): @@ -187,3 +241,54 @@ def test_register_enum_models_uses_correct_ref_template(): # Verify the schema contains enum values assert "enum" in schema or "anyOf" in schema + + +def test_query_params_from_model_builds_flask_restx_doc_params(): + from controllers.common.schema import query_params_from_model + + params = query_params_from_model(QueryModel) + + assert params["page"] == { + "in": "query", + "required": False, + "description": "Page number", + "type": "integer", + "default": 1, + "minimum": 1, + "maximum": 100, + } + assert params["keyword"] == { + "in": "query", + "required": False, + "description": "Search keyword", + "type": "string", + "minLength": 1, + "maxLength": 50, + } + assert params["status"] == { + "in": "query", + "required": False, + "description": "Status filter", + "type": "string", + "enum": ["active", "inactive"], + } + assert params["appId"] == { + "in": "query", + "required": True, + "description": "Application ID", + "type": "string", + } + assert params["tag_ids"] == { + "in": "query", + "required": False, + "description": "Tag IDs", + "type": "array", + "items": {"type": "string"}, + "minItems": 1, + "maxItems": 3, + } + assert params["ambiguous"] == { + "in": "query", + "required": False, + "description": "Ambiguous query parameter", + } diff --git a/api/tests/unit_tests/controllers/console/app/test_app_response_models.py b/api/tests/unit_tests/controllers/console/app/test_app_response_models.py index afa54b4cdc..d513dc6533 100644 --- a/api/tests/unit_tests/controllers/console/app/test_app_response_models.py +++ b/api/tests/unit_tests/controllers/console/app/test_app_response_models.py @@ -10,6 +10,8 @@ from typing import Any import pytest from flask.views import MethodView +from pydantic import ValidationError +from werkzeug.datastructures import MultiDict from configs import dify_config @@ -176,6 +178,101 @@ def _dummy_workflow(): ) +def 
test_app_list_query_normalizes_orpc_bracket_tag_ids(app_module): + first_tag_id = "8c4ef3d1-58a1-4d94-8a1c-1c171d889e08" + second_tag_id = "3c39395b-6d1f-4030-8b17-eaa7cc85221c" + query_args = MultiDict( + [ + ("page", "1"), + ("limit", "30"), + ("tag_ids[1]", second_tag_id), + ("tag_ids[0]", first_tag_id), + ] + ) + + normalized = app_module._normalize_app_list_query_args(query_args) + query = app_module.AppListQuery.model_validate(normalized) + + assert query.tag_ids == [first_tag_id, second_tag_id] + + +def test_app_list_query_preserves_regular_query_params(app_module): + query_args = MultiDict( + [ + ("page", "2"), + ("limit", "50"), + ("mode", "chat"), + ("name", "Sales Copilot"), + ("is_created_by_me", "true"), + ] + ) + + normalized = app_module._normalize_app_list_query_args(query_args) + query = app_module.AppListQuery.model_validate(normalized) + + assert normalized == { + "page": "2", + "limit": "50", + "mode": "chat", + "name": "Sales Copilot", + "is_created_by_me": "true", + } + assert query.page == 2 + assert query.limit == 50 + assert query.mode == "chat" + assert query.name == "Sales Copilot" + assert query.is_created_by_me is True + assert query.tag_ids is None + + +def test_app_list_query_normalizes_empty_bracket_tag_ids_to_none(app_module): + query_args = MultiDict( + [ + ("tag_ids[0]", ""), + ("tag_ids[1]", " "), + ] + ) + + normalized = app_module._normalize_app_list_query_args(query_args) + query = app_module.AppListQuery.model_validate(normalized) + + assert normalized == {"tag_ids": ["", " "]} + assert query.tag_ids is None + + +def test_app_list_query_rejects_invalid_bracket_tag_id(app_module): + normalized = app_module._normalize_app_list_query_args(MultiDict([("tag_ids[0]", "not-a-uuid")])) + + with pytest.raises(ValidationError): + app_module.AppListQuery.model_validate(normalized) + + +def test_app_list_query_sorts_bracket_tag_ids_by_index(app_module): + first_tag_id = "8c4ef3d1-58a1-4d94-8a1c-1c171d889e08" + second_tag_id = 
"3c39395b-6d1f-4030-8b17-eaa7cc85221c" + third_tag_id = "9d5ec0f7-4f2b-4e7f-9c13-1e7a034d0eb1" + query_args = MultiDict( + [ + ("tag_ids[2]", third_tag_id), + ("tag_ids[1]", second_tag_id), + ("tag_ids[0]", first_tag_id), + ] + ) + + normalized = app_module._normalize_app_list_query_args(query_args) + query = app_module.AppListQuery.model_validate(normalized) + + assert query.tag_ids == [first_tag_id, second_tag_id, third_tag_id] + + +def test_app_list_query_rejects_flat_tag_ids(app_module): + tag_id = "8c4ef3d1-58a1-4d94-8a1c-1c171d889e08" + normalized = app_module._normalize_app_list_query_args(MultiDict([("tag_ids", tag_id)])) + + with pytest.raises(ValidationError): + app_module.AppListQuery.model_validate(normalized) + + def test_app_partial_serialization_uses_aliases(app_models): AppPartial = app_models.AppPartial created_at = _ts() diff --git a/api/tests/unit_tests/controllers/console/app/test_workflow.py b/api/tests/unit_tests/controllers/console/app/test_workflow.py index e91c0a0597..7c470eb9a8 100644 --- a/api/tests/unit_tests/controllers/console/app/test_workflow.py +++ b/api/tests/unit_tests/controllers/console/app/test_workflow.py @@ -363,7 +363,8 @@ def test_workflow_online_users_filters_inaccessible_workflow(app, monkeypatch: p ) monkeypatch.setattr(workflow_module.file_helpers, "get_signed_file_url", sign_avatar) - workflow_module.redis_client.hgetall.side_effect = lambda key: ( + redis_pipeline = Mock() + redis_pipeline.execute.return_value = [ { b"sid-1": json.dumps( { @@ -374,16 +375,16 @@ def test_workflow_online_users_filters_inaccessible_workflow(app, monkeypatch: p } ) } - if key == f"{workflow_module.WORKFLOW_ONLINE_USERS_PREFIX}{app_id_1}" - else {} - ) + ] + workflow_module.redis_client.pipeline.return_value = redis_pipeline api = workflow_module.WorkflowOnlineUsersApi() - handler = _unwrap(api.get) + handler = _unwrap(api.post) with app.test_request_context( - f"/apps/workflows/online-users?app_ids={app_id_1},{app_id_2}", - method="GET", 
+ "/apps/workflows/online-users", + method="POST", + json={"app_ids": [app_id_1, app_id_2]}, ): response = handler(api) @@ -402,12 +403,43 @@ def test_workflow_online_users_filters_inaccessible_workflow(app, monkeypatch: p } ] } - workflow_module.redis_client.hgetall.assert_called_once_with( - f"{workflow_module.WORKFLOW_ONLINE_USERS_PREFIX}{app_id_1}" - ) + workflow_module.redis_client.pipeline.assert_called_once_with(transaction=False) + redis_pipeline.hgetall.assert_called_once_with(f"{workflow_module.WORKFLOW_ONLINE_USERS_PREFIX}{app_id_1}") + redis_pipeline.execute.assert_called_once_with() sign_avatar.assert_called_once_with("avatar-file-id") +def test_workflow_online_users_batches_redis_reads(app, monkeypatch: pytest.MonkeyPatch) -> None: + app_ids = [f"wf-{index}" for index in range(workflow_module.WORKFLOW_ONLINE_USERS_REDIS_BATCH_SIZE + 1)] + monkeypatch.setattr(workflow_module, "current_account_with_tenant", lambda: (SimpleNamespace(), "tenant-1")) + monkeypatch.setattr( + workflow_module, + "WorkflowService", + lambda: SimpleNamespace(get_accessible_app_ids=lambda app_ids, tenant_id: set(app_ids)), + ) + + first_pipeline = Mock() + first_pipeline.execute.return_value = [{} for _ in range(workflow_module.WORKFLOW_ONLINE_USERS_REDIS_BATCH_SIZE)] + second_pipeline = Mock() + second_pipeline.execute.return_value = [{}] + workflow_module.redis_client.pipeline.side_effect = [first_pipeline, second_pipeline] + + api = workflow_module.WorkflowOnlineUsersApi() + handler = _unwrap(api.post) + + with app.test_request_context( + "/apps/workflows/online-users", + method="POST", + json={"app_ids": app_ids}, + ): + response = handler(api) + + assert len(response["data"]) == len(app_ids) + assert workflow_module.redis_client.pipeline.call_count == 2 + assert first_pipeline.hgetall.call_count == workflow_module.WORKFLOW_ONLINE_USERS_REDIS_BATCH_SIZE + assert second_pipeline.hgetall.call_count == 1 + + def test_workflow_online_users_rejects_excessive_workflow_ids(app, 
monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr(workflow_module, "current_account_with_tenant", lambda: (SimpleNamespace(), "tenant-1")) accessible_app_ids = Mock(return_value=set()) @@ -417,14 +449,15 @@ def test_workflow_online_users_rejects_excessive_workflow_ids(app, monkeypatch: lambda: SimpleNamespace(get_accessible_app_ids=accessible_app_ids), ) - excessive_ids = ",".join(f"wf-{index}" for index in range(workflow_module.MAX_WORKFLOW_ONLINE_USERS_QUERY_IDS + 1)) + excessive_ids = [f"wf-{index}" for index in range(workflow_module.MAX_WORKFLOW_ONLINE_USERS_REQUEST_IDS + 1)] api = workflow_module.WorkflowOnlineUsersApi() - handler = _unwrap(api.get) + handler = _unwrap(api.post) with app.test_request_context( - f"/apps/workflows/online-users?app_ids={excessive_ids}", - method="GET", + "/apps/workflows/online-users", + method="POST", + json={"app_ids": excessive_ids}, ): with pytest.raises(HTTPException) as exc: handler(api) diff --git a/api/tests/unit_tests/controllers/console/app/test_workflow_pause_details_api.py b/api/tests/unit_tests/controllers/console/app/test_workflow_pause_details_api.py index c4a8148446..05c17b4e34 100644 --- a/api/tests/unit_tests/controllers/console/app/test_workflow_pause_details_api.py +++ b/api/tests/unit_tests/controllers/console/app/test_workflow_pause_details_api.py @@ -112,3 +112,24 @@ def test_pause_details_tenant_isolation(app: Flask, monkeypatch: pytest.MonkeyPa with pytest.raises(NotFoundError): with app.test_request_context("/console/api/workflow/run-1/pause-details", method="GET"): response, status = workflow_run_module.ConsoleWorkflowPauseDetailsApi().get(workflow_run_id="run-1") + + +def test_pause_details_returns_empty_response_for_non_paused_run(app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: + account = _make_account() + _patch_console_guards(monkeypatch, account) + + workflow_run = Mock(spec=WorkflowRun) + workflow_run.tenant_id = "tenant-123" + workflow_run.status = 
WorkflowExecutionStatus.RUNNING + fake_db = SimpleNamespace(engine=Mock(), session=SimpleNamespace(get=lambda *_: workflow_run)) + monkeypatch.setattr(workflow_run_module, "db", fake_db) + + with app.test_request_context("/console/api/workflow/run-1/pause-details", method="GET"): + response, status = workflow_run_module.ConsoleWorkflowPauseDetailsApi().get(workflow_run_id="run-1") + + assert status == 200 + assert response == {"paused_at": None, "paused_nodes": []} + + +def test_pause_details_response_schema_is_registered() -> None: + assert workflow_run_module.WorkflowPauseDetailsResponse.__name__ in workflow_run_module.console_ns.models diff --git a/api/tests/unit_tests/controllers/console/app/test_workflow_run_api.py b/api/tests/unit_tests/controllers/console/app/test_workflow_run_api.py new file mode 100644 index 0000000000..e225e31563 --- /dev/null +++ b/api/tests/unit_tests/controllers/console/app/test_workflow_run_api.py @@ -0,0 +1,248 @@ +from __future__ import annotations + +from datetime import UTC, datetime +from types import SimpleNamespace +from typing import Any + +import pytest +from flask import Flask +from flask_restx import marshal + +from controllers.console.app import workflow_run as workflow_run_module + + +def _unwrap(func): + while hasattr(func, "__wrapped__"): + func = func.__wrapped__ + return func + + +def _serialize_200_response(handler, payload: Any) -> Any: + response_doc = getattr(handler, "__apidoc__", {}).get("responses", {}).get("200") + if response_doc is None: + return payload + + response_model = response_doc[1] + if isinstance(response_model, dict): + return marshal(payload, response_model) + return payload + + +def _account() -> SimpleNamespace: + return SimpleNamespace(id="account-1", name="Alice", email="alice@example.com") + + +def _workflow_run_summary(**overrides) -> SimpleNamespace: + created_at = datetime(2026, 1, 2, 3, 4, 5, tzinfo=UTC) + payload = { + "id": "run-1", + "version": "v1", + "status": "succeeded", + 
"elapsed_time": 1.5, + "total_tokens": 10, + "total_steps": 2, + "created_by_account": _account(), + "created_at": created_at, + "finished_at": created_at, + "exceptions_count": 0, + "retry_index": 0, + } + payload.update(overrides) + return SimpleNamespace(**payload) + + +def _workflow_run_node_execution(**overrides) -> SimpleNamespace: + created_at = datetime(2026, 1, 2, 3, 4, 5, tzinfo=UTC) + payload = { + "id": "node-exec-1", + "index": 1, + "predecessor_node_id": None, + "node_id": "node-1", + "node_type": "start", + "title": "Start", + "inputs_dict": {"query": "hello"}, + "process_data_dict": {"step": "prepared"}, + "outputs_dict": {"answer": "world"}, + "status": "succeeded", + "error": None, + "elapsed_time": 1.0, + "execution_metadata_dict": {"total_tokens": 3}, + "extras": {}, + "created_at": created_at, + "created_by_role": "account", + "created_by_account": _account(), + "created_by_end_user": None, + "finished_at": created_at, + "inputs_truncated": False, + "outputs_truncated": False, + "process_data_truncated": False, + } + payload.update(overrides) + return SimpleNamespace(**payload) + + +def test_workflow_run_list_returns_frontend_history_contract(app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: + class WorkflowRunService: + def get_paginate_workflow_runs(self, **_kwargs): + return { + "limit": 10, + "has_more": False, + "data": [_workflow_run_summary()], + } + + monkeypatch.setattr(workflow_run_module, "WorkflowRunService", WorkflowRunService) + + api = workflow_run_module.WorkflowRunListApi() + handler = _unwrap(api.get) + + with app.test_request_context("/apps/app-1/workflow-runs?limit=10", method="GET"): + payload = handler(api, app_model=SimpleNamespace(id="app-1", tenant_id="tenant-1")) + + response = _serialize_200_response(api.get, payload) + + assert response["limit"] == 10 + assert response["has_more"] is False + assert response["data"][0] == { + "id": "run-1", + "version": "v1", + "status": "succeeded", + "elapsed_time": 1.5, + 
"total_tokens": 10, + "total_steps": 2, + "created_by_account": {"id": "account-1", "name": "Alice", "email": "alice@example.com"}, + "created_at": 1767323045, + "finished_at": 1767323045, + "exceptions_count": 0, + "retry_index": 0, + } + + +def test_advanced_chat_workflow_run_list_keeps_message_fields(app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: + class WorkflowRunService: + def get_paginate_advanced_chat_workflow_runs(self, **_kwargs): + return { + "limit": 1, + "has_more": True, + "data": [ + _workflow_run_summary( + conversation_id="conversation-1", + message_id="message-1", + ) + ], + } + + monkeypatch.setattr(workflow_run_module, "WorkflowRunService", WorkflowRunService) + + api = workflow_run_module.AdvancedChatAppWorkflowRunListApi() + handler = _unwrap(api.get) + + with app.test_request_context("/apps/app-1/advanced-chat/workflow-runs?limit=1", method="GET"): + payload = handler(api, app_model=SimpleNamespace(id="app-1", tenant_id="tenant-1")) + + response = _serialize_200_response(api.get, payload) + + assert response["data"][0]["conversation_id"] == "conversation-1" + assert response["data"][0]["message_id"] == "message-1" + + +def test_workflow_run_detail_returns_frontend_detail_contract(app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: + created_at = datetime(2026, 1, 2, 3, 4, 5, tzinfo=UTC) + workflow_run = SimpleNamespace( + id="run-1", + version="v1", + graph_dict={"nodes": []}, + inputs_dict={"query": "hello"}, + status="succeeded", + outputs_dict={"answer": "world"}, + error=None, + elapsed_time=1.5, + total_tokens=10, + total_steps=2, + created_by_role="account", + created_by_account=_account(), + created_by_end_user=None, + created_at=created_at, + finished_at=created_at, + exceptions_count=0, + ) + + class WorkflowRunService: + def get_workflow_run(self, **_kwargs): + return workflow_run + + monkeypatch.setattr(workflow_run_module, "WorkflowRunService", WorkflowRunService) + + api = workflow_run_module.WorkflowRunDetailApi() + 
handler = _unwrap(api.get) + + with app.test_request_context("/apps/app-1/workflow-runs/run-1", method="GET"): + payload = handler(api, app_model=SimpleNamespace(id="app-1", tenant_id="tenant-1"), run_id="run-1") + + response = _serialize_200_response(api.get, payload) + + assert response == { + "id": "run-1", + "version": "v1", + "graph": {"nodes": []}, + "inputs": {"query": "hello"}, + "status": "succeeded", + "outputs": {"answer": "world"}, + "error": None, + "elapsed_time": 1.5, + "total_tokens": 10, + "total_steps": 2, + "created_by_role": "account", + "created_by_account": {"id": "account-1", "name": "Alice", "email": "alice@example.com"}, + "created_by_end_user": None, + "created_at": 1767323045, + "finished_at": 1767323045, + "exceptions_count": 0, + } + + +def test_workflow_run_node_executions_return_frontend_trace_contract( + app: Flask, monkeypatch: pytest.MonkeyPatch +) -> None: + class WorkflowRunService: + def get_workflow_run_node_executions(self, **_kwargs): + return [_workflow_run_node_execution()] + + monkeypatch.setattr(workflow_run_module, "WorkflowRunService", WorkflowRunService) + monkeypatch.setattr(workflow_run_module, "current_user", SimpleNamespace(id="account-1")) + + api = workflow_run_module.WorkflowRunNodeExecutionListApi() + handler = _unwrap(api.get) + + with app.test_request_context("/apps/app-1/workflow-runs/run-1/node-executions", method="GET"): + payload = handler(api, app_model=SimpleNamespace(id="app-1", tenant_id="tenant-1"), run_id="run-1") + + response = _serialize_200_response(api.get, payload) + + assert response == { + "data": [ + { + "id": "node-exec-1", + "index": 1, + "predecessor_node_id": None, + "node_id": "node-1", + "node_type": "start", + "title": "Start", + "inputs": {"query": "hello"}, + "process_data": {"step": "prepared"}, + "outputs": {"answer": "world"}, + "status": "succeeded", + "error": None, + "elapsed_time": 1.0, + "execution_metadata": {"total_tokens": 3}, + "extras": {}, + "created_at": 1767323045, + 
"created_by_role": "account", + "created_by_account": {"id": "account-1", "name": "Alice", "email": "alice@example.com"}, + "created_by_end_user": None, + "finished_at": 1767323045, + "inputs_truncated": False, + "outputs_truncated": False, + "process_data_truncated": False, + } + ] + } diff --git a/api/tests/unit_tests/controllers/console/app/workflow_draft_variables_test.py b/api/tests/unit_tests/controllers/console/app/workflow_draft_variables_test.py index 22b80b748e..62fa82e339 100644 --- a/api/tests/unit_tests/controllers/console/app/workflow_draft_variables_test.py +++ b/api/tests/unit_tests/controllers/console/app/workflow_draft_variables_test.py @@ -1,7 +1,7 @@ import uuid from collections import OrderedDict from typing import Any, NamedTuple -from unittest.mock import MagicMock, patch +from unittest.mock import patch import pytest from flask_restx import marshal @@ -29,15 +29,18 @@ class TestWorkflowDraftVariableFields: def test_serialize_full_content(self): """Test that _serialize_full_content uses pre-loaded relationships.""" # Create mock objects with relationships pre-loaded - mock_variable_file = MagicMock(spec=WorkflowDraftVariableFile) - mock_variable_file.size = 100000 - mock_variable_file.length = 50 - mock_variable_file.value_type = SegmentType.OBJECT - mock_variable_file.upload_file_id = "test-upload-file-id" - - mock_variable = MagicMock(spec=WorkflowDraftVariable) - mock_variable.file_id = "test-file-id" - mock_variable.variable_file = mock_variable_file + mock_variable = WorkflowDraftVariable( + file_id="test-file-id", + variable_file=WorkflowDraftVariableFile( + size=100000, + length=50, + value_type=SegmentType.OBJECT, + upload_file_id="test-upload-file-id", + tenant_id=str(uuid.uuid4()), + app_id=str(uuid.uuid4()), + user_id=str(uuid.uuid4()), + ), + ) # Mock the file helpers with patch("controllers.console.app.workflow_draft_variable.file_helpers", autospec=True) as mock_file_helpers: @@ -84,7 +87,7 @@ class 
TestWorkflowDraftVariableFields: expected_without_value: OrderedDict[str, Any] = OrderedDict( { - "id": str(conv_var.id), + "id": conv_var.id, "type": conv_var.get_variable_type().value, "name": "conv_var", "description": "", @@ -117,7 +120,7 @@ class TestWorkflowDraftVariableFields: expected_without_value = OrderedDict( { - "id": str(sys_var.id), + "id": sys_var.id, "type": sys_var.get_variable_type().value, "name": "sys_var", "description": "", @@ -149,7 +152,7 @@ class TestWorkflowDraftVariableFields: expected_without_value: OrderedDict[str, Any] = OrderedDict( { - "id": str(node_var.id), + "id": node_var.id, "type": node_var.get_variable_type().value, "name": "node_var", "description": "", @@ -180,19 +183,22 @@ class TestWorkflowDraftVariableFields: node_var.id = str(uuid.uuid4()) node_var.last_edited_at = naive_utc_now() variable_file = WorkflowDraftVariableFile( - id=str(uuidv7()), upload_file_id=str(uuid.uuid4()), size=1024, length=10, value_type=SegmentType.ARRAY_STRING, + tenant_id=str(uuidv7()), + app_id=str(uuidv7()), + user_id=str(uuidv7()), ) + variable_file.id = str(uuidv7()) node_var.variable_file = variable_file node_var.file_id = variable_file.id expected_without_value: OrderedDict[str, Any] = OrderedDict( { - "id": str(node_var.id), - "type": node_var.get_variable_type().value, + "id": node_var.id, + "type": node_var.get_variable_type(), "name": "node_var", "description": "", "selector": ["test_node", "node_var"], @@ -235,7 +241,7 @@ class TestWorkflowDraftVariableList: node_var.id = str(uuid.uuid4()) node_var_dict = OrderedDict( { - "id": str(node_var.id), + "id": node_var.id, "type": node_var.get_variable_type().value, "name": "test_var", "description": "", diff --git a/api/tests/unit_tests/controllers/console/auth/test_account_activation.py b/api/tests/unit_tests/controllers/console/auth/test_account_activation.py index d3e864a75a..0fb0ebc330 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_account_activation.py +++ 
b/api/tests/unit_tests/controllers/console/auth/test_account_activation.py @@ -67,7 +67,7 @@ class TestActivateCheckApi: assert response["data"]["email"] == "invitee@example.com" @patch("controllers.console.auth.activate.RegisterService.get_invitation_with_case_fallback") - def test_check_invalid_invitation_token(self, mock_get_invitation, app): + def test_check_invalid_invitation_token(self, mock_get_invitation, app: Flask): """ Test checking invalid invitation token. @@ -185,7 +185,7 @@ class TestActivateApi: mock_db, mock_revoke_token, mock_get_invitation, - app, + app: Flask, mock_invitation, mock_account, ): @@ -227,7 +227,7 @@ class TestActivateApi: mock_db.session.commit.assert_called_once() @patch("controllers.console.auth.activate.RegisterService.get_invitation_with_case_fallback") - def test_activation_with_invalid_token(self, mock_get_invitation, app): + def test_activation_with_invalid_token(self, mock_get_invitation, app: Flask): """ Test account activation with invalid token. 
@@ -263,7 +263,7 @@ class TestActivateApi: mock_db, mock_revoke_token, mock_get_invitation, - app, + app: Flask, mock_invitation, mock_account, ): @@ -312,7 +312,7 @@ class TestActivateApi: mock_db, mock_revoke_token, mock_get_invitation, - app, + app: Flask, mock_invitation, mock_account, language, @@ -358,7 +358,7 @@ class TestActivateApi: mock_db, mock_revoke_token, mock_get_invitation, - app, + app: Flask, mock_invitation, ): """ @@ -398,7 +398,7 @@ class TestActivateApi: mock_db, mock_revoke_token, mock_get_invitation, - app, + app: Flask, mock_invitation, ): """ @@ -438,7 +438,7 @@ class TestActivateApi: mock_db, mock_revoke_token, mock_get_invitation, - app, + app: Flask, mock_invitation, mock_account, ): diff --git a/api/tests/unit_tests/controllers/console/auth/test_email_verification.py b/api/tests/unit_tests/controllers/console/auth/test_email_verification.py index b7bc73da5f..102af9b250 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_email_verification.py +++ b/api/tests/unit_tests/controllers/console/auth/test_email_verification.py @@ -140,7 +140,7 @@ class TestEmailCodeLoginSendEmailApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.AccountService.is_email_send_ip_limit") - def test_send_email_code_ip_rate_limited(self, mock_is_ip_limit, mock_db, app): + def test_send_email_code_ip_rate_limited(self, mock_is_ip_limit, mock_db, app: Flask): """ Test email code sending blocked by IP rate limit. @@ -160,7 +160,7 @@ class TestEmailCodeLoginSendEmailApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.AccountService.is_email_send_ip_limit") @patch("controllers.console.auth.login.AccountService.get_user_through_email") - def test_send_email_code_frozen_account(self, mock_get_user, mock_is_ip_limit, mock_db, app): + def test_send_email_code_frozen_account(self, mock_get_user, mock_is_ip_limit, mock_db, app: Flask): """ Test email code sending to frozen account. 
@@ -195,7 +195,7 @@ class TestEmailCodeLoginSendEmailApi: mock_get_user, mock_is_ip_limit, mock_db, - app, + app: Flask, mock_account, language_input, expected_language, @@ -267,7 +267,7 @@ class TestEmailCodeLoginApi: mock_revoke_token, mock_get_data, mock_db, - app, + app: Flask, mock_account, mock_token_pair, ): @@ -315,7 +315,7 @@ class TestEmailCodeLoginApi: mock_revoke_token, mock_get_data, mock_db, - app, + app: Flask, mock_account, mock_token_pair, ): @@ -353,7 +353,7 @@ class TestEmailCodeLoginApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.AccountService.get_email_code_login_data") - def test_email_code_login_invalid_token(self, mock_get_data, mock_db, app): + def test_email_code_login_invalid_token(self, mock_get_data, mock_db, app: Flask): """ Test email code login with invalid token. @@ -375,7 +375,7 @@ class TestEmailCodeLoginApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.AccountService.get_email_code_login_data") - def test_email_code_login_email_mismatch(self, mock_get_data, mock_db, app): + def test_email_code_login_email_mismatch(self, mock_get_data, mock_db, app: Flask): """ Test email code login with mismatched email. @@ -397,7 +397,7 @@ class TestEmailCodeLoginApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.AccountService.get_email_code_login_data") - def test_email_code_login_wrong_code(self, mock_get_data, mock_db, app): + def test_email_code_login_wrong_code(self, mock_get_data, mock_db, app: Flask): """ Test email code login with incorrect code. 
@@ -431,7 +431,7 @@ class TestEmailCodeLoginApi: mock_revoke_token, mock_get_data, mock_db, - app, + app: Flask, mock_account, ): """ @@ -474,7 +474,7 @@ class TestEmailCodeLoginApi: mock_revoke_token, mock_get_data, mock_db, - app, + app: Flask, mock_account, ): """ @@ -515,7 +515,7 @@ class TestEmailCodeLoginApi: mock_revoke_token, mock_get_data, mock_db, - app, + app: Flask, mock_account, ): """ diff --git a/api/tests/unit_tests/controllers/console/auth/test_login_logout.py b/api/tests/unit_tests/controllers/console/auth/test_login_logout.py index d089be8905..ace2ce5706 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_login_logout.py +++ b/api/tests/unit_tests/controllers/console/auth/test_login_logout.py @@ -9,7 +9,7 @@ This module tests the core authentication endpoints including: """ import base64 -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock, Mock, patch import pytest from flask import Flask @@ -52,12 +52,12 @@ class TestLoginApi: return app @pytest.fixture - def api(self, app): + def api(self, app: Flask): """Create Flask-RESTX API instance.""" return Api(app) @pytest.fixture - def client(self, app, api): + def client(self, app: Flask, api: Api): """Create test client.""" api.add_resource(LoginApi, "/login") return app.test_client() @@ -97,7 +97,7 @@ class TestLoginApi: mock_get_invitation, mock_is_rate_limit, mock_db, - app, + app: Flask, mock_account, mock_token_pair, ): @@ -141,14 +141,14 @@ class TestLoginApi: @patch("controllers.console.auth.login.AccountService.reset_login_error_rate_limit") def test_successful_login_with_valid_invitation( self, - mock_reset_rate_limit, + mock_reset_rate_limit: Mock, mock_login, mock_get_tenants, mock_authenticate, mock_get_invitation, mock_is_rate_limit, mock_db, - app, + app: Flask, mock_account, mock_token_pair, ): @@ -188,7 +188,7 @@ class TestLoginApi: @patch("controllers.console.auth.login.dify_config.BILLING_ENABLED", False) 
@patch("controllers.console.auth.login.AccountService.is_login_error_rate_limit") @patch("controllers.console.auth.login.RegisterService.get_invitation_with_case_fallback") - def test_login_fails_when_rate_limited(self, mock_get_invitation, mock_is_rate_limit, mock_db, app): + def test_login_fails_when_rate_limited(self, mock_get_invitation, mock_is_rate_limit, mock_db, app: Flask): """ Test login rejection when rate limit is exceeded. @@ -216,7 +216,7 @@ class TestLoginApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.dify_config.BILLING_ENABLED", True) @patch("controllers.console.auth.login.BillingService.is_email_in_freeze") - def test_login_fails_when_account_frozen(self, mock_is_frozen, mock_db, app): + def test_login_fails_when_account_frozen(self, mock_is_frozen, mock_db, app: Flask): """ Test login rejection for frozen accounts. @@ -253,7 +253,7 @@ class TestLoginApi: mock_get_invitation, mock_is_rate_limit, mock_db, - app, + app: Flask, ): """ Test login failure with invalid credentials. @@ -290,7 +290,7 @@ class TestLoginApi: @patch("controllers.console.auth.login.RegisterService.get_invitation_with_case_fallback") @patch("controllers.console.auth.login.AccountService.authenticate") def test_login_fails_for_banned_account( - self, mock_authenticate, mock_get_invitation, mock_is_rate_limit, mock_db, app + self, mock_authenticate, mock_get_invitation, mock_is_rate_limit, mock_db, app: Flask ): """ Test login rejection for banned accounts. 
@@ -328,14 +328,14 @@ class TestLoginApi: @patch("controllers.console.auth.login.FeatureService.get_system_features") def test_login_fails_when_no_workspace_and_limit_exceeded( self, - mock_get_features, - mock_get_tenants, - mock_authenticate, - mock_get_invitation, - mock_is_rate_limit, - mock_db, - app, - mock_account, + mock_get_features: MagicMock, + mock_get_tenants: MagicMock, + mock_authenticate: MagicMock, + mock_get_invitation: MagicMock, + mock_is_rate_limit: MagicMock, + mock_db: MagicMock, + app: Flask, + mock_account: MagicMock, ): """ Test login failure when user has no workspace and workspace limit exceeded. @@ -367,7 +367,7 @@ class TestLoginApi: @patch("controllers.console.auth.login.dify_config.BILLING_ENABLED", False) @patch("controllers.console.auth.login.AccountService.is_login_error_rate_limit") @patch("controllers.console.auth.login.RegisterService.get_invitation_with_case_fallback") - def test_login_invitation_email_mismatch(self, mock_get_invitation, mock_is_rate_limit, mock_db, app): + def test_login_invitation_email_mismatch(self, mock_get_invitation, mock_is_rate_limit, mock_db, app: Flask): """ Test login failure when invitation email doesn't match login email. 
@@ -412,7 +412,7 @@ class TestLoginApi: mock_get_invitation, mock_is_rate_limit, mock_db, - app, + app: Flask, mock_account, mock_token_pair, ): @@ -448,7 +448,7 @@ class TestLoginApi: mock_revoke_token, mock_get_token_data, mock_db, - app, + app: Flask, ): mock_get_token_data.return_value = {"email": "User@Example.com", "code": "123456"} mock_get_account.side_effect = Unauthorized("Account is banned.") @@ -491,7 +491,7 @@ class TestLogoutApi: @patch("controllers.console.auth.login.AccountService.logout") @patch("controllers.console.auth.login.flask_login.logout_user") def test_successful_logout( - self, mock_logout_user, mock_service_logout, mock_current_account, mock_db, app, mock_account + self, mock_logout_user, mock_service_logout, mock_current_account, mock_db, app: Flask, mock_account ): """ Test successful logout flow. @@ -518,7 +518,7 @@ class TestLogoutApi: @patch("controllers.console.wraps.db") @patch("controllers.console.auth.login.current_account_with_tenant") @patch("controllers.console.auth.login.flask_login") - def test_logout_anonymous_user(self, mock_flask_login, mock_current_account, mock_db, app): + def test_logout_anonymous_user(self, mock_flask_login, mock_current_account, mock_db, app: Flask): """ Test logout for anonymous (not logged in) user. 
diff --git a/api/tests/unit_tests/controllers/console/auth/test_token_refresh.py b/api/tests/unit_tests/controllers/console/auth/test_token_refresh.py index d010f60866..22974ca416 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_token_refresh.py +++ b/api/tests/unit_tests/controllers/console/auth/test_token_refresh.py @@ -28,12 +28,12 @@ class TestRefreshTokenApi: return app @pytest.fixture - def api(self, app): + def api(self, app: Flask): """Create Flask-RESTX API instance.""" return Api(app) @pytest.fixture - def client(self, app, api): + def client(self, app: Flask, api: Api): """Create test client.""" api.add_resource(RefreshTokenApi, "/refresh-token") return app.test_client() @@ -74,7 +74,7 @@ class TestRefreshTokenApi: assert response.json["result"] == "success" @patch("controllers.console.auth.login.extract_refresh_token", autospec=True) - def test_refresh_fails_without_token(self, mock_extract_token, app): + def test_refresh_fails_without_token(self, mock_extract_token, app: Flask): """ Test token refresh failure when no refresh token provided. @@ -98,7 +98,7 @@ class TestRefreshTokenApi: @patch("controllers.console.auth.login.extract_refresh_token", autospec=True) @patch("controllers.console.auth.login.AccountService.refresh_token", autospec=True) - def test_refresh_fails_with_invalid_token(self, mock_refresh_token, mock_extract_token, app): + def test_refresh_fails_with_invalid_token(self, mock_refresh_token, mock_extract_token, app: Flask): """ Test token refresh failure with invalid refresh token. 
@@ -123,7 +123,7 @@ class TestRefreshTokenApi: @patch("controllers.console.auth.login.extract_refresh_token", autospec=True) @patch("controllers.console.auth.login.AccountService.refresh_token", autospec=True) - def test_refresh_fails_with_expired_token(self, mock_refresh_token, mock_extract_token, app): + def test_refresh_fails_with_expired_token(self, mock_refresh_token, mock_extract_token, app: Flask): """ Test token refresh failure with expired refresh token. @@ -148,7 +148,7 @@ class TestRefreshTokenApi: @patch("controllers.console.auth.login.extract_refresh_token", autospec=True) @patch("controllers.console.auth.login.AccountService.refresh_token", autospec=True) - def test_refresh_with_empty_token(self, mock_refresh_token, mock_extract_token, app): + def test_refresh_with_empty_token(self, mock_refresh_token, mock_extract_token, app: Flask): """ Test token refresh with empty string token. diff --git a/api/tests/unit_tests/controllers/console/billing/test_billing.py b/api/tests/unit_tests/controllers/console/billing/test_billing.py index 810f1b94fc..defa9064fd 100644 --- a/api/tests/unit_tests/controllers/console/billing/test_billing.py +++ b/api/tests/unit_tests/controllers/console/billing/test_billing.py @@ -49,7 +49,7 @@ class TestPartnerTenants: mock_csrf.return_value = None yield {"db": mock_db, "csrf": mock_csrf} - def test_put_success(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_success(self, app: Flask, mock_account, mock_billing_service, mock_decorators): """Test successful partner tenants bindings sync.""" # Arrange partner_key_encoded = base64.b64encode(b"partner-key-123").decode("utf-8") @@ -79,7 +79,7 @@ class TestPartnerTenants: mock_account.id, "partner-key-123", click_id ) - def test_put_invalid_partner_key_base64(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_invalid_partner_key_base64(self, app: Flask, mock_account, mock_billing_service, mock_decorators): """Test that 
invalid base64 partner_key raises BadRequest.""" # Arrange invalid_partner_key = "invalid-base64-!@#$" @@ -104,7 +104,7 @@ class TestPartnerTenants: resource.put(invalid_partner_key) assert "Invalid partner_key" in str(exc_info.value) - def test_put_missing_click_id(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_missing_click_id(self, app: Flask, mock_account, mock_billing_service, mock_decorators): """Test that missing click_id raises BadRequest.""" # Arrange partner_key_encoded = base64.b64encode(b"partner-key-123").decode("utf-8") @@ -128,7 +128,9 @@ class TestPartnerTenants: with pytest.raises(BadRequest): resource.put(partner_key_encoded) - def test_put_billing_service_json_decode_error(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_billing_service_json_decode_error( + self, app: Flask, mock_account, mock_billing_service, mock_decorators + ): """Test handling of billing service JSON decode error. When billing service returns non-200 status code with invalid JSON response, @@ -174,7 +176,7 @@ class TestPartnerTenants: assert isinstance(exc_info.value, json.JSONDecodeError) assert "Expecting value" in str(exc_info.value) - def test_put_empty_click_id(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_empty_click_id(self, app: Flask, mock_account, mock_billing_service, mock_decorators): """Test that empty click_id raises BadRequest.""" # Arrange partner_key_encoded = base64.b64encode(b"partner-key-123").decode("utf-8") @@ -199,7 +201,7 @@ class TestPartnerTenants: resource.put(partner_key_encoded) assert "Invalid partner information" in str(exc_info.value) - def test_put_empty_partner_key_after_decode(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_empty_partner_key_after_decode(self, app: Flask, mock_account, mock_billing_service, mock_decorators): """Test that empty partner_key after decode raises BadRequest.""" # Arrange # Base64 
encode an empty string @@ -225,7 +227,7 @@ class TestPartnerTenants: resource.put(empty_partner_key_encoded) assert "Invalid partner information" in str(exc_info.value) - def test_put_empty_user_id(self, app, mock_account, mock_billing_service, mock_decorators): + def test_put_empty_user_id(self, app: Flask, mock_account, mock_billing_service, mock_decorators): """Test that empty user id raises BadRequest.""" # Arrange partner_key_encoded = base64.b64encode(b"partner-key-123").decode("utf-8") diff --git a/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_auth.py b/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_auth.py index 5136922e88..9c5b5ec256 100644 --- a/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_auth.py +++ b/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_auth.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import Forbidden, NotFound from controllers.console import console_ns @@ -29,7 +30,7 @@ def unwrap(func): class TestDatasourcePluginOAuthAuthorizationUrl: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasourcePluginOAuthAuthorizationUrl() method = unwrap(api.get) @@ -61,7 +62,7 @@ class TestDatasourcePluginOAuthAuthorizationUrl: assert response.status_code == 200 - def test_get_no_oauth_config(self, app): + def test_get_no_oauth_config(self, app: Flask): api = DatasourcePluginOAuthAuthorizationUrl() method = unwrap(api.get) @@ -80,7 +81,7 @@ class TestDatasourcePluginOAuthAuthorizationUrl: with pytest.raises(ValueError): method(api, "notion") - def test_get_without_credential_id_sets_cookie(self, app): + def test_get_without_credential_id_sets_cookie(self, app: Flask): api = DatasourcePluginOAuthAuthorizationUrl() method = unwrap(api.get) @@ -115,7 +116,7 @@ class TestDatasourcePluginOAuthAuthorizationUrl: class 
TestDatasourceOAuthCallback: - def test_callback_success_new_credential(self, app): + def test_callback_success_new_credential(self, app: Flask): api = DatasourceOAuthCallback() method = unwrap(api.get) @@ -157,7 +158,7 @@ class TestDatasourceOAuthCallback: assert response.status_code == 302 - def test_callback_missing_context(self, app): + def test_callback_missing_context(self, app: Flask): api = DatasourceOAuthCallback() method = unwrap(api.get) @@ -165,7 +166,7 @@ class TestDatasourceOAuthCallback: with pytest.raises(Forbidden): method(api, "notion") - def test_callback_invalid_context(self, app): + def test_callback_invalid_context(self, app: Flask): api = DatasourceOAuthCallback() method = unwrap(api.get) @@ -180,7 +181,7 @@ class TestDatasourceOAuthCallback: with pytest.raises(Forbidden): method(api, "notion") - def test_callback_oauth_config_not_found(self, app): + def test_callback_oauth_config_not_found(self, app: Flask): api = DatasourceOAuthCallback() method = unwrap(api.get) @@ -202,7 +203,7 @@ class TestDatasourceOAuthCallback: with pytest.raises(NotFound): method(api, "notion") - def test_callback_reauthorize_existing_credential(self, app): + def test_callback_reauthorize_existing_credential(self, app: Flask): api = DatasourceOAuthCallback() method = unwrap(api.get) @@ -245,7 +246,7 @@ class TestDatasourceOAuthCallback: assert response.status_code == 302 assert "/oauth-callback" in response.location - def test_callback_context_id_from_cookie(self, app): + def test_callback_context_id_from_cookie(self, app: Flask): api = DatasourceOAuthCallback() method = unwrap(api.get) @@ -289,7 +290,7 @@ class TestDatasourceOAuthCallback: class TestDatasourceAuth: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = DatasourceAuth() method = unwrap(api.post) @@ -312,7 +313,7 @@ class TestDatasourceAuth: assert status == 200 - def test_post_invalid_credentials(self, app): + def test_post_invalid_credentials(self, app: Flask): api = 
DatasourceAuth() method = unwrap(api.post) @@ -334,7 +335,7 @@ class TestDatasourceAuth: with pytest.raises(ValueError): method(api, "notion") - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasourceAuth() method = unwrap(api.get) @@ -355,7 +356,7 @@ class TestDatasourceAuth: assert status == 200 assert response["result"] - def test_post_missing_credentials(self, app): + def test_post_missing_credentials(self, app: Flask): api = DatasourceAuth() method = unwrap(api.post) @@ -372,7 +373,7 @@ class TestDatasourceAuth: with pytest.raises(ValueError): method(api, "notion") - def test_get_empty_list(self, app): + def test_get_empty_list(self, app: Flask): api = DatasourceAuth() method = unwrap(api.get) @@ -395,7 +396,7 @@ class TestDatasourceAuth: class TestDatasourceAuthDeleteApi: - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = DatasourceAuthDeleteApi() method = unwrap(api.post) @@ -418,7 +419,7 @@ class TestDatasourceAuthDeleteApi: assert status == 200 - def test_delete_missing_credential_id(self, app): + def test_delete_missing_credential_id(self, app: Flask): api = DatasourceAuthDeleteApi() method = unwrap(api.post) @@ -437,7 +438,7 @@ class TestDatasourceAuthDeleteApi: class TestDatasourceAuthUpdateApi: - def test_update_success(self, app): + def test_update_success(self, app: Flask): api = DatasourceAuthUpdateApi() method = unwrap(api.post) @@ -460,7 +461,7 @@ class TestDatasourceAuthUpdateApi: assert status == 201 - def test_update_with_credentials_none(self, app): + def test_update_with_credentials_none(self, app: Flask): api = DatasourceAuthUpdateApi() method = unwrap(api.post) @@ -484,7 +485,7 @@ class TestDatasourceAuthUpdateApi: update_mock.assert_called_once() assert status == 201 - def test_update_name_only(self, app): + def test_update_name_only(self, app: Flask): api = DatasourceAuthUpdateApi() method = unwrap(api.post) @@ -507,7 +508,7 @@ class TestDatasourceAuthUpdateApi: 
assert status == 201 - def test_update_with_empty_credentials_dict(self, app): + def test_update_with_empty_credentials_dict(self, app: Flask): api = DatasourceAuthUpdateApi() method = unwrap(api.post) @@ -533,7 +534,7 @@ class TestDatasourceAuthUpdateApi: class TestDatasourceAuthListApi: - def test_list_success(self, app): + def test_list_success(self, app: Flask): api = DatasourceAuthListApi() method = unwrap(api.get) @@ -553,7 +554,7 @@ class TestDatasourceAuthListApi: assert status == 200 - def test_auth_list_empty(self, app): + def test_auth_list_empty(self, app: Flask): api = DatasourceAuthListApi() method = unwrap(api.get) @@ -574,7 +575,7 @@ class TestDatasourceAuthListApi: assert status == 200 assert response["result"] == [] - def test_hardcode_list_empty(self, app): + def test_hardcode_list_empty(self, app: Flask): api = DatasourceHardCodeAuthListApi() method = unwrap(api.get) @@ -597,7 +598,7 @@ class TestDatasourceAuthListApi: class TestDatasourceHardCodeAuthListApi: - def test_list_success(self, app): + def test_list_success(self, app: Flask): api = DatasourceHardCodeAuthListApi() method = unwrap(api.get) @@ -619,7 +620,7 @@ class TestDatasourceHardCodeAuthListApi: class TestDatasourceAuthOauthCustomClient: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = DatasourceAuthOauthCustomClient() method = unwrap(api.post) @@ -642,7 +643,7 @@ class TestDatasourceAuthOauthCustomClient: assert status == 200 - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = DatasourceAuthOauthCustomClient() method = unwrap(api.delete) @@ -662,7 +663,7 @@ class TestDatasourceAuthOauthCustomClient: assert status == 200 - def test_post_empty_payload(self, app): + def test_post_empty_payload(self, app: Flask): api = DatasourceAuthOauthCustomClient() method = unwrap(api.post) @@ -685,7 +686,7 @@ class TestDatasourceAuthOauthCustomClient: assert status == 200 - def test_post_disabled_flag(self, app): + def 
test_post_disabled_flag(self, app: Flask): api = DatasourceAuthOauthCustomClient() method = unwrap(api.post) @@ -714,7 +715,7 @@ class TestDatasourceAuthOauthCustomClient: class TestDatasourceAuthDefaultApi: - def test_set_default_success(self, app): + def test_set_default_success(self, app: Flask): api = DatasourceAuthDefaultApi() method = unwrap(api.post) @@ -737,7 +738,7 @@ class TestDatasourceAuthDefaultApi: assert status == 200 - def test_default_missing_id(self, app): + def test_default_missing_id(self, app: Flask): api = DatasourceAuthDefaultApi() method = unwrap(api.post) @@ -756,7 +757,7 @@ class TestDatasourceAuthDefaultApi: class TestDatasourceUpdateProviderNameApi: - def test_update_name_success(self, app): + def test_update_name_success(self, app: Flask): api = DatasourceUpdateProviderNameApi() method = unwrap(api.post) @@ -779,7 +780,7 @@ class TestDatasourceUpdateProviderNameApi: assert status == 200 - def test_update_name_too_long(self, app): + def test_update_name_too_long(self, app: Flask): api = DatasourceUpdateProviderNameApi() method = unwrap(api.post) @@ -799,7 +800,7 @@ class TestDatasourceUpdateProviderNameApi: with pytest.raises(ValueError): method(api, "notion") - def test_update_name_missing_credential_id(self, app): + def test_update_name_missing_credential_id(self, app: Flask): api = DatasourceUpdateProviderNameApi() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_content_preview.py b/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_content_preview.py index 7a8ccde55a..d4c6a775ec 100644 --- a/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_content_preview.py +++ b/api/tests/unit_tests/controllers/console/datasets/rag_pipeline/test_datasource_content_preview.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import Forbidden from 
controllers.console import console_ns @@ -25,7 +26,7 @@ class TestDataSourceContentPreviewApi: "credential_id": "cred-1", } - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = DataSourceContentPreviewApi() method = unwrap(api.post) @@ -66,7 +67,7 @@ class TestDataSourceContentPreviewApi: assert status == 200 assert response == preview_result - def test_post_forbidden_non_account_user(self, app): + def test_post_forbidden_non_account_user(self, app: Flask): api = DataSourceContentPreviewApi() method = unwrap(api.post) @@ -85,7 +86,7 @@ class TestDataSourceContentPreviewApi: with pytest.raises(Forbidden): method(api, pipeline, "node-1") - def test_post_invalid_payload(self, app): + def test_post_invalid_payload(self, app: Flask): api = DataSourceContentPreviewApi() method = unwrap(api.post) @@ -108,7 +109,7 @@ class TestDataSourceContentPreviewApi: with pytest.raises(ValueError): method(api, pipeline, "node-1") - def test_post_without_credential_id(self, app): + def test_post_without_credential_id(self, app: Flask): api = DataSourceContentPreviewApi() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_datasets.py b/api/tests/unit_tests/controllers/console/datasets/test_datasets.py index b40a72b4f9..e28d68ee5a 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_datasets.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_datasets.py @@ -2,6 +2,7 @@ import datetime from unittest.mock import MagicMock, PropertyMock, patch import pytest +from flask import Flask from werkzeug.exceptions import BadRequest, Forbidden, NotFound import services @@ -58,7 +59,7 @@ class TestDatasetList: user.is_dataset_editor = True return user - def test_get_success_basic(self, app): + def test_get_success_basic(self, app: Flask): api = DatasetListApi() method = unwrap(api.get) @@ -93,49 +94,7 @@ class TestDatasetList: assert resp["total"] == 1 assert resp["data"][0]["embedding_available"] 
is True - def test_get_with_rbac_enabled_fetches_permission_keys(self, app): - api = DatasetListApi() - method = unwrap(api.get) - - current_user = self._mock_user() - current_user.id = "acct-1" - dataset = MagicMock(id="ds-1") - datasets = [dataset] - marshaled = [self._mock_dataset_dict()] - - with app.test_request_context("/datasets"): - with ( - patch( - "controllers.console.datasets.datasets.current_account_with_tenant", - return_value=(current_user, "tenant-1"), - ), - patch("controllers.console.datasets.datasets.dify_config.RBAC_ENABLED", True), - patch.object( - DatasetService, - "get_datasets", - return_value=(datasets, 1), - ), - patch( - "controllers.console.datasets.datasets.enterprise_rbac_service.RBACService.DatasetPermissions.batch_get", - return_value={"ds-1": ["dataset.acl.readonly", "dataset.acl.edit"]}, - ) as mock_batch_get, - patch( - "controllers.console.datasets.datasets.marshal", - return_value=marshaled, - ), - patch.object( - ProviderManager, - "get_configurations", - return_value=MagicMock(get_models=lambda **_: []), - ), - ): - resp, status = method(api) - - assert status == 200 - assert dataset.permission_keys == ["dataset.acl.readonly", "dataset.acl.edit"] - mock_batch_get.assert_called_once_with("tenant-1", "acct-1", ["ds-1"]) - - def test_get_with_ids_filter(self, app): + def test_get_with_ids_filter(self, app: Flask): api = DatasetListApi() method = unwrap(api.get) @@ -170,7 +129,7 @@ class TestDatasetList: assert status == 200 assert resp["total"] == 2 - def test_get_with_tag_ids(self, app): + def test_get_with_tag_ids(self, app: Flask): api = DatasetListApi() method = unwrap(api.get) @@ -203,7 +162,7 @@ class TestDatasetList: assert status == 200 - def test_embedding_available_false(self, app): + def test_embedding_available_false(self, app: Flask): api = DatasetListApi() method = unwrap(api.get) @@ -245,7 +204,7 @@ class TestDatasetList: assert resp["data"][0]["embedding_available"] is False - def 
test_partial_members_permission(self, app): + def test_partial_members_permission(self, app: Flask): api = DatasetListApi() method = unwrap(api.get) @@ -284,7 +243,7 @@ class TestDatasetList: class TestDatasetListApiPost: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = DatasetListApi() method = unwrap(api.post) @@ -332,7 +291,7 @@ class TestDatasetListApiPost: assert status == 201 - def test_post_forbidden(self, app): + def test_post_forbidden(self, app: Flask): api = DatasetListApi() method = unwrap(api.post) @@ -352,7 +311,7 @@ class TestDatasetListApiPost: with pytest.raises(Forbidden): method(api) - def test_post_duplicate_name(self, app): + def test_post_duplicate_name(self, app: Flask): api = DatasetListApi() method = unwrap(api.post) @@ -377,7 +336,7 @@ class TestDatasetListApiPost: with pytest.raises(DatasetNameDuplicateError): method(api) - def test_post_invalid_payload_missing_name(self, app): + def test_post_invalid_payload_missing_name(self, app: Flask): api = DatasetListApi() method = unwrap(api.post) @@ -385,7 +344,7 @@ class TestDatasetListApiPost: with pytest.raises(ValueError): method(api) - def test_post_invalid_indexing_technique(self, app): + def test_post_invalid_indexing_technique(self, app: Flask): api = DatasetListApi() method = unwrap(api.post) @@ -398,7 +357,7 @@ class TestDatasetListApiPost: with pytest.raises(ValueError, match="Invalid indexing technique"): method(api) - def test_post_invalid_provider(self, app): + def test_post_invalid_provider(self, app: Flask): api = DatasetListApi() method = unwrap(api.post) @@ -413,7 +372,7 @@ class TestDatasetListApiPost: class TestDatasetApiGet: - def test_get_success_basic(self, app): + def test_get_success_basic(self, app: Flask): api = DatasetApi() method = unwrap(api.get) @@ -469,7 +428,7 @@ class TestDatasetApiGet: assert status == 200 assert data["embedding_available"] is True - def test_get_dataset_not_found(self, app): + def 
test_get_dataset_not_found(self, app: Flask): api = DatasetApi() method = unwrap(api.get) @@ -490,7 +449,7 @@ class TestDatasetApiGet: with pytest.raises(NotFound, match="Dataset not found"): method(api, dataset_id) - def test_get_permission_denied(self, app): + def test_get_permission_denied(self, app: Flask): api = DatasetApi() method = unwrap(api.get) @@ -517,7 +476,7 @@ class TestDatasetApiGet: with pytest.raises(Forbidden, match="no access"): method(api, dataset_id) - def test_get_high_quality_embedding_unavailable(self, app): + def test_get_high_quality_embedding_unavailable(self, app: Flask): api = DatasetApi() method = unwrap(api.get) @@ -572,7 +531,7 @@ class TestDatasetApiGet: assert data["embedding_available"] is False - def test_get_partial_members_permission(self, app): + def test_get_partial_members_permission(self, app: Flask): api = DatasetApi() method = unwrap(api.get) @@ -632,7 +591,7 @@ class TestDatasetApiGet: class TestDatasetApiPatch: - def test_patch_success_basic(self, app): + def test_patch_success_basic(self, app: Flask): api = DatasetApi() method = unwrap(api.patch) @@ -701,7 +660,7 @@ class TestDatasetApiPatch: assert status == 200 assert result["partial_member_list"] == [] - def test_patch_dataset_not_found(self, app): + def test_patch_dataset_not_found(self, app: Flask): api = DatasetApi() method = unwrap(api.patch) @@ -716,7 +675,7 @@ class TestDatasetApiPatch: with pytest.raises(NotFound, match="Dataset not found"): method(api, "missing") - def test_patch_permission_denied(self, app): + def test_patch_permission_denied(self, app: Flask): api = DatasetApi() method = unwrap(api.patch) @@ -746,7 +705,7 @@ class TestDatasetApiPatch: with pytest.raises(Forbidden): method(api, dataset_id) - def test_patch_partial_members_update(self, app): + def test_patch_partial_members_update(self, app: Flask): api = DatasetApi() method = unwrap(api.patch) @@ -815,7 +774,7 @@ class TestDatasetApiPatch: assert result["partial_member_list"] == 
payload["partial_member_list"] - def test_patch_clear_partial_members(self, app): + def test_patch_clear_partial_members(self, app: Flask): api = DatasetApi() method = unwrap(api.patch) @@ -885,7 +844,7 @@ class TestDatasetApiPatch: class TestDatasetApiDelete: - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = DatasetApi() method = unwrap(api.delete) @@ -916,7 +875,7 @@ class TestDatasetApiDelete: assert status == 204 assert result == {"result": "success"} - def test_delete_forbidden_no_permission(self, app): + def test_delete_forbidden_no_permission(self, app: Flask): api = DatasetApi() method = unwrap(api.delete) @@ -935,7 +894,7 @@ class TestDatasetApiDelete: with pytest.raises(Forbidden): method(api, dataset_id) - def test_delete_dataset_not_found(self, app): + def test_delete_dataset_not_found(self, app: Flask): api = DatasetApi() method = unwrap(api.delete) @@ -959,7 +918,7 @@ class TestDatasetApiDelete: with pytest.raises(NotFound, match="Dataset not found"): method(api, dataset_id) - def test_delete_dataset_in_use(self, app): + def test_delete_dataset_in_use(self, app: Flask): api = DatasetApi() method = unwrap(api.delete) @@ -985,7 +944,7 @@ class TestDatasetApiDelete: class TestDatasetUseCheckApi: - def test_get_use_check_true(self, app): + def test_get_use_check_true(self, app: Flask): api = DatasetUseCheckApi() method = unwrap(api.get) @@ -1004,7 +963,7 @@ class TestDatasetUseCheckApi: assert status == 200 assert result == {"is_using": True} - def test_get_use_check_false(self, app): + def test_get_use_check_false(self, app: Flask): api = DatasetUseCheckApi() method = unwrap(api.get) @@ -1025,7 +984,7 @@ class TestDatasetUseCheckApi: class TestDatasetQueryApi: - def test_get_queries_success(self, app): + def test_get_queries_success(self, app: Flask): api = DatasetQueryApi() method = unwrap(api.get) @@ -1069,7 +1028,7 @@ class TestDatasetQueryApi: assert response["has_more"] is False assert len(response["data"]) == 
2 - def test_get_queries_dataset_not_found(self, app): + def test_get_queries_dataset_not_found(self, app: Flask): api = DatasetQueryApi() method = unwrap(api.get) @@ -1091,7 +1050,7 @@ class TestDatasetQueryApi: with pytest.raises(NotFound, match="Dataset not found"): method(api, dataset_id) - def test_get_queries_permission_denied(self, app): + def test_get_queries_permission_denied(self, app: Flask): api = DatasetQueryApi() method = unwrap(api.get) @@ -1120,7 +1079,7 @@ class TestDatasetQueryApi: with pytest.raises(Forbidden): method(api, dataset_id) - def test_get_queries_pagination_has_more(self, app): + def test_get_queries_pagination_has_more(self, app: Flask): api = DatasetQueryApi() method = unwrap(api.get) @@ -1194,7 +1153,7 @@ class TestDatasetIndexingEstimateApi: "dataset_id": None, } - def test_post_success_upload_file(self, app): + def test_post_success_upload_file(self, app: Flask): api = DatasetIndexingEstimateApi() method = unwrap(api.post) @@ -1235,7 +1194,7 @@ class TestDatasetIndexingEstimateApi: assert status == 200 assert response == {"tokens": 100} - def test_post_file_not_found(self, app): + def test_post_file_not_found(self, app: Flask): api = DatasetIndexingEstimateApi() method = unwrap(api.post) @@ -1265,7 +1224,7 @@ class TestDatasetIndexingEstimateApi: with pytest.raises(NotFound): method(api) - def test_post_llm_bad_request_error(self, app): + def test_post_llm_bad_request_error(self, app: Flask): api = DatasetIndexingEstimateApi() method = unwrap(api.post) mock_file = self._upload_file() @@ -1300,7 +1259,7 @@ class TestDatasetIndexingEstimateApi: with pytest.raises(ProviderNotInitializeError): method(api) - def test_post_provider_token_not_init(self, app): + def test_post_provider_token_not_init(self, app: Flask): api = DatasetIndexingEstimateApi() method = unwrap(api.post) mock_file = self._upload_file() @@ -1335,7 +1294,7 @@ class TestDatasetIndexingEstimateApi: with pytest.raises(ProviderNotInitializeError): method(api) - def 
test_post_generic_exception(self, app): + def test_post_generic_exception(self, app: Flask): api = DatasetIndexingEstimateApi() method = unwrap(api.post) mock_file = self._upload_file() @@ -1372,7 +1331,7 @@ class TestDatasetIndexingEstimateApi: class TestDatasetRelatedAppListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetRelatedAppListApi() method = unwrap(api.get) @@ -1410,7 +1369,7 @@ class TestDatasetRelatedAppListApi: assert response["total"] == 2 assert response["data"] == [app1, app2] - def test_get_dataset_not_found(self, app): + def test_get_dataset_not_found(self, app: Flask): api = DatasetRelatedAppListApi() method = unwrap(api.get) @@ -1428,7 +1387,7 @@ class TestDatasetRelatedAppListApi: with pytest.raises(NotFound): method(api, "dataset-1") - def test_get_permission_denied(self, app): + def test_get_permission_denied(self, app: Flask): api = DatasetRelatedAppListApi() method = unwrap(api.get) @@ -1452,7 +1411,7 @@ class TestDatasetRelatedAppListApi: with pytest.raises(Forbidden): method(api, "dataset-1") - def test_get_filters_none_apps(self, app): + def test_get_filters_none_apps(self, app: Flask): api = DatasetRelatedAppListApi() method = unwrap(api.get) @@ -1491,7 +1450,7 @@ class TestDatasetRelatedAppListApi: class TestDatasetIndexingStatusApi: - def test_get_success_with_documents(self, app): + def test_get_success_with_documents(self, app: Flask): api = DatasetIndexingStatusApi() method = unwrap(api.get) @@ -1532,7 +1491,7 @@ class TestDatasetIndexingStatusApi: assert item["completed_segments"] == 3 assert item["total_segments"] == 3 - def test_get_success_no_documents(self, app): + def test_get_success_no_documents(self, app: Flask): api = DatasetIndexingStatusApi() method = unwrap(api.get) @@ -1552,7 +1511,7 @@ class TestDatasetIndexingStatusApi: assert status == 200 assert response == {"data": []} - def test_segment_counts_different_values(self, app): + def 
test_segment_counts_different_values(self, app: Flask): api = DatasetIndexingStatusApi() method = unwrap(api.get) @@ -1592,7 +1551,7 @@ class TestDatasetIndexingStatusApi: class TestDatasetApiKeyApi: - def test_get_api_keys_success(self, app): + def test_get_api_keys_success(self, app: Flask): api = DatasetApiKeyApi() method = unwrap(api.get) @@ -1629,7 +1588,7 @@ class TestDatasetApiKeyApi: assert response["data"][1]["id"] == "key-2" assert response["data"][1]["token"] == "ds-def" - def test_post_create_api_key_success(self, app): + def test_post_create_api_key_success(self, app: Flask): api = DatasetApiKeyApi() method = unwrap(api.post) @@ -1674,7 +1633,7 @@ class TestDatasetApiKeyApi: assert response["type"] == "dataset" assert response["created_at"] is not None - def test_post_exceed_max_keys(self, app): + def test_post_exceed_max_keys(self, app: Flask): api = DatasetApiKeyApi() method = unwrap(api.post) @@ -1700,7 +1659,7 @@ class TestDatasetApiKeyApi: class TestDatasetApiDeleteApi: - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = DatasetApiDeleteApi() method = unwrap(api.delete) @@ -1730,7 +1689,7 @@ class TestDatasetApiDeleteApi: assert status == 204 assert response["result"] == "success" - def test_delete_key_not_found(self, app): + def test_delete_key_not_found(self, app: Flask): api = DatasetApiDeleteApi() method = unwrap(api.delete) @@ -1750,7 +1709,7 @@ class TestDatasetApiDeleteApi: class TestDatasetEnableApiApi: - def test_enable_api(self, app): + def test_enable_api(self, app: Flask): api = DatasetEnableApiApi() method = unwrap(api.post) @@ -1766,7 +1725,7 @@ class TestDatasetEnableApiApi: assert status == 200 assert response["result"] == "success" - def test_disable_api(self, app): + def test_disable_api(self, app: Flask): api = DatasetEnableApiApi() method = unwrap(api.post) @@ -1784,7 +1743,7 @@ class TestDatasetEnableApiApi: class TestDatasetApiBaseUrlApi: - def test_get_api_base_url_from_config(self, app): 
+ def test_get_api_base_url_from_config(self, app: Flask): api = DatasetApiBaseUrlApi() method = unwrap(api.get) @@ -1799,7 +1758,7 @@ class TestDatasetApiBaseUrlApi: assert response["api_base_url"] == "https://example.com/v1" - def test_get_api_base_url_from_request(self, app): + def test_get_api_base_url_from_request(self, app: Flask): api = DatasetApiBaseUrlApi() method = unwrap(api.get) @@ -1814,7 +1773,7 @@ class TestDatasetApiBaseUrlApi: assert response["api_base_url"] == "http://localhost:5000/v1" - def test_get_api_base_url_no_double_v1(self, app): + def test_get_api_base_url_no_double_v1(self, app: Flask): api = DatasetApiBaseUrlApi() method = unwrap(api.get) @@ -1831,7 +1790,7 @@ class TestDatasetApiBaseUrlApi: class TestDatasetRetrievalSettingApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetRetrievalSettingApi() method = unwrap(api.get) @@ -1852,7 +1811,7 @@ class TestDatasetRetrievalSettingApi: class TestDatasetRetrievalSettingMockApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetRetrievalSettingMockApi() method = unwrap(api.get) @@ -1869,7 +1828,7 @@ class TestDatasetRetrievalSettingMockApi: class TestDatasetErrorDocs: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetErrorDocs() method = unwrap(api.get) @@ -1892,7 +1851,7 @@ class TestDatasetErrorDocs: assert status == 200 assert response["total"] == 1 - def test_get_dataset_not_found(self, app): + def test_get_dataset_not_found(self, app: Flask): api = DatasetErrorDocs() method = unwrap(api.get) @@ -1908,7 +1867,7 @@ class TestDatasetErrorDocs: class TestDatasetPermissionUserListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetPermissionUserListApi() method = unwrap(api.get) @@ -1939,7 +1898,7 @@ class TestDatasetPermissionUserListApi: assert status == 200 assert response["data"] == users - def test_get_permission_denied(self, 
app): + def test_get_permission_denied(self, app: Flask): api = DatasetPermissionUserListApi() method = unwrap(api.get) @@ -1965,7 +1924,7 @@ class TestDatasetPermissionUserListApi: class TestDatasetAutoDisableLogApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetAutoDisableLogApi() method = unwrap(api.get) @@ -1988,7 +1947,7 @@ class TestDatasetAutoDisableLogApi: assert status == 200 assert response == logs - def test_get_dataset_not_found(self, app): + def test_get_dataset_not_found(self, app: Flask): api = DatasetAutoDisableLogApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_datasets_document.py b/api/tests/unit_tests/controllers/console/datasets/test_datasets_document.py index d9b02ac453..ff9e1736d2 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_datasets_document.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_datasets_document.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import Forbidden, NotFound import services @@ -239,7 +240,7 @@ class TestDatasetDocumentListApi: assert "documents" in response - def test_post_forbidden(self, app): + def test_post_forbidden(self, app: Flask): api = DatasetDocumentListApi() method = unwrap(api.post) @@ -395,7 +396,7 @@ class TestDocumentDownloadApi: class TestDocumentProcessingApi: - def test_processing_forbidden_when_not_editor(self, app): + def test_processing_forbidden_when_not_editor(self, app: Flask): api = DocumentProcessingApi() method = unwrap(api.patch) @@ -1185,7 +1186,7 @@ class TestDocumentPermissionCases: "preview": [], } - def test_document_tenant_mismatch(self, app): + def test_document_tenant_mismatch(self, app: Flask): api = DocumentApi() method = unwrap(api.get) @@ -1253,7 +1254,7 @@ class TestDocumentPermissionCases: assert status == 200 assert response["mode"] == 
"custom" - def test_process_rule_permission_denied(self, app): + def test_process_rule_permission_denied(self, app: Flask): api = GetProcessRuleApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py b/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py index 693b06e95b..66d257ee66 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import Forbidden, NotFound import services @@ -67,7 +68,7 @@ def _segment(): ) -def test_get_segment_with_summary(monkeypatch): +def test_get_segment_with_summary(monkeypatch: pytest.MonkeyPatch): segment = _segment() summary = SimpleNamespace(summary_content="summary") @@ -82,7 +83,7 @@ def test_get_segment_with_summary(monkeypatch): class TestDatasetDocumentSegmentListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetDocumentSegmentListApi() method = unwrap(api.get) @@ -132,7 +133,7 @@ class TestDatasetDocumentSegmentListApi: assert status == 200 - def test_get_dataset_not_found(self, app): + def test_get_dataset_not_found(self, app: Flask): api = DatasetDocumentSegmentListApi() method = unwrap(api.get) @@ -150,7 +151,7 @@ class TestDatasetDocumentSegmentListApi: with pytest.raises(NotFound): method(api, "ds-1", "doc-1") - def test_get_permission_denied(self, app): + def test_get_permission_denied(self, app: Flask): api = DatasetDocumentSegmentListApi() method = unwrap(api.get) @@ -176,7 +177,7 @@ class TestDatasetDocumentSegmentListApi: class TestDatasetDocumentSegmentApi: - def test_patch_success(self, app): + def test_patch_success(self, app: Flask): api = DatasetDocumentSegmentApi() method = unwrap(api.patch) @@ -221,7 +222,7 @@ 
class TestDatasetDocumentSegmentApi: assert status == 200 assert response["result"] == "success" - def test_patch_document_indexing_in_progress(self, app): + def test_patch_document_indexing_in_progress(self, app: Flask): api = DatasetDocumentSegmentApi() method = unwrap(api.patch) @@ -264,7 +265,7 @@ class TestDatasetDocumentSegmentApi: with pytest.raises(InvalidActionError): method(api, "ds-1", "doc-1", "disable") - def test_patch_llm_bad_request(self, app): + def test_patch_llm_bad_request(self, app: Flask): api = DatasetDocumentSegmentApi() method = unwrap(api.patch) @@ -308,7 +309,7 @@ class TestDatasetDocumentSegmentApi: with pytest.raises(ProviderNotInitializeError): method(api, "ds-1", "doc-1", "enable") - def test_patch_provider_token_not_init(self, app): + def test_patch_provider_token_not_init(self, app: Flask): api = DatasetDocumentSegmentApi() method = unwrap(api.patch) @@ -354,7 +355,7 @@ class TestDatasetDocumentSegmentApi: class TestDatasetDocumentSegmentAddApi: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = DatasetDocumentSegmentAddApi() method = unwrap(api.post) @@ -413,7 +414,7 @@ class TestDatasetDocumentSegmentAddApi: assert status == 200 assert response["data"]["id"] == "seg-1" - def test_post_llm_bad_request(self, app): + def test_post_llm_bad_request(self, app: Flask): api = DatasetDocumentSegmentAddApi() method = unwrap(api.post) @@ -452,7 +453,7 @@ class TestDatasetDocumentSegmentAddApi: with pytest.raises(ProviderNotInitializeError): method(api, "ds-1", "doc-1") - def test_post_provider_token_not_init(self, app): + def test_post_provider_token_not_init(self, app: Flask): api = DatasetDocumentSegmentAddApi() method = unwrap(api.post) @@ -493,7 +494,7 @@ class TestDatasetDocumentSegmentAddApi: class TestDatasetDocumentSegmentUpdateApi: - def test_patch_success(self, app): + def test_patch_success(self, app: Flask): api = DatasetDocumentSegmentUpdateApi() method = unwrap(api.patch) @@ -551,7 +552,7 @@ 
class TestDatasetDocumentSegmentUpdateApi: assert status == 200 assert "data" in response - def test_patch_llm_bad_request(self, app): + def test_patch_llm_bad_request(self, app: Flask): api = DatasetDocumentSegmentUpdateApi() method = unwrap(api.patch) @@ -596,7 +597,7 @@ class TestDatasetDocumentSegmentUpdateApi: class TestDatasetDocumentSegmentBatchImportApi: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -638,7 +639,7 @@ class TestDatasetDocumentSegmentBatchImportApi: assert status == 200 assert response["job_status"] == "waiting" - def test_post_dataset_not_found(self, app): + def test_post_dataset_not_found(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -659,7 +660,7 @@ class TestDatasetDocumentSegmentBatchImportApi: with pytest.raises(NotFound): method(api, "ds-1", "doc-1") - def test_post_document_not_found(self, app): + def test_post_document_not_found(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -684,7 +685,7 @@ class TestDatasetDocumentSegmentBatchImportApi: with pytest.raises(NotFound): method(api, "ds-1", "doc-1") - def test_post_upload_file_not_found(self, app): + def test_post_upload_file_not_found(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -713,7 +714,7 @@ class TestDatasetDocumentSegmentBatchImportApi: with pytest.raises(NotFound): method(api, "ds-1", "doc-1") - def test_post_invalid_file_type(self, app): + def test_post_invalid_file_type(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -745,7 +746,7 @@ class TestDatasetDocumentSegmentBatchImportApi: with pytest.raises(ValueError): method(api, "ds-1", "doc-1") - def test_post_async_task_failure(self, app): + def test_post_async_task_failure(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method 
= unwrap(api.post) @@ -783,7 +784,7 @@ class TestDatasetDocumentSegmentBatchImportApi: assert status == 500 assert "error" in response - def test_get_job_not_found_in_redis(self, app): + def test_get_job_not_found_in_redis(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.get) @@ -799,7 +800,7 @@ class TestDatasetDocumentSegmentBatchImportApi: class TestChildChunkAddApi: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = ChildChunkAddApi() method = unwrap(api.post) @@ -852,7 +853,7 @@ class TestChildChunkAddApi: assert status == 200 assert response["data"]["id"] == "cc-1" - def test_post_child_chunk_indexing_error(self, app): + def test_post_child_chunk_indexing_error(self, app: Flask): api = ChildChunkAddApi() method = unwrap(api.post) @@ -897,7 +898,7 @@ class TestChildChunkAddApi: class TestChildChunkUpdateApi: - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = ChildChunkUpdateApi() method = unwrap(api.delete) @@ -941,7 +942,7 @@ class TestChildChunkUpdateApi: assert status == 204 assert response["result"] == "success" - def test_delete_child_chunk_index_error(self, app): + def test_delete_child_chunk_index_error(self, app: Flask): api = ChildChunkUpdateApi() method = unwrap(api.delete) @@ -984,7 +985,7 @@ class TestChildChunkUpdateApi: class TestSegmentListAdvancedCases: - def test_segment_list_with_keyword_filter(self, app): + def test_segment_list_with_keyword_filter(self, app: Flask): api = DatasetDocumentSegmentListApi() method = unwrap(api.get) @@ -1035,7 +1036,7 @@ class TestSegmentListAdvancedCases: assert status == 200 assert response["total"] == 1 - def test_segment_list_permission_denied(self, app): + def test_segment_list_permission_denied(self, app: Flask): """Test segment list with permission denied""" api = DatasetDocumentSegmentListApi() method = unwrap(api.get) @@ -1058,7 +1059,7 @@ class TestSegmentListAdvancedCases: with 
pytest.raises(Forbidden): method(api, "ds-1", "doc-1") - def test_segment_list_dataset_not_found(self, app): + def test_segment_list_dataset_not_found(self, app: Flask): """Test segment list with dataset not found""" api = DatasetDocumentSegmentListApi() method = unwrap(api.get) @@ -1079,7 +1080,7 @@ class TestSegmentListAdvancedCases: class TestSegmentOperationCases: - def test_segment_add_with_provider_token_error(self, app): + def test_segment_add_with_provider_token_error(self, app: Flask): """Test segment add with provider token not initialized""" api = DatasetDocumentSegmentAddApi() method = unwrap(api.post) @@ -1117,7 +1118,7 @@ class TestSegmentOperationCases: with pytest.raises(ProviderTokenNotInitError): method(api, "ds-1", "doc-1") - def test_batch_import_with_document_not_found(self, app): + def test_batch_import_with_document_not_found(self, app: Flask): """Test batch import with document not found""" api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -1146,7 +1147,7 @@ class TestSegmentOperationCases: with pytest.raises(NotFound): method(api, "ds-1", "doc-1") - def test_batch_import_with_invalid_file(self, app): + def test_batch_import_with_invalid_file(self, app: Flask): """Test batch import with invalid file type""" api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -1181,7 +1182,7 @@ class TestSegmentOperationCases: with pytest.raises(NotFound): method(api, "ds-1", "doc-1") - def test_batch_import_with_async_task_failure(self, app): + def test_batch_import_with_async_task_failure(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.post) @@ -1226,7 +1227,7 @@ class TestSegmentOperationCases: assert status == 500 assert "error" in response - def test_batch_import_get_job_not_found(self, app): + def test_batch_import_get_job_not_found(self, app: Flask): api = DatasetDocumentSegmentBatchImportApi() method = unwrap(api.get) diff --git 
a/api/tests/unit_tests/controllers/console/datasets/test_external.py b/api/tests/unit_tests/controllers/console/datasets/test_external.py index 514bbbe040..7254bf7670 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_external.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_external.py @@ -57,7 +57,7 @@ def mock_auth(monkeypatch, current_user): class TestExternalApiTemplateListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = ExternalApiTemplateListApi() method = unwrap(api.get) @@ -78,7 +78,7 @@ class TestExternalApiTemplateListApi: assert resp["total"] == 1 assert resp["data"][0]["id"] == "1" - def test_post_forbidden(self, app, current_user): + def test_post_forbidden(self, app: Flask, current_user): current_user.is_dataset_editor = False api = ExternalApiTemplateListApi() method = unwrap(api.post) @@ -93,7 +93,7 @@ class TestExternalApiTemplateListApi: with pytest.raises(Forbidden): method(api) - def test_post_duplicate_name(self, app): + def test_post_duplicate_name(self, app: Flask): api = ExternalApiTemplateListApi() method = unwrap(api.post) @@ -114,7 +114,7 @@ class TestExternalApiTemplateListApi: class TestExternalApiTemplateApi: - def test_get_not_found(self, app): + def test_get_not_found(self, app: Flask): api = ExternalApiTemplateApi() method = unwrap(api.get) @@ -129,7 +129,7 @@ class TestExternalApiTemplateApi: with pytest.raises(NotFound): method(api, "api-id") - def test_delete_forbidden(self, app, current_user): + def test_delete_forbidden(self, app: Flask, current_user): current_user.has_edit_permission = False current_user.is_dataset_operator = False @@ -142,7 +142,7 @@ class TestExternalApiTemplateApi: class TestExternalApiUseCheckApi: - def test_get_scopes_usage_check_to_current_tenant(self, app): + def test_get_scopes_usage_check_to_current_tenant(self, app: Flask): api = ExternalApiUseCheckApi() method = unwrap(api.get) @@ -162,7 +162,7 @@ class 
TestExternalApiUseCheckApi: class TestExternalDatasetCreateApi: - def test_create_success(self, app): + def test_create_success(self, app: Flask): api = ExternalDatasetCreateApi() method = unwrap(api.post) @@ -206,7 +206,7 @@ class TestExternalDatasetCreateApi: assert status == 201 - def test_create_forbidden(self, app, current_user): + def test_create_forbidden(self, app: Flask, current_user): current_user.is_dataset_editor = False api = ExternalDatasetCreateApi() method = unwrap(api.post) @@ -226,7 +226,7 @@ class TestExternalDatasetCreateApi: class TestExternalKnowledgeHitTestingApi: - def test_hit_testing_dataset_not_found(self, app): + def test_hit_testing_dataset_not_found(self, app: Flask): api = ExternalKnowledgeHitTestingApi() method = unwrap(api.post) @@ -241,7 +241,7 @@ class TestExternalKnowledgeHitTestingApi: with pytest.raises(NotFound): method(api, "dataset-id") - def test_hit_testing_success(self, app): + def test_hit_testing_success(self, app: Flask): api = ExternalKnowledgeHitTestingApi() method = unwrap(api.post) @@ -266,7 +266,7 @@ class TestExternalKnowledgeHitTestingApi: class TestBedrockRetrievalApi: - def test_bedrock_retrieval(self, app): + def test_bedrock_retrieval(self, app: Flask): api = BedrockRetrievalApi() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py b/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py index 09ed2aaf69..4fa5d21493 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound from controllers.console import console_ns @@ -35,7 +36,7 @@ def dataset(): @pytest.fixture(autouse=True) -def bypass_decorators(mocker): +def bypass_decorators(mocker: 
MockerFixture): """Bypass all decorators on the API method.""" mocker.patch( "controllers.console.datasets.hit_testing.setup_required", @@ -56,7 +57,7 @@ def bypass_decorators(mocker): class TestHitTestingApi: - def test_hit_testing_success(self, app, dataset, dataset_id): + def test_hit_testing_success(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -99,7 +100,7 @@ class TestHitTestingApi: assert "records" in result assert result["records"] == [] - def test_hit_testing_success_with_optional_record_fields(self, app, dataset, dataset_id): + def test_hit_testing_success_with_optional_record_fields(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -150,7 +151,7 @@ class TestHitTestingApi: assert result["query"] == payload["query"] assert result["records"] == records - def test_hit_testing_dataset_not_found(self, app, dataset_id): + def test_hit_testing_dataset_not_found(self, app: Flask, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -175,7 +176,7 @@ class TestHitTestingApi: with pytest.raises(NotFound, match="Dataset not found"): method(api, dataset_id) - def test_hit_testing_invalid_args(self, app, dataset, dataset_id): + def test_hit_testing_invalid_args(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_metadata.py b/api/tests/unit_tests/controllers/console/datasets/test_metadata.py index de834c2d4d..4042190ff6 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_metadata.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_metadata.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound from controllers.console import console_ns @@ -60,7 +61,7 @@ def metadata_id(): @pytest.fixture(autouse=True) -def 
bypass_decorators(mocker): +def bypass_decorators(mocker: MockerFixture): """Bypass setup/login/license decorators.""" mocker.patch( "controllers.console.datasets.metadata.setup_required", @@ -269,7 +270,7 @@ class TestDatasetMetadataApi: class TestDatasetMetadataBuiltInFieldApi: - def test_get_built_in_fields(self, app): + def test_get_built_in_fields(self, app: Flask): api = DatasetMetadataBuiltInFieldApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_website.py b/api/tests/unit_tests/controllers/console/datasets/test_website.py index 9f0da6e76f..9991a0d345 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_website.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_website.py @@ -2,6 +2,7 @@ from unittest.mock import Mock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from controllers.console import console_ns from controllers.console.datasets.error import WebsiteCrawlError @@ -31,7 +32,7 @@ def app(): @pytest.fixture(autouse=True) -def bypass_auth_and_setup(mocker): +def bypass_auth_and_setup(mocker: MockerFixture): """Bypass setup/login/account decorators.""" mocker.patch( "controllers.console.datasets.website.login_required", @@ -48,7 +49,7 @@ def bypass_auth_and_setup(mocker): class TestWebsiteCrawlApi: - def test_crawl_success(self, app, mocker): + def test_crawl_success(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -85,7 +86,7 @@ class TestWebsiteCrawlApi: assert status == 200 assert result["job_id"] == "job-1" - def test_crawl_invalid_payload(self, app, mocker): + def test_crawl_invalid_payload(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -113,7 +114,7 @@ class TestWebsiteCrawlApi: with pytest.raises(WebsiteCrawlError, match="invalid payload"): method(api) - def test_crawl_service_error(self, app, mocker): + def test_crawl_service_error(self, 
app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -150,7 +151,7 @@ class TestWebsiteCrawlApi: class TestWebsiteCrawlStatusApi: - def test_get_status_success(self, app, mocker): + def test_get_status_success(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) @@ -181,7 +182,7 @@ class TestWebsiteCrawlStatusApi: assert status == 200 assert result["status"] == "completed" - def test_get_status_invalid_provider(self, app, mocker): + def test_get_status_invalid_provider(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) @@ -203,7 +204,7 @@ class TestWebsiteCrawlStatusApi: with pytest.raises(WebsiteCrawlError, match="invalid provider"): method(api, job_id) - def test_get_status_service_error(self, app, mocker): + def test_get_status_service_error(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_wraps.py b/api/tests/unit_tests/controllers/console/datasets/test_wraps.py index e358435de4..2cfa938af8 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_wraps.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_wraps.py @@ -1,6 +1,7 @@ from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from controllers.console.datasets.error import PipelineNotFoundError from controllers.console.datasets.wraps import get_rag_pipeline @@ -16,7 +17,7 @@ class TestGetRagPipeline: with pytest.raises(ValueError, match="missing pipeline_id"): dummy_view() - def test_pipeline_not_found(self, mocker): + def test_pipeline_not_found(self, mocker: MockerFixture): @get_rag_pipeline def dummy_view(**kwargs): return "ok" @@ -34,7 +35,7 @@ class TestGetRagPipeline: with pytest.raises(PipelineNotFoundError): dummy_view(pipeline_id="pipeline-1") - def test_pipeline_found_and_injected(self, mocker): + def 
test_pipeline_found_and_injected(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) pipeline.id = "pipeline-1" pipeline.tenant_id = "tenant-1" @@ -57,7 +58,7 @@ class TestGetRagPipeline: assert result is pipeline - def test_pipeline_id_removed_from_kwargs(self, mocker): + def test_pipeline_id_removed_from_kwargs(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) @get_rag_pipeline @@ -79,7 +80,7 @@ class TestGetRagPipeline: assert result == "ok" - def test_pipeline_id_cast_to_string(self, mocker): + def test_pipeline_id_cast_to_string(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) @get_rag_pipeline diff --git a/api/tests/unit_tests/controllers/console/explore/test_banner.py b/api/tests/unit_tests/controllers/console/explore/test_banner.py index c8f674f515..d1cb6b6a03 100644 --- a/api/tests/unit_tests/controllers/console/explore/test_banner.py +++ b/api/tests/unit_tests/controllers/console/explore/test_banner.py @@ -1,6 +1,8 @@ from datetime import datetime from unittest.mock import MagicMock, patch +from flask import Flask + import controllers.console.explore.banner as banner_module from models.enums import BannerStatus @@ -12,7 +14,7 @@ def unwrap(func): class TestBannerApi: - def test_get_banners_with_requested_language(self, app): + def test_get_banners_with_requested_language(self, app: Flask): api = banner_module.BannerApi() method = unwrap(api.get) @@ -41,7 +43,7 @@ class TestBannerApi: } ] - def test_get_banners_fallback_to_en_us(self, app): + def test_get_banners_fallback_to_en_us(self, app: Flask): api = banner_module.BannerApi() method = unwrap(api.get) @@ -76,7 +78,7 @@ class TestBannerApi: } ] - def test_get_banners_default_language_en_us(self, app): + def test_get_banners_default_language_en_us(self, app: Flask): api = banner_module.BannerApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/explore/test_message.py b/api/tests/unit_tests/controllers/console/explore/test_message.py index 
145cc9cdd7..3d41489435 100644 --- a/api/tests/unit_tests/controllers/console/explore/test_message.py +++ b/api/tests/unit_tests/controllers/console/explore/test_message.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import InternalServerError, NotFound import controllers.console.explore.message as module @@ -54,7 +55,7 @@ def make_message(): class TestMessageListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = module.MessageListApi() method = unwrap(api.get) @@ -96,7 +97,7 @@ class TestMessageListApi: with pytest.raises(NotChatAppError): method(installed_app) - def test_conversation_not_exists(self, app): + def test_conversation_not_exists(self, app: Flask): api = module.MessageListApi() method = unwrap(api.get) @@ -118,7 +119,7 @@ class TestMessageListApi: with pytest.raises(NotFound): method(installed_app) - def test_first_message_not_exists(self, app): + def test_first_message_not_exists(self, app: Flask): api = module.MessageListApi() method = unwrap(api.get) @@ -142,7 +143,7 @@ class TestMessageListApi: class TestMessageFeedbackApi: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = module.MessageFeedbackApi() method = unwrap(api.post) @@ -161,7 +162,7 @@ class TestMessageFeedbackApi: assert result["result"] == "success" - def test_message_not_exists(self, app): + def test_message_not_exists(self, app: Flask): api = module.MessageFeedbackApi() method = unwrap(api.post) @@ -182,7 +183,7 @@ class TestMessageFeedbackApi: class TestMessageMoreLikeThisApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) @@ -221,7 +222,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(NotCompletionAppError): method(installed_app, "mid") - def test_more_like_this_disabled(self, app): + def test_more_like_this_disabled(self, app: Flask): 
api = module.MessageMoreLikeThisApi() method = unwrap(api.get) @@ -243,7 +244,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(AppMoreLikeThisDisabledError): method(installed_app, "mid") - def test_message_not_exists_more_like_this(self, app): + def test_message_not_exists_more_like_this(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) @@ -265,7 +266,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(NotFound): method(installed_app, "mid") - def test_provider_not_init_more_like_this(self, app): + def test_provider_not_init_more_like_this(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) @@ -287,7 +288,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(ProviderNotInitializeError): method(installed_app, "mid") - def test_quota_exceeded_more_like_this(self, app): + def test_quota_exceeded_more_like_this(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) @@ -309,7 +310,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(ProviderQuotaExceededError): method(installed_app, "mid") - def test_model_not_support_more_like_this(self, app): + def test_model_not_support_more_like_this(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) @@ -331,7 +332,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(ProviderModelCurrentlyNotSupportError): method(installed_app, "mid") - def test_invoke_error_more_like_this(self, app): + def test_invoke_error_more_like_this(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) @@ -353,7 +354,7 @@ class TestMessageMoreLikeThisApi: with pytest.raises(CompletionRequestError): method(installed_app, "mid") - def test_unexpected_error_more_like_this(self, app): + def test_unexpected_error_more_like_this(self, app: Flask): api = module.MessageMoreLikeThisApi() method = unwrap(api.get) diff --git 
a/api/tests/unit_tests/controllers/console/explore/test_recommended_app.py b/api/tests/unit_tests/controllers/console/explore/test_recommended_app.py index 76c863577a..89cbea5ddc 100644 --- a/api/tests/unit_tests/controllers/console/explore/test_recommended_app.py +++ b/api/tests/unit_tests/controllers/console/explore/test_recommended_app.py @@ -1,5 +1,7 @@ from unittest.mock import MagicMock, patch +from flask import Flask + import controllers.console.explore.recommended_app as module from models.model import AppMode, IconType @@ -11,7 +13,7 @@ def unwrap(func): class TestRecommendedAppListApi: - def test_get_with_language_param(self, app): + def test_get_with_language_param(self, app: Flask): api = module.RecommendedAppListApi() method = unwrap(api.get) @@ -31,7 +33,7 @@ class TestRecommendedAppListApi: service_mock.assert_called_once_with("en-US") assert result == result_data - def test_get_fallback_to_user_language(self, app): + def test_get_fallback_to_user_language(self, app: Flask): api = module.RecommendedAppListApi() method = unwrap(api.get) @@ -51,7 +53,7 @@ class TestRecommendedAppListApi: service_mock.assert_called_once_with("fr-FR") assert result == result_data - def test_get_fallback_to_default_language(self, app): + def test_get_fallback_to_default_language(self, app: Flask): api = module.RecommendedAppListApi() method = unwrap(api.get) @@ -73,7 +75,7 @@ class TestRecommendedAppListApi: class TestRecommendedAppApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = module.RecommendedAppApi() method = unwrap(api.get) @@ -124,7 +126,7 @@ class TestRecommendedAppResponseModels: }, "app_id": "app-1", "description": "desc", - "category": "cat", + "categories": ["cat", "other"], "position": 1, "is_listed": True, "can_trial": False, @@ -135,4 +137,5 @@ class TestRecommendedAppResponseModels: ).model_dump(mode="json") assert response["recommended_apps"][0]["app_id"] == "app-1" + assert 
response["recommended_apps"][0]["categories"] == ["cat", "other"] assert response["categories"] == ["cat"] diff --git a/api/tests/unit_tests/controllers/console/explore/test_saved_message.py b/api/tests/unit_tests/controllers/console/explore/test_saved_message.py index bb7cdd55c4..71241890e9 100644 --- a/api/tests/unit_tests/controllers/console/explore/test_saved_message.py +++ b/api/tests/unit_tests/controllers/console/explore/test_saved_message.py @@ -2,6 +2,7 @@ from unittest.mock import MagicMock, PropertyMock, patch from uuid import uuid4 import pytest +from flask import Flask from werkzeug.exceptions import NotFound import controllers.console.explore.saved_message as module @@ -42,7 +43,7 @@ def payload_patch(): class TestSavedMessageListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = module.SavedMessageListApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/explore/test_trial.py b/api/tests/unit_tests/controllers/console/explore/test_trial.py index 3625056af9..82a063307b 100644 --- a/api/tests/unit_tests/controllers/console/explore/test_trial.py +++ b/api/tests/unit_tests/controllers/console/explore/test_trial.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, patch from uuid import uuid4 import pytest +from flask import Flask from werkzeug.exceptions import Forbidden, InternalServerError, NotFound import controllers.console.explore.trial as module @@ -87,8 +88,13 @@ def valid_parameters(): } +def test_trial_workflow_uses_trial_scoped_simple_account_model(): + assert module.simple_account_model.name == "TrialSimpleAccount" + assert hasattr(module.simple_account_model, "items") + + class TestTrialAppWorkflowRunApi: - def test_not_workflow_app(self, app): + def test_not_workflow_app(self, app: Flask): api = module.TrialAppWorkflowRunApi() method = unwrap(api.post) @@ -224,7 +230,7 @@ class TestTrialAppWorkflowRunApi: class TestTrialChatApi: - def test_not_chat_app(self, app): + def 
test_not_chat_app(self, app: Flask): api = module.TrialChatApi() method = unwrap(api.post) @@ -408,7 +414,7 @@ class TestTrialChatApi: class TestTrialCompletionApi: - def test_not_completion_app(self, app): + def test_not_completion_app(self, app: Flask): api = module.TrialCompletionApi() method = unwrap(api.post) @@ -560,7 +566,7 @@ class TestTrialCompletionApi: class TestTrialMessageSuggestedQuestionApi: - def test_not_chat_app(self, app): + def test_not_chat_app(self, app: Flask): api = module.TrialMessageSuggestedQuestionApi() method = unwrap(api.get) @@ -952,7 +958,7 @@ class TestTrialAppWorkflowTaskStopApi: class TestTrialSitApi: - def test_no_site(self, app): + def test_no_site(self, app: Flask): api = module.TrialSitApi() method = unwrap(api.get) app_model = MagicMock() @@ -963,7 +969,7 @@ class TestTrialSitApi: with pytest.raises(Forbidden): method(api, app_model) - def test_archived_tenant(self, app): + def test_archived_tenant(self, app: Flask): api = module.TrialSitApi() method = unwrap(api.get) @@ -978,7 +984,7 @@ class TestTrialSitApi: with pytest.raises(Forbidden): method(api, app_model) - def test_success(self, app): + def test_success(self, app: Flask): api = module.TrialSitApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/tag/test_tags.py b/api/tests/unit_tests/controllers/console/tag/test_tags.py index 6405558bb4..8b47da25fb 100644 --- a/api/tests/unit_tests/controllers/console/tag/test_tags.py +++ b/api/tests/unit_tests/controllers/console/tag/test_tags.py @@ -8,10 +8,8 @@ from werkzeug.exceptions import Forbidden import controllers.console.tag.tags as module from controllers.console import console_ns from controllers.console.tag.tags import ( - DeprecatedTagBindingCreateApi, - DeprecatedTagBindingRemoveApi, TagBindingCollectionApi, - TagBindingItemApi, + TagBindingRemoveApi, TagListApi, TagUpdateDeleteApi, ) @@ -75,7 +73,7 @@ def payload_patch(): class TestTagListApi: - def test_get_success(self, app): + def 
test_get_success(self, app: Flask): api = TagListApi() method = unwrap(api.get) @@ -126,7 +124,7 @@ class TestTagListApi: assert result["name"] == "test-tag" assert result["binding_count"] == "0" - def test_post_forbidden(self, app, readonly_user, payload_patch): + def test_post_forbidden(self, app: Flask, readonly_user, payload_patch): api = TagListApi() method = unwrap(api.post) @@ -172,7 +170,7 @@ class TestTagUpdateDeleteApi: assert status == 200 assert result["binding_count"] == "3" - def test_patch_forbidden(self, app, readonly_user, payload_patch): + def test_patch_forbidden(self, app: Flask, readonly_user, payload_patch): api = TagUpdateDeleteApi() method = unwrap(api.patch) @@ -233,7 +231,7 @@ class TestTagBindingCollectionApi: assert status == 200 assert result["result"] == "success" - def test_create_forbidden(self, app, readonly_user, payload_patch): + def test_create_forbidden(self, app: Flask, readonly_user, payload_patch): api = TagBindingCollectionApi() method = unwrap(api.post) @@ -249,39 +247,13 @@ class TestTagBindingCollectionApi: method(api) -class TestDeprecatedTagBindingCreateApi: - def test_create_success(self, app, admin_user, payload_patch): - api = DeprecatedTagBindingCreateApi() +class TestTagBindingRemoveApi: + def test_remove_success(self, app, admin_user, payload_patch): + api = TagBindingRemoveApi() method = unwrap(api.post) payload = { - "tag_ids": ["tag-1"], - "target_id": "target-1", - "type": "knowledge", - } - - with app.test_request_context("/", json=payload): - with ( - patch( - "controllers.console.tag.tags.current_account_with_tenant", - return_value=(admin_user, None), - ), - payload_patch(payload), - patch("controllers.console.tag.tags.TagService.save_tag_binding") as save_mock, - ): - result, status = method(api) - - save_mock.assert_called_once() - assert status == 200 - assert result["result"] == "success" - - -class TestTagBindingItemApi: - def test_delete_success(self, app, admin_user, payload_patch): - api = 
TagBindingItemApi() - method = unwrap(api.delete) - - payload = { + "tag_ids": ["tag-1", "tag-2"], "target_id": "target-1", "type": "knowledge", } @@ -295,57 +267,16 @@ class TestTagBindingItemApi: payload_patch(payload), patch("controllers.console.tag.tags.TagService.delete_tag_binding") as delete_mock, ): - result, status = method(api, "tag-1") + result, status = method(api) delete_mock.assert_called_once() delete_payload = delete_mock.call_args.args[0] - assert delete_payload.tag_id == "tag-1" - assert delete_payload.target_id == "target-1" - assert delete_payload.type == TagType.KNOWLEDGE + assert delete_payload.tag_ids == ["tag-1", "tag-2"] assert status == 200 assert result["result"] == "success" - def test_delete_forbidden(self, app, readonly_user): - api = TagBindingItemApi() - method = unwrap(api.delete) - - with app.test_request_context("/"): - with patch( - "controllers.console.tag.tags.current_account_with_tenant", - return_value=(readonly_user, None), - ): - with pytest.raises(Forbidden): - method(api, "tag-1") - - -class TestDeprecatedTagBindingRemoveApi: - def test_remove_success(self, app, admin_user, payload_patch): - api = DeprecatedTagBindingRemoveApi() - method = unwrap(api.post) - - payload = { - "tag_id": "tag-1", - "target_id": "target-1", - "type": "knowledge", - } - - with app.test_request_context("/", json=payload): - with ( - patch( - "controllers.console.tag.tags.current_account_with_tenant", - return_value=(admin_user, None), - ), - payload_patch(payload), - patch("controllers.console.tag.tags.TagService.delete_tag_binding") as delete_mock, - ): - result, status = method(api) - - delete_mock.assert_called_once() - assert status == 200 - assert result["result"] == "success" - - def test_remove_forbidden(self, app, readonly_user, payload_patch): - api = DeprecatedTagBindingRemoveApi() + def test_remove_forbidden(self, app: Flask, readonly_user, payload_patch): + api = TagBindingRemoveApi() method = unwrap(api.post) with 
app.test_request_context("/", json={}): @@ -371,32 +302,30 @@ class TestTagResponseModel: class TestTagBindingRouteMetadata: - def test_legacy_write_routes_are_marked_deprecated(self): - assert DeprecatedTagBindingCreateApi.post.__apidoc__["deprecated"] is True - assert DeprecatedTagBindingRemoveApi.post.__apidoc__["deprecated"] is True + def test_write_routes_are_not_deprecated(self): assert TagBindingCollectionApi.post.__apidoc__.get("deprecated") is not True - assert TagBindingItemApi.delete.__apidoc__.get("deprecated") is not True + assert TagBindingRemoveApi.post.__apidoc__.get("deprecated") is not True def test_write_routes_have_stable_operation_ids(self): assert TagBindingCollectionApi.post.__apidoc__["id"] == "create_tag_binding" - assert TagBindingItemApi.delete.__apidoc__["id"] == "delete_tag_binding" - assert DeprecatedTagBindingCreateApi.post.__apidoc__["id"] == "create_tag_binding_deprecated" - assert DeprecatedTagBindingRemoveApi.post.__apidoc__["id"] == "delete_tag_binding_deprecated" + assert TagBindingRemoveApi.post.__apidoc__["id"] == "remove_tag_bindings" - def test_canonical_and_legacy_write_routes_are_registered(self): + def test_write_routes_are_registered(self): route_map = { resource.__name__: urls for resource, urls, _route_doc, _kwargs in console_ns.resources if resource.__name__ in { "TagBindingCollectionApi", - "TagBindingItemApi", - "DeprecatedTagBindingCreateApi", - "DeprecatedTagBindingRemoveApi", + "TagBindingRemoveApi", } } assert route_map["TagBindingCollectionApi"] == ("/tag-bindings",) - assert route_map["TagBindingItemApi"] == ("/tag-bindings/",) - assert route_map["DeprecatedTagBindingCreateApi"] == ("/tag-bindings/create",) - assert route_map["DeprecatedTagBindingRemoveApi"] == ("/tag-bindings/remove",) + assert route_map["TagBindingRemoveApi"] == ("/tag-bindings/remove",) + + def test_legacy_write_routes_are_not_registered(self): + urls = {url for _resource, resource_urls, _route_doc, _kwargs in console_ns.resources for url 
in resource_urls} + + assert "/tag-bindings/create" not in urls + assert "/tag-bindings/" not in urls diff --git a/api/tests/unit_tests/controllers/console/test_admin.py b/api/tests/unit_tests/controllers/console/test_admin.py index 16197fcd0c..27f332ac51 100644 --- a/api/tests/unit_tests/controllers/console/test_admin.py +++ b/api/tests/unit_tests/controllers/console/test_admin.py @@ -4,6 +4,7 @@ import uuid from unittest.mock import Mock, PropertyMock, patch import pytest +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound, Unauthorized from controllers.console.admin import ( @@ -18,7 +19,7 @@ from models.model import App, InstalledApp, RecommendedApp @pytest.fixture(autouse=True) -def bypass_only_edition_cloud(mocker): +def bypass_only_edition_cloud(mocker: MockerFixture): """ Bypass only_edition_cloud decorator by setting EDITION to "CLOUD". """ @@ -29,7 +30,7 @@ def bypass_only_edition_cloud(mocker): @pytest.fixture -def mock_admin_auth(mocker): +def mock_admin_auth(mocker: MockerFixture): """ Provide valid admin authentication for controller tests. 
""" @@ -44,7 +45,7 @@ def mock_admin_auth(mocker): @pytest.fixture -def mock_console_payload(mocker): +def mock_console_payload(mocker: MockerFixture): payload = { "app_id": str(uuid.uuid4()), "language": "en-US", @@ -62,7 +63,7 @@ def mock_console_payload(mocker): @pytest.fixture -def mock_banner_payload(mocker): +def mock_banner_payload(mocker: MockerFixture): mocker.patch( "flask_restx.namespace.Namespace.payload", new_callable=PropertyMock, @@ -78,7 +79,7 @@ def mock_banner_payload(mocker): @pytest.fixture -def mock_session_factory(mocker): +def mock_session_factory(mocker: MockerFixture): mock_session = Mock() mock_session.execute = Mock() mock_session.add = Mock() @@ -97,7 +98,7 @@ class TestDeleteExploreBannerApi: def setup_method(self): self.api = DeleteExploreBannerApi() - def test_delete_banner_not_found(self, mocker, mock_admin_auth): + def test_delete_banner_not_found(self, mocker: MockerFixture, mock_admin_auth): mocker.patch( "controllers.console.admin.db.session.execute", return_value=Mock(scalar_one_or_none=lambda: None), @@ -106,7 +107,7 @@ class TestDeleteExploreBannerApi: with pytest.raises(NotFound, match="is not found"): self.api.delete(uuid.uuid4()) - def test_delete_banner_success(self, mocker, mock_admin_auth): + def test_delete_banner_success(self, mocker: MockerFixture, mock_admin_auth): mock_banner = Mock() mocker.patch( @@ -126,7 +127,7 @@ class TestInsertExploreBannerApi: def setup_method(self): self.api = InsertExploreBannerApi() - def test_insert_banner_success(self, mocker, mock_admin_auth, mock_banner_payload): + def test_insert_banner_success(self, mocker: MockerFixture, mock_admin_auth, mock_banner_payload): mocker.patch("controllers.console.admin.db.session.add") mocker.patch("controllers.console.admin.db.session.commit") @@ -168,7 +169,7 @@ class TestInsertExploreAppApiDelete: def setup_method(self): self.api = InsertExploreAppApi() - def test_delete_when_not_in_explore(self, mocker, mock_admin_auth): + def 
test_delete_when_not_in_explore(self, mocker: MockerFixture, mock_admin_auth): mocker.patch( "controllers.console.admin.session_factory.create_session", return_value=Mock( @@ -183,7 +184,7 @@ class TestInsertExploreAppApiDelete: assert status == 204 assert response["result"] == "success" - def test_delete_when_in_explore_with_trial_app(self, mocker, mock_admin_auth): + def test_delete_when_in_explore_with_trial_app(self, mocker: MockerFixture, mock_admin_auth): """Test deleting an app from explore that has a trial app.""" app_id = uuid.uuid4() @@ -225,7 +226,7 @@ class TestInsertExploreAppApiDelete: assert response["result"] == "success" assert mock_app.is_public is False - def test_delete_with_installed_apps(self, mocker, mock_admin_auth): + def test_delete_with_installed_apps(self, mocker: MockerFixture, mock_admin_auth): """Test deleting an app that has installed apps in other tenants.""" app_id = uuid.uuid4() @@ -270,7 +271,7 @@ class TestInsertExploreAppListApi: def setup_method(self): self.api = InsertExploreAppListApi() - def test_app_not_found(self, mocker, mock_admin_auth, mock_console_payload): + def test_app_not_found(self, mocker: MockerFixture, mock_admin_auth, mock_console_payload): mocker.patch( "controllers.console.admin.db.session.execute", return_value=Mock(scalar_one_or_none=lambda: None), @@ -281,7 +282,7 @@ class TestInsertExploreAppListApi: def test_create_recommended_app( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, ): @@ -318,7 +319,9 @@ class TestInsertExploreAppListApi: assert response["result"] == "success" assert mock_app.is_public is True - def test_update_recommended_app(self, mocker, mock_admin_auth, mock_console_payload, mock_session_factory): + def test_update_recommended_app( + self, mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory + ): mock_app = Mock(spec=App) mock_app.id = "app-id" mock_app.site = None @@ -344,7 +347,7 @@ class TestInsertExploreAppListApi: def 
test_site_data_overrides_payload( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -381,7 +384,7 @@ class TestInsertExploreAppListApi: def test_create_trial_app_when_can_trial_enabled( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -413,7 +416,7 @@ class TestInsertExploreAppListApi: def test_update_recommended_app_with_trial( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -450,7 +453,7 @@ class TestInsertExploreAppListApi: def test_update_recommended_app_without_trial( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, diff --git a/api/tests/unit_tests/controllers/console/test_feature.py b/api/tests/unit_tests/controllers/console/test_feature.py index d8debc1f2c..1711aede61 100644 --- a/api/tests/unit_tests/controllers/console/test_feature.py +++ b/api/tests/unit_tests/controllers/console/test_feature.py @@ -1,3 +1,4 @@ +from pytest_mock import MockerFixture from werkzeug.exceptions import Unauthorized @@ -11,7 +12,7 @@ def unwrap(func): class TestFeatureApi: - def test_get_tenant_features_success(self, mocker): + def test_get_tenant_features_success(self, mocker: MockerFixture): from controllers.console.feature import FeatureApi mocker.patch( @@ -32,7 +33,7 @@ class TestFeatureApi: class TestSystemFeatureApi: - def test_get_system_features_authenticated(self, mocker): + def test_get_system_features_authenticated(self, mocker: MockerFixture): """ current_user.is_authenticated == True """ @@ -56,7 +57,7 @@ class TestSystemFeatureApi: assert result == {"features": {"sys_feature": True}} - def test_get_system_features_unauthenticated(self, mocker): + def test_get_system_features_unauthenticated(self, mocker: MockerFixture): """ current_user.is_authenticated raises Unauthorized """ diff --git a/api/tests/unit_tests/controllers/console/test_files.py 
b/api/tests/unit_tests/controllers/console/test_files.py index 5df9daa7f8..eebc6f9d60 100644 --- a/api/tests/unit_tests/controllers/console/test_files.py +++ b/api/tests/unit_tests/controllers/console/test_files.py @@ -82,7 +82,7 @@ def mock_file_service(mock_db): class TestFileApiGet: - def test_get_upload_config(self, app): + def test_get_upload_config(self, app: Flask): api = FileApi() get_method = unwrap(api.get) @@ -290,7 +290,7 @@ class TestFilePreviewApi: class TestFileSupportTypeApi: - def test_get_supported_types(self, app): + def test_get_supported_types(self, app: Flask): api = FileSupportTypeApi() get_method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/test_workspace_account.py b/api/tests/unit_tests/controllers/console/test_workspace_account.py index 0b1a32581a..4b4f968c8f 100644 --- a/api/tests/unit_tests/controllers/console/test_workspace_account.py +++ b/api/tests/unit_tests/controllers/console/test_workspace_account.py @@ -58,7 +58,7 @@ class TestChangeEmailSend: mock_get_change_data, mock_current_account, mock_db, - app, + app: Flask, ): mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_account = _build_account("current@example.com", "acc1") @@ -107,7 +107,7 @@ class TestChangeEmailSend: mock_get_change_data, mock_current_account, mock_db, - app, + app: Flask, ): """GHSA-4q3w-q5mc-45rq: a phase-1 token must not unlock the new-email send step.""" from controllers.console.auth.error import InvalidTokenError @@ -155,7 +155,7 @@ class TestChangeEmailValidity: mock_reset_rate, mock_current_account, mock_db, - app, + app: Flask, ): mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_account = _build_account("user@example.com", "acc2") @@ -214,7 +214,7 @@ class TestChangeEmailValidity: mock_reset_rate, mock_current_account, mock_db, - app, + app: Flask, ): mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_current_account.return_value = 
(_build_account("old@example.com", "acc"), None) @@ -267,7 +267,7 @@ class TestChangeEmailValidity: mock_reset_rate, mock_current_account, mock_db, - app, + app: Flask, ): """A token whose phase marker is a string but not a known transition must be rejected.""" from controllers.console.auth.error import InvalidTokenError @@ -316,7 +316,7 @@ class TestChangeEmailValidity: mock_reset_rate, mock_current_account, mock_db, - app, + app: Flask, ): """A token minted without a phase marker (e.g. a hand-crafted token) must not validate.""" from controllers.console.auth.error import InvalidTokenError @@ -366,7 +366,7 @@ class TestChangeEmailReset: mock_send_notify, mock_current_account, mock_db, - app, + app: Flask, ): mock_features.return_value = SimpleNamespace(enable_change_email=True) current_user = _build_account("old@example.com", "acc3") @@ -418,7 +418,7 @@ class TestChangeEmailReset: mock_send_notify, mock_current_account, mock_db, - app, + app: Flask, ): """GHSA-4q3w-q5mc-45rq PoC: phase-1 token must not be usable against /reset.""" from controllers.console.auth.error import InvalidTokenError @@ -471,7 +471,7 @@ class TestChangeEmailReset: mock_send_notify, mock_current_account, mock_db, - app, + app: Flask, ): """A verified token for address A must not be replayed to change to address B.""" from controllers.console.auth.error import InvalidTokenError @@ -547,7 +547,7 @@ class TestAccountServiceSendChangeEmailEmail: class TestAccountDeletionFeedback: @patch("controllers.console.wraps.db") @patch("controllers.console.workspace.account.BillingService.update_account_deletion_feedback") - def test_should_normalize_feedback_email(self, mock_update, mock_db, app): + def test_should_normalize_feedback_email(self, mock_update, mock_db, app: Flask): with app.test_request_context( "/account/delete/feedback", method="POST", @@ -563,7 +563,7 @@ class TestCheckEmailUnique: @patch("controllers.console.wraps.db") 
@patch("controllers.console.workspace.account.AccountService.check_email_unique") @patch("controllers.console.workspace.account.AccountService.is_account_in_freeze") - def test_should_normalize_email(self, mock_is_freeze, mock_check_unique, mock_db, app): + def test_should_normalize_email(self, mock_is_freeze, mock_check_unique, mock_db, app: Flask): mock_is_freeze.return_value = False mock_check_unique.return_value = True diff --git a/api/tests/unit_tests/controllers/console/test_workspace_members.py b/api/tests/unit_tests/controllers/console/test_workspace_members.py index 811bf5b1e7..412d6a6c52 100644 --- a/api/tests/unit_tests/controllers/console/test_workspace_members.py +++ b/api/tests/unit_tests/controllers/console/test_workspace_members.py @@ -43,7 +43,7 @@ class TestMemberInviteEmailApi: mock_current_account, mock_invite_member, mock_get_features, - app, + app: Flask, ): mock_get_features.return_value = _build_feature_flags() mock_invite_member.return_value = "token-abc" diff --git a/api/tests/unit_tests/controllers/console/workspace/test_accounts.py b/api/tests/unit_tests/controllers/console/workspace/test_accounts.py index 42be02cdaf..064726da05 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_accounts.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_accounts.py @@ -1,6 +1,8 @@ from unittest.mock import MagicMock, PropertyMock, patch import pytest +from flask import Flask +from werkzeug.exceptions import NotFound from controllers.console import console_ns from controllers.console.auth.error import ( @@ -29,6 +31,7 @@ from controllers.console.workspace.error import ( CurrentPasswordIncorrectError, InvalidAccountDeletionCodeError, ) +from models.enums import CreatorUserRole from services.errors.account import CurrentPasswordIncorrectError as ServicePwdError @@ -39,7 +42,7 @@ def unwrap(func): class TestAccountInitApi: - def test_init_success(self, app): + def test_init_success(self, app: Flask): api = AccountInitApi() 
method = unwrap(api.post) @@ -62,7 +65,7 @@ class TestAccountInitApi: assert resp["result"] == "success" - def test_init_already_initialized(self, app): + def test_init_already_initialized(self, app: Flask): api = AccountInitApi() method = unwrap(api.post) @@ -77,7 +80,7 @@ class TestAccountInitApi: class TestAccountProfileApi: - def test_get_profile_success(self, app): + def test_get_profile_success(self, app: Flask): api = AccountProfileApi() method = unwrap(api.get) @@ -135,8 +138,133 @@ class TestAccountUpdateApis: assert result["id"] == "u1" +class TestAccountAvatarApiGet: + """GET /account/avatar must not sign arbitrary upload_file IDs (IDOR).""" + + def test_get_avatar_signed_url_when_upload_owned_by_current_account(self, app: Flask): + api = AccountAvatarApi() + method = unwrap(api.get) + + user = MagicMock() + user.id = "acc-owner" + tenant_id = "tenant-1" + file_id = "550e8400-e29b-41d4-a716-446655440000" + + upload_file = MagicMock() + upload_file.id = file_id + upload_file.tenant_id = tenant_id + upload_file.created_by = user.id + upload_file.created_by_role = CreatorUserRole.ACCOUNT + + with ( + app.test_request_context(f"/account/avatar?avatar={file_id}"), + patch( + "controllers.console.workspace.account.current_account_with_tenant", + return_value=(user, tenant_id), + ), + patch("controllers.console.workspace.account.db.session.scalar", return_value=upload_file), + patch( + "controllers.console.workspace.account.file_helpers.get_signed_file_url", + return_value="https://signed/example", + ) as sign_mock, + ): + result = method(api) + + assert result == {"avatar_url": "https://signed/example"} + sign_mock.assert_called_once_with(upload_file_id=file_id) + + def test_get_avatar_not_found_when_upload_created_by_other_account_same_tenant(self, app: Flask): + api = AccountAvatarApi() + method = unwrap(api.get) + + user = MagicMock() + user.id = "acc-a" + tenant_id = "tenant-1" + file_id = "550e8400-e29b-41d4-a716-446655440001" + + upload_file = 
MagicMock() + upload_file.id = file_id + upload_file.tenant_id = tenant_id + upload_file.created_by = "acc-b" + upload_file.created_by_role = CreatorUserRole.ACCOUNT + + with ( + app.test_request_context(f"/account/avatar?avatar={file_id}"), + patch( + "controllers.console.workspace.account.current_account_with_tenant", + return_value=(user, tenant_id), + ), + patch("controllers.console.workspace.account.db.session.scalar", return_value=upload_file), + patch( + "controllers.console.workspace.account.file_helpers.get_signed_file_url", + return_value="https://signed/leak", + ) as sign_mock, + ): + with pytest.raises(NotFound): + method(api) + + sign_mock.assert_not_called() + + def test_get_avatar_not_found_when_upload_belongs_to_other_tenant(self, app: Flask): + api = AccountAvatarApi() + method = unwrap(api.get) + + user = MagicMock() + user.id = "acc-owner" + tenant_id = "tenant-1" + file_id = "550e8400-e29b-41d4-a716-446655440002" + + upload_file = MagicMock() + upload_file.id = file_id + upload_file.tenant_id = "tenant-other" + upload_file.created_by = user.id + upload_file.created_by_role = CreatorUserRole.ACCOUNT + + with ( + app.test_request_context(f"/account/avatar?avatar={file_id}"), + patch( + "controllers.console.workspace.account.current_account_with_tenant", + return_value=(user, tenant_id), + ), + patch("controllers.console.workspace.account.db.session.scalar", return_value=upload_file), + patch( + "controllers.console.workspace.account.file_helpers.get_signed_file_url", + return_value="https://signed/leak", + ) as sign_mock, + ): + with pytest.raises(NotFound): + method(api) + + sign_mock.assert_not_called() + + def test_get_avatar_https_pass_through_without_signing(self, app: Flask): + api = AccountAvatarApi() + method = unwrap(api.get) + + user = MagicMock() + user.id = "acc-owner" + tenant_id = "tenant-1" + external = "https://cdn.example/avatar.png" + + with ( + app.test_request_context(f"/account/avatar?avatar={external}"), + patch( + 
"controllers.console.workspace.account.current_account_with_tenant", + return_value=(user, tenant_id), + ), + patch( + "controllers.console.workspace.account.file_helpers.get_signed_file_url", + return_value="https://signed/should-not-use", + ) as sign_mock, + ): + result = method(api) + + assert result == {"avatar_url": external} + sign_mock.assert_not_called() + + class TestAccountPasswordApi: - def test_password_success(self, app): + def test_password_success(self, app: Flask): api = AccountPasswordApi() method = unwrap(api.post) @@ -165,7 +293,7 @@ class TestAccountPasswordApi: assert result["id"] == "u1" - def test_password_wrong_current(self, app): + def test_password_wrong_current(self, app: Flask): api = AccountPasswordApi() method = unwrap(api.post) @@ -190,7 +318,7 @@ class TestAccountPasswordApi: class TestAccountIntegrateApi: - def test_get_integrates(self, app): + def test_get_integrates(self, app: Flask): api = AccountIntegrateApi() method = unwrap(api.get) @@ -209,7 +337,7 @@ class TestAccountIntegrateApi: class TestAccountDeleteApi: - def test_delete_verify_success(self, app): + def test_delete_verify_success(self, app: Flask): api = AccountDeleteVerifyApi() method = unwrap(api.get) @@ -231,7 +359,7 @@ class TestAccountDeleteApi: assert result["result"] == "success" - def test_delete_invalid_code(self, app): + def test_delete_invalid_code(self, app: Flask): api = AccountDeleteApi() method = unwrap(api.post) @@ -252,7 +380,7 @@ class TestAccountDeleteApi: class TestChangeEmailApis: - def test_check_email_code_invalid(self, app): + def test_check_email_code_invalid(self, app: Flask): api = ChangeEmailCheckApi() method = unwrap(api.post) @@ -278,7 +406,7 @@ class TestChangeEmailApis: with pytest.raises(EmailCodeError): method(api) - def test_reset_email_already_used(self, app): + def test_reset_email_already_used(self, app: Flask): api = ChangeEmailResetApi() method = unwrap(api.post) @@ -300,7 +428,7 @@ class TestChangeEmailApis: class 
TestCheckEmailUniqueApi: - def test_email_unique_success(self, app): + def test_email_unique_success(self, app: Flask): api = CheckEmailUnique() method = unwrap(api.post) @@ -321,7 +449,7 @@ class TestCheckEmailUniqueApi: assert result["result"] == "success" - def test_email_in_freeze(self, app): + def test_email_in_freeze(self, app: Flask): api = CheckEmailUnique() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/workspace/test_agent_providers.py b/api/tests/unit_tests/controllers/console/workspace/test_agent_providers.py index b4e03f681d..eb0ca15d2e 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_agent_providers.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_agent_providers.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.console.error import AccountNotFound from controllers.console.workspace.agent_providers import ( @@ -16,7 +17,7 @@ def unwrap(func): class TestAgentProviderListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = AgentProviderListApi() method = unwrap(api.get) @@ -39,7 +40,7 @@ class TestAgentProviderListApi: assert result == providers - def test_get_empty_list(self, app): + def test_get_empty_list(self, app: Flask): api = AgentProviderListApi() method = unwrap(api.get) @@ -61,7 +62,7 @@ class TestAgentProviderListApi: assert result == [] - def test_get_account_not_found(self, app): + def test_get_account_not_found(self, app: Flask): api = AgentProviderListApi() method = unwrap(api.get) @@ -77,7 +78,7 @@ class TestAgentProviderListApi: class TestAgentProviderApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = AgentProviderApi() method = unwrap(api.get) @@ -101,7 +102,7 @@ class TestAgentProviderApi: assert result == provider_data - def test_get_provider_not_found(self, app): + def test_get_provider_not_found(self, app: Flask): api = 
AgentProviderApi() method = unwrap(api.get) @@ -124,7 +125,7 @@ class TestAgentProviderApi: assert result is None - def test_get_account_not_found(self, app): + def test_get_account_not_found(self, app: Flask): api = AgentProviderApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/workspace/test_endpoint.py b/api/tests/unit_tests/controllers/console/workspace/test_endpoint.py index 0b3d7ef6d7..ed7b2d606f 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_endpoint.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_endpoint.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.console import console_ns from controllers.console.workspace.endpoint import ( @@ -39,7 +40,7 @@ def patch_current_account(user_and_tenant): @pytest.mark.usefixtures("patch_current_account") class TestEndpointCollectionApi: - def test_create_success(self, app): + def test_create_success(self, app: Flask): api = EndpointCollectionApi() method = unwrap(api.post) @@ -57,7 +58,7 @@ class TestEndpointCollectionApi: assert result["success"] is True - def test_create_permission_denied(self, app): + def test_create_permission_denied(self, app: Flask): api = EndpointCollectionApi() method = unwrap(api.post) @@ -77,7 +78,7 @@ class TestEndpointCollectionApi: with pytest.raises(ValueError): method(api) - def test_create_validation_error(self, app): + def test_create_validation_error(self, app: Flask): api = EndpointCollectionApi() method = unwrap(api.post) @@ -96,7 +97,7 @@ class TestEndpointCollectionApi: @pytest.mark.usefixtures("patch_current_account") class TestDeprecatedEndpointCreateApi: - def test_create_success(self, app): + def test_create_success(self, app: Flask): api = DeprecatedEndpointCreateApi() method = unwrap(api.post) @@ -117,7 +118,7 @@ class TestDeprecatedEndpointCreateApi: @pytest.mark.usefixtures("patch_current_account") class TestEndpointListApi: - def 
test_list_success(self, app): + def test_list_success(self, app: Flask): api = EndpointListApi() method = unwrap(api.get) @@ -130,7 +131,7 @@ class TestEndpointListApi: assert "endpoints" in result assert len(result["endpoints"]) == 1 - def test_list_invalid_query(self, app): + def test_list_invalid_query(self, app: Flask): api = EndpointListApi() method = unwrap(api.get) @@ -143,7 +144,7 @@ class TestEndpointListApi: @pytest.mark.usefixtures("patch_current_account") class TestEndpointListForSinglePluginApi: - def test_list_for_plugin_success(self, app): + def test_list_for_plugin_success(self, app: Flask): api = EndpointListForSinglePluginApi() method = unwrap(api.get) @@ -158,7 +159,7 @@ class TestEndpointListForSinglePluginApi: assert "endpoints" in result - def test_list_for_plugin_missing_param(self, app): + def test_list_for_plugin_missing_param(self, app: Flask): api = EndpointListForSinglePluginApi() method = unwrap(api.get) @@ -171,7 +172,7 @@ class TestEndpointListForSinglePluginApi: @pytest.mark.usefixtures("patch_current_account") class TestEndpointItemApi: - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = EndpointItemApi() method = unwrap(api.delete) @@ -187,7 +188,7 @@ class TestEndpointItemApi: assert result["success"] is True mock_delete.assert_called_once_with(tenant_id="t1", user_id="u1", endpoint_id="e1") - def test_delete_service_failure(self, app): + def test_delete_service_failure(self, app: Flask): api = EndpointItemApi() method = unwrap(api.delete) @@ -199,7 +200,7 @@ class TestEndpointItemApi: assert result["success"] is False - def test_update_success(self, app): + def test_update_success(self, app: Flask): api = EndpointItemApi() method = unwrap(api.patch) @@ -226,7 +227,7 @@ class TestEndpointItemApi: settings={"x": 1}, ) - def test_update_validation_error(self, app): + def test_update_validation_error(self, app: Flask): api = EndpointItemApi() method = unwrap(api.patch) @@ -238,7 +239,7 @@ class 
TestEndpointItemApi: with pytest.raises(ValueError): method(api, "e1") - def test_update_service_failure(self, app): + def test_update_service_failure(self, app: Flask): api = EndpointItemApi() method = unwrap(api.patch) @@ -258,7 +259,7 @@ class TestEndpointItemApi: @pytest.mark.usefixtures("patch_current_account") class TestDeprecatedEndpointDeleteApi: - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = DeprecatedEndpointDeleteApi() method = unwrap(api.post) @@ -272,7 +273,7 @@ class TestDeprecatedEndpointDeleteApi: assert result["success"] is True - def test_delete_invalid_payload(self, app): + def test_delete_invalid_payload(self, app: Flask): api = DeprecatedEndpointDeleteApi() method = unwrap(api.post) @@ -282,7 +283,7 @@ class TestDeprecatedEndpointDeleteApi: with pytest.raises(ValueError): method(api) - def test_delete_service_failure(self, app): + def test_delete_service_failure(self, app: Flask): api = DeprecatedEndpointDeleteApi() method = unwrap(api.post) @@ -299,7 +300,7 @@ class TestDeprecatedEndpointDeleteApi: @pytest.mark.usefixtures("patch_current_account") class TestDeprecatedEndpointUpdateApi: - def test_update_success(self, app): + def test_update_success(self, app: Flask): api = DeprecatedEndpointUpdateApi() method = unwrap(api.post) @@ -317,7 +318,7 @@ class TestDeprecatedEndpointUpdateApi: assert result["success"] is True - def test_update_validation_error(self, app): + def test_update_validation_error(self, app: Flask): api = DeprecatedEndpointUpdateApi() method = unwrap(api.post) @@ -329,7 +330,7 @@ class TestDeprecatedEndpointUpdateApi: with pytest.raises(ValueError): method(api) - def test_update_service_failure(self, app): + def test_update_service_failure(self, app: Flask): api = DeprecatedEndpointUpdateApi() method = unwrap(api.post) @@ -380,7 +381,7 @@ class TestEndpointRouteMetadata: @pytest.mark.usefixtures("patch_current_account") class TestEndpointEnableApi: - def test_enable_success(self, 
app): + def test_enable_success(self, app: Flask): api = EndpointEnableApi() method = unwrap(api.post) @@ -394,7 +395,7 @@ class TestEndpointEnableApi: assert result["success"] is True - def test_enable_invalid_payload(self, app): + def test_enable_invalid_payload(self, app: Flask): api = EndpointEnableApi() method = unwrap(api.post) @@ -404,7 +405,7 @@ class TestEndpointEnableApi: with pytest.raises(ValueError): method(api) - def test_enable_service_failure(self, app): + def test_enable_service_failure(self, app: Flask): api = EndpointEnableApi() method = unwrap(api.post) @@ -421,7 +422,7 @@ class TestEndpointEnableApi: @pytest.mark.usefixtures("patch_current_account") class TestEndpointDisableApi: - def test_disable_success(self, app): + def test_disable_success(self, app: Flask): api = EndpointDisableApi() method = unwrap(api.post) @@ -435,7 +436,7 @@ class TestEndpointDisableApi: assert result["success"] is True - def test_disable_invalid_payload(self, app): + def test_disable_invalid_payload(self, app: Flask): api = EndpointDisableApi() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/workspace/test_members.py b/api/tests/unit_tests/controllers/console/workspace/test_members.py index 055a14fcf3..c207aedd23 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_members.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_members.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.exceptions import HTTPException import services @@ -35,7 +36,7 @@ def unwrap(func): class TestMemberListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = MemberListApi() method = unwrap(api.get) @@ -107,7 +108,7 @@ class TestMemberListApi: ] mock_batch_get.assert_called_once_with("tenant-1", "acct-1", ["m1"]) - def test_get_no_tenant(self, app): + def test_get_no_tenant(self, app: Flask): api = 
MemberListApi() method = unwrap(api.get) @@ -122,7 +123,7 @@ class TestMemberListApi: class TestMemberInviteEmailApi: - def test_invite_success(self, app): + def test_invite_success(self, app: Flask): api = MemberInviteEmailApi() method = unwrap(api.post) @@ -149,7 +150,7 @@ class TestMemberInviteEmailApi: assert status == 201 assert result["result"] == "success" - def test_invite_limit_exceeded(self, app): + def test_invite_limit_exceeded(self, app: Flask): api = MemberInviteEmailApi() method = unwrap(api.post) @@ -171,7 +172,7 @@ class TestMemberInviteEmailApi: with pytest.raises(WorkspaceMembersLimitExceeded): method(api) - def test_invite_already_member(self, app): + def test_invite_already_member(self, app: Flask): api = MemberInviteEmailApi() method = unwrap(api.post) @@ -199,7 +200,7 @@ class TestMemberInviteEmailApi: assert result["invitation_results"][0]["status"] == "success" - def test_invite_invalid_role(self, app): + def test_invite_invalid_role(self, app: Flask): api = MemberInviteEmailApi() method = unwrap(api.post) @@ -214,7 +215,7 @@ class TestMemberInviteEmailApi: assert status == 400 assert result["code"] == "invalid-role" - def test_invite_generic_exception(self, app): + def test_invite_generic_exception(self, app: Flask): api = MemberInviteEmailApi() method = unwrap(api.post) @@ -244,7 +245,7 @@ class TestMemberInviteEmailApi: class TestMemberCancelInviteApi: - def test_cancel_success(self, app): + def test_cancel_success(self, app: Flask): api = MemberCancelInviteApi() method = unwrap(api.delete) @@ -264,7 +265,7 @@ class TestMemberCancelInviteApi: assert status == 200 assert result["result"] == "success" - def test_cancel_not_found(self, app): + def test_cancel_not_found(self, app: Flask): api = MemberCancelInviteApi() method = unwrap(api.delete) @@ -281,7 +282,7 @@ class TestMemberCancelInviteApi: with pytest.raises(HTTPException): method(api, "x") - def test_cancel_cannot_operate_self(self, app): + def test_cancel_cannot_operate_self(self, 
app: Flask): api = MemberCancelInviteApi() method = unwrap(api.delete) @@ -303,7 +304,7 @@ class TestMemberCancelInviteApi: assert status == 400 - def test_cancel_no_permission(self, app): + def test_cancel_no_permission(self, app: Flask): api = MemberCancelInviteApi() method = unwrap(api.delete) @@ -325,7 +326,7 @@ class TestMemberCancelInviteApi: assert status == 403 - def test_cancel_member_not_in_tenant(self, app): + def test_cancel_member_not_in_tenant(self, app: Flask): api = MemberCancelInviteApi() method = unwrap(api.delete) @@ -349,7 +350,7 @@ class TestMemberCancelInviteApi: class TestMemberUpdateRoleApi: - def test_update_success(self, app): + def test_update_success(self, app: Flask): api = MemberUpdateRoleApi() method = unwrap(api.put) @@ -372,7 +373,7 @@ class TestMemberUpdateRoleApi: assert result["result"] == "success" - def test_update_invalid_role(self, app): + def test_update_invalid_role(self, app: Flask): api = MemberUpdateRoleApi() method = unwrap(api.put) @@ -383,7 +384,7 @@ class TestMemberUpdateRoleApi: assert status == 400 - def test_update_member_not_found(self, app): + def test_update_member_not_found(self, app: Flask): api = MemberUpdateRoleApi() method = unwrap(api.put) @@ -402,7 +403,7 @@ class TestMemberUpdateRoleApi: class TestDatasetOperatorMemberListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = DatasetOperatorMemberListApi() method = unwrap(api.get) @@ -429,7 +430,7 @@ class TestDatasetOperatorMemberListApi: assert status == 200 assert len(result["accounts"]) == 1 - def test_get_no_tenant(self, app): + def test_get_no_tenant(self, app: Flask): api = DatasetOperatorMemberListApi() method = unwrap(api.get) @@ -444,7 +445,7 @@ class TestDatasetOperatorMemberListApi: class TestSendOwnerTransferEmailApi: - def test_send_success(self, app): + def test_send_success(self, app: Flask): api = SendOwnerTransferEmailApi() method = unwrap(api.post) @@ -467,7 +468,7 @@ class 
TestSendOwnerTransferEmailApi: assert result["result"] == "success" - def test_send_ip_limit(self, app): + def test_send_ip_limit(self, app: Flask): api = SendOwnerTransferEmailApi() method = unwrap(api.post) @@ -481,7 +482,7 @@ class TestSendOwnerTransferEmailApi: with pytest.raises(EmailSendIpLimitError): method(api) - def test_send_not_owner(self, app): + def test_send_not_owner(self, app: Flask): api = SendOwnerTransferEmailApi() method = unwrap(api.post) @@ -500,7 +501,7 @@ class TestSendOwnerTransferEmailApi: class TestOwnerTransferCheckApi: - def test_check_invalid_code(self, app): + def test_check_invalid_code(self, app: Flask): api = OwnerTransferCheckApi() method = unwrap(api.post) @@ -525,7 +526,7 @@ class TestOwnerTransferCheckApi: with pytest.raises(EmailCodeError): method(api) - def test_rate_limited(self, app): + def test_rate_limited(self, app: Flask): api = OwnerTransferCheckApi() method = unwrap(api.post) @@ -546,7 +547,7 @@ class TestOwnerTransferCheckApi: with pytest.raises(OwnerTransferLimitError): method(api) - def test_invalid_token(self, app): + def test_invalid_token(self, app: Flask): api = OwnerTransferCheckApi() method = unwrap(api.post) @@ -568,7 +569,7 @@ class TestOwnerTransferCheckApi: with pytest.raises(InvalidTokenError): method(api) - def test_invalid_email(self, app): + def test_invalid_email(self, app: Flask): api = OwnerTransferCheckApi() method = unwrap(api.post) @@ -595,7 +596,7 @@ class TestOwnerTransferCheckApi: class TestOwnerTransferApi: - def test_transfer_self(self, app): + def test_transfer_self(self, app: Flask): api = OwnerTransfer() method = unwrap(api.post) @@ -612,7 +613,7 @@ class TestOwnerTransferApi: with pytest.raises(CannotTransferOwnerToSelfError): method(api, "1") - def test_invalid_token(self, app): + def test_invalid_token(self, app: Flask): api = OwnerTransfer() method = unwrap(api.post) @@ -630,7 +631,7 @@ class TestOwnerTransferApi: with pytest.raises(InvalidTokenError): method(api, "2") - def 
test_member_not_in_tenant(self, app): + def test_member_not_in_tenant(self, app: Flask): api = OwnerTransfer() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/workspace/test_model_providers.py b/api/tests/unit_tests/controllers/console/workspace/test_model_providers.py index 168479af1e..e836a3cc55 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_model_providers.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_model_providers.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from flask import Flask from pydantic_core import ValidationError from werkzeug.exceptions import Forbidden @@ -26,7 +27,7 @@ def unwrap(func): class TestModelProviderListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = ModelProviderListApi() method = unwrap(api.get) @@ -47,7 +48,7 @@ class TestModelProviderListApi: class TestModelProviderCredentialApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.get) @@ -66,7 +67,7 @@ class TestModelProviderCredentialApi: assert "credentials" in result - def test_get_invalid_uuid(self, app): + def test_get_invalid_uuid(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.get) @@ -80,7 +81,7 @@ class TestModelProviderCredentialApi: with pytest.raises(ValidationError): method(api, provider="openai") - def test_post_create_success(self, app): + def test_post_create_success(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.post) @@ -102,7 +103,7 @@ class TestModelProviderCredentialApi: assert result["result"] == "success" assert status == 201 - def test_post_create_validation_error(self, app): + def test_post_create_validation_error(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.post) @@ -122,7 +123,7 @@ class TestModelProviderCredentialApi: with pytest.raises(ValueError): 
method(api, provider="openai") - def test_put_update_success(self, app): + def test_put_update_success(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.put) @@ -143,7 +144,7 @@ class TestModelProviderCredentialApi: assert result["result"] == "success" - def test_put_invalid_uuid(self, app): + def test_put_invalid_uuid(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.put) @@ -159,7 +160,7 @@ class TestModelProviderCredentialApi: with pytest.raises(ValidationError): method(api, provider="openai") - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = ModelProviderCredentialApi() method = unwrap(api.delete) @@ -183,7 +184,7 @@ class TestModelProviderCredentialApi: class TestModelProviderCredentialSwitchApi: - def test_switch_success(self, app): + def test_switch_success(self, app: Flask): api = ModelProviderCredentialSwitchApi() method = unwrap(api.post) @@ -204,7 +205,7 @@ class TestModelProviderCredentialSwitchApi: assert result["result"] == "success" - def test_switch_invalid_uuid(self, app): + def test_switch_invalid_uuid(self, app: Flask): api = ModelProviderCredentialSwitchApi() method = unwrap(api.post) @@ -222,7 +223,7 @@ class TestModelProviderCredentialSwitchApi: class TestModelProviderValidateApi: - def test_validate_success(self, app): + def test_validate_success(self, app: Flask): api = ModelProviderValidateApi() method = unwrap(api.post) @@ -243,7 +244,7 @@ class TestModelProviderValidateApi: assert result["result"] == "success" - def test_validate_failure(self, app): + def test_validate_failure(self, app: Flask): api = ModelProviderValidateApi() method = unwrap(api.post) @@ -266,7 +267,7 @@ class TestModelProviderValidateApi: class TestModelProviderIconApi: - def test_icon_success(self, app): + def test_icon_success(self, app: Flask): api = ModelProviderIconApi() with ( @@ -280,7 +281,7 @@ class TestModelProviderIconApi: assert response.mimetype == "image/png" - def 
test_icon_not_found(self, app): + def test_icon_not_found(self, app: Flask): api = ModelProviderIconApi() with ( @@ -295,7 +296,7 @@ class TestModelProviderIconApi: class TestPreferredProviderTypeUpdateApi: - def test_update_success(self, app): + def test_update_success(self, app: Flask): api = PreferredProviderTypeUpdateApi() method = unwrap(api.post) @@ -316,7 +317,7 @@ class TestPreferredProviderTypeUpdateApi: assert result["result"] == "success" - def test_invalid_enum(self, app): + def test_invalid_enum(self, app: Flask): api = PreferredProviderTypeUpdateApi() method = unwrap(api.post) @@ -334,7 +335,7 @@ class TestPreferredProviderTypeUpdateApi: class TestModelProviderPaymentCheckoutUrlApi: - def test_checkout_success(self, app): + def test_checkout_success(self, app: Flask): api = ModelProviderPaymentCheckoutUrlApi() method = unwrap(api.get) @@ -359,7 +360,7 @@ class TestModelProviderPaymentCheckoutUrlApi: assert "url" in result - def test_invalid_provider(self, app): + def test_invalid_provider(self, app: Flask): api = ModelProviderPaymentCheckoutUrlApi() method = unwrap(api.get) @@ -367,7 +368,7 @@ class TestModelProviderPaymentCheckoutUrlApi: with pytest.raises(ValueError): method(api, provider="openai") - def test_permission_denied(self, app): + def test_permission_denied(self, app: Flask): api = ModelProviderPaymentCheckoutUrlApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/workspace/test_models.py b/api/tests/unit_tests/controllers/console/workspace/test_models.py index f0d32f81fb..3c4acbab44 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_models.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_models.py @@ -32,7 +32,7 @@ class TestDefaultModelApi: with ( app.test_request_context( "/", - query_string={"model_type": ModelType.LLM.value}, + query_string={"model_type": ModelType.LLM}, ), patch( "controllers.console.workspace.models.current_account_with_tenant", @@ -53,7 +53,7 @@ 
class TestDefaultModelApi: payload = { "model_settings": [ { - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "provider": "openai", "model": "gpt-4", } @@ -72,12 +72,12 @@ class TestDefaultModelApi: assert result["result"] == "success" - def test_get_returns_empty_when_no_default(self, app): + def test_get_returns_empty_when_no_default(self, app: Flask): api = DefaultModelApi() method = unwrap(api.get) with ( - app.test_request_context("/", query_string={"model_type": ModelType.LLM.value}), + app.test_request_context("/", query_string={"model_type": ModelType.LLM}), patch("controllers.console.workspace.models.current_account_with_tenant", return_value=(MagicMock(), "t1")), patch("controllers.console.workspace.models.ModelProviderService") as service, ): @@ -113,7 +113,7 @@ class TestModelProviderModelApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "load_balancing": { "configs": [{"weight": 1}], "enabled": True, @@ -139,7 +139,7 @@ class TestModelProviderModelApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -154,7 +154,7 @@ class TestModelProviderModelApi: assert status == 204 - def test_get_models_returns_empty(self, app): + def test_get_models_returns_empty(self, app: Flask): api = ModelProviderModelApi() method = unwrap(api.get) @@ -180,7 +180,7 @@ class TestModelProviderModelCredentialApi: "/", query_string={ "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, }, ), patch( @@ -208,7 +208,7 @@ class TestModelProviderModelCredentialApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {"key": "val"}, } @@ -224,12 +224,12 @@ class TestModelProviderModelCredentialApi: assert status == 201 - def test_get_empty_credentials(self, app): + def test_get_empty_credentials(self, app: Flask): api = ModelProviderModelCredentialApi() method = 
unwrap(api.get) with ( - app.test_request_context("/", query_string={"model": "gpt", "model_type": ModelType.LLM.value}), + app.test_request_context("/", query_string={"model": "gpt", "model_type": ModelType.LLM}), patch("controllers.console.workspace.models.current_account_with_tenant", return_value=(MagicMock(), "t1")), patch("controllers.console.workspace.models.ModelProviderService") as service, patch("controllers.console.workspace.models.ModelLoadBalancingService") as lb, @@ -242,13 +242,13 @@ class TestModelProviderModelCredentialApi: assert result["credentials"] == {} - def test_delete_success(self, app): + def test_delete_success(self, app: Flask): api = ModelProviderModelCredentialApi() method = unwrap(api.delete) payload = { "model": "gpt", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credential_id": "123e4567-e89b-12d3-a456-426614174000", } @@ -269,7 +269,7 @@ class TestModelProviderModelCredentialSwitchApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credential_id": "abc", } @@ -293,7 +293,7 @@ class TestModelEnableDisableApis: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -314,7 +314,7 @@ class TestModelEnableDisableApis: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -337,7 +337,7 @@ class TestModelProviderModelValidateApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {"key": "val"}, } @@ -360,7 +360,7 @@ class TestModelProviderModelValidateApi: payload = { "model": model_name, - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {}, } @@ -412,11 +412,11 @@ class TestParameterAndAvailableModels: ): service_mock.return_value.get_models_by_model_type.return_value = [] - result = method(api, ModelType.LLM.value) + result = method(api, ModelType.LLM) assert 
"data" in result - def test_empty_rules(self, app): + def test_empty_rules(self, app: Flask): api = ModelProviderModelParameterRuleApi() method = unwrap(api.get) @@ -431,7 +431,7 @@ class TestParameterAndAvailableModels: assert result["data"] == [] - def test_no_models(self, app): + def test_no_models(self, app: Flask): api = ModelProviderAvailableModelApi() method = unwrap(api.get) @@ -442,6 +442,6 @@ class TestParameterAndAvailableModels: ): service.return_value.get_models_by_model_type.return_value = [] - result = method(api, ModelType.LLM.value) + result = method(api, ModelType.LLM) assert result["data"] == [] diff --git a/api/tests/unit_tests/controllers/console/workspace/test_plugin.py b/api/tests/unit_tests/controllers/console/workspace/test_plugin.py index ce5fd1c466..d01bf7d668 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_plugin.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_plugin.py @@ -2,6 +2,7 @@ import io from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.datastructures import FileStorage from werkzeug.exceptions import Forbidden @@ -61,7 +62,7 @@ def tenant(): class TestPluginListLatestVersionsApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginListLatestVersionsApi() method = unwrap(api.post) @@ -77,7 +78,7 @@ class TestPluginListLatestVersionsApi: assert "versions" in result - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginListLatestVersionsApi() method = unwrap(api.post) @@ -95,7 +96,7 @@ class TestPluginListLatestVersionsApi: class TestPluginDebuggingKeyApi: - def test_debugging_key_success(self, app): + def test_debugging_key_success(self, app: Flask): api = PluginDebuggingKeyApi() method = unwrap(api.get) @@ -108,7 +109,7 @@ class TestPluginDebuggingKeyApi: assert result["key"] == "k" - def test_debugging_key_error(self, app): + def test_debugging_key_error(self, app: Flask): api = 
PluginDebuggingKeyApi() method = unwrap(api.get) @@ -125,7 +126,7 @@ class TestPluginDebuggingKeyApi: class TestPluginListApi: - def test_plugin_list(self, app): + def test_plugin_list(self, app: Flask): api = PluginListApi() method = unwrap(api.get) @@ -142,7 +143,7 @@ class TestPluginListApi: class TestPluginIconApi: - def test_plugin_icon(self, app): + def test_plugin_icon(self, app: Flask): api = PluginIconApi() method = unwrap(api.get) @@ -156,7 +157,7 @@ class TestPluginIconApi: class TestPluginAssetApi: - def test_plugin_asset(self, app): + def test_plugin_asset(self, app: Flask): api = PluginAssetApi() method = unwrap(api.get) @@ -171,7 +172,7 @@ class TestPluginAssetApi: class TestPluginUploadFromPkgApi: - def test_upload_pkg_success(self, app): + def test_upload_pkg_success(self, app: Flask): api = PluginUploadFromPkgApi() method = unwrap(api.post) @@ -188,7 +189,7 @@ class TestPluginUploadFromPkgApi: assert result["ok"] is True - def test_upload_pkg_too_large(self, app): + def test_upload_pkg_too_large(self, app: Flask): api = PluginUploadFromPkgApi() method = unwrap(api.post) @@ -210,7 +211,7 @@ class TestPluginUploadFromPkgApi: class TestPluginInstallFromPkgApi: - def test_install_from_pkg(self, app): + def test_install_from_pkg(self, app: Flask): api = PluginInstallFromPkgApi() method = unwrap(api.post) @@ -229,7 +230,7 @@ class TestPluginInstallFromPkgApi: class TestPluginUninstallApi: - def test_uninstall(self, app): + def test_uninstall(self, app: Flask): api = PluginUninstallApi() method = unwrap(api.post) @@ -246,7 +247,7 @@ class TestPluginUninstallApi: class TestPluginChangePermissionApi: - def test_change_permission_forbidden(self, app): + def test_change_permission_forbidden(self, app: Flask): api = PluginChangePermissionApi() method = unwrap(api.post) @@ -264,7 +265,7 @@ class TestPluginChangePermissionApi: with pytest.raises(Forbidden): method(api) - def test_change_permission_success(self, app): + def test_change_permission_success(self, 
app: Flask): api = PluginChangePermissionApi() method = unwrap(api.post) @@ -286,7 +287,7 @@ class TestPluginChangePermissionApi: class TestPluginFetchPermissionApi: - def test_fetch_permission_default(self, app): + def test_fetch_permission_default(self, app: Flask): api = PluginFetchPermissionApi() method = unwrap(api.get) @@ -319,7 +320,7 @@ class TestPluginFetchDynamicSelectOptionsApi: class TestPluginReadmeApi: - def test_fetch_readme(self, app): + def test_fetch_readme(self, app: Flask): api = PluginReadmeApi() method = unwrap(api.get) @@ -334,7 +335,7 @@ class TestPluginReadmeApi: class TestPluginListInstallationsFromIdsApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginListInstallationsFromIdsApi() method = unwrap(api.post) @@ -352,7 +353,7 @@ class TestPluginListInstallationsFromIdsApi: assert "plugins" in result - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginListInstallationsFromIdsApi() method = unwrap(api.post) @@ -371,7 +372,7 @@ class TestPluginListInstallationsFromIdsApi: class TestPluginUploadFromGithubApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginUploadFromGithubApi() method = unwrap(api.post) @@ -388,7 +389,7 @@ class TestPluginUploadFromGithubApi: assert result["ok"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginUploadFromGithubApi() method = unwrap(api.post) @@ -407,7 +408,7 @@ class TestPluginUploadFromGithubApi: class TestPluginUploadFromBundleApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginUploadFromBundleApi() method = unwrap(api.post) @@ -430,7 +431,7 @@ class TestPluginUploadFromBundleApi: assert result["ok"] is True - def test_too_large(self, app): + def test_too_large(self, app: Flask): api = PluginUploadFromBundleApi() method = unwrap(api.post) @@ -458,7 +459,7 @@ class TestPluginUploadFromBundleApi: class 
TestPluginInstallFromGithubApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginInstallFromGithubApi() method = unwrap(api.post) @@ -478,7 +479,7 @@ class TestPluginInstallFromGithubApi: assert result["ok"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginInstallFromGithubApi() method = unwrap(api.post) @@ -502,7 +503,7 @@ class TestPluginInstallFromGithubApi: class TestPluginInstallFromMarketplaceApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginInstallFromMarketplaceApi() method = unwrap(api.post) @@ -520,7 +521,7 @@ class TestPluginInstallFromMarketplaceApi: assert result["ok"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginInstallFromMarketplaceApi() method = unwrap(api.post) @@ -539,7 +540,7 @@ class TestPluginInstallFromMarketplaceApi: class TestPluginFetchMarketplacePkgApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginFetchMarketplacePkgApi() method = unwrap(api.get) @@ -552,7 +553,7 @@ class TestPluginFetchMarketplacePkgApi: assert "manifest" in result - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginFetchMarketplacePkgApi() method = unwrap(api.get) @@ -569,7 +570,7 @@ class TestPluginFetchMarketplacePkgApi: class TestPluginFetchManifestApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginFetchManifestApi() method = unwrap(api.get) @@ -585,7 +586,7 @@ class TestPluginFetchManifestApi: assert "manifest" in result - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginFetchManifestApi() method = unwrap(api.get) @@ -602,7 +603,7 @@ class TestPluginFetchManifestApi: class TestPluginFetchInstallTasksApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginFetchInstallTasksApi() method = unwrap(api.get) @@ -615,7 +616,7 @@ 
class TestPluginFetchInstallTasksApi: assert "tasks" in result - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginFetchInstallTasksApi() method = unwrap(api.get) @@ -632,7 +633,7 @@ class TestPluginFetchInstallTasksApi: class TestPluginFetchInstallTaskApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginFetchInstallTaskApi() method = unwrap(api.get) @@ -645,7 +646,7 @@ class TestPluginFetchInstallTaskApi: assert "task" in result - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginFetchInstallTaskApi() method = unwrap(api.get) @@ -662,7 +663,7 @@ class TestPluginFetchInstallTaskApi: class TestPluginDeleteInstallTaskApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginDeleteInstallTaskApi() method = unwrap(api.post) @@ -675,7 +676,7 @@ class TestPluginDeleteInstallTaskApi: assert result["success"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginDeleteInstallTaskApi() method = unwrap(api.post) @@ -692,7 +693,7 @@ class TestPluginDeleteInstallTaskApi: class TestPluginDeleteAllInstallTaskItemsApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginDeleteAllInstallTaskItemsApi() method = unwrap(api.post) @@ -707,7 +708,7 @@ class TestPluginDeleteAllInstallTaskItemsApi: assert result["success"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginDeleteAllInstallTaskItemsApi() method = unwrap(api.post) @@ -724,7 +725,7 @@ class TestPluginDeleteAllInstallTaskItemsApi: class TestPluginDeleteInstallTaskItemApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginDeleteInstallTaskItemApi() method = unwrap(api.post) @@ -737,7 +738,7 @@ class TestPluginDeleteInstallTaskItemApi: assert result["success"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: 
Flask): api = PluginDeleteInstallTaskItemApi() method = unwrap(api.post) @@ -754,7 +755,7 @@ class TestPluginDeleteInstallTaskItemApi: class TestPluginUpgradeFromMarketplaceApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginUpgradeFromMarketplaceApi() method = unwrap(api.post) @@ -775,7 +776,7 @@ class TestPluginUpgradeFromMarketplaceApi: assert result["ok"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginUpgradeFromMarketplaceApi() method = unwrap(api.post) @@ -797,7 +798,7 @@ class TestPluginUpgradeFromMarketplaceApi: class TestPluginUpgradeFromGithubApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginUpgradeFromGithubApi() method = unwrap(api.post) @@ -821,7 +822,7 @@ class TestPluginUpgradeFromGithubApi: assert result["ok"] is True - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginUpgradeFromGithubApi() method = unwrap(api.post) @@ -846,7 +847,7 @@ class TestPluginUpgradeFromGithubApi: class TestPluginFetchDynamicSelectOptionsWithCredentialsApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginFetchDynamicSelectOptionsWithCredentialsApi() method = unwrap(api.post) @@ -873,7 +874,7 @@ class TestPluginFetchDynamicSelectOptionsWithCredentialsApi: assert result["options"] == [1] - def test_daemon_error(self, app): + def test_daemon_error(self, app: Flask): api = PluginFetchDynamicSelectOptionsWithCredentialsApi() method = unwrap(api.post) @@ -901,7 +902,7 @@ class TestPluginFetchDynamicSelectOptionsWithCredentialsApi: class TestPluginChangePreferencesApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginChangePreferencesApi() method = unwrap(api.post) @@ -931,7 +932,7 @@ class TestPluginChangePreferencesApi: assert result["success"] is True - def test_permission_fail(self, app): + def test_permission_fail(self, app: Flask): api = 
PluginChangePreferencesApi() method = unwrap(api.post) @@ -962,7 +963,7 @@ class TestPluginChangePreferencesApi: class TestPluginFetchPreferencesApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginFetchPreferencesApi() method = unwrap(api.get) @@ -996,7 +997,7 @@ class TestPluginFetchPreferencesApi: class TestPluginAutoUpgradeExcludePluginApi: - def test_success(self, app): + def test_success(self, app: Flask): api = PluginAutoUpgradeExcludePluginApi() method = unwrap(api.post) @@ -1011,7 +1012,7 @@ class TestPluginAutoUpgradeExcludePluginApi: assert result["success"] is True - def test_fail(self, app): + def test_fail(self, app: Flask): api = PluginAutoUpgradeExcludePluginApi() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/workspace/test_workspace.py b/api/tests/unit_tests/controllers/console/workspace/test_workspace.py index e82a29f045..a52518c2d2 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_workspace.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_workspace.py @@ -2,6 +2,7 @@ from io import BytesIO from unittest.mock import MagicMock, patch import pytest +from flask import Flask from werkzeug.datastructures import FileStorage from werkzeug.exceptions import Unauthorized @@ -37,7 +38,7 @@ def unwrap(func): class TestTenantListApi: - def test_get_success_saas_path(self, app): + def test_get_success_saas_path(self, app: Flask): api = TenantListApi() method = unwrap(api.get) @@ -85,7 +86,7 @@ class TestTenantListApi: get_plan_bulk_mock.assert_called_once_with(["t1", "t2"]) get_features_mock.assert_not_called() - def test_get_saas_path_partial_fallback_does_not_gate_plan_on_billing_enabled(self, app): + def test_get_saas_path_partial_fallback_does_not_gate_plan_on_billing_enabled(self, app: Flask): """Bulk omits a tenant: resolve plan via subscription.plan only; billing.enabled is not used. 
billing.enabled is mocked False to prove the endpoint does not gate on it for this path @@ -140,7 +141,7 @@ class TestTenantListApi: get_plan_bulk_mock.assert_called_once_with(["t1", "t2"]) get_features_mock.assert_called_once_with("t2") - def test_get_saas_path_falls_back_to_legacy_feature_path_on_bulk_error(self, app): + def test_get_saas_path_falls_back_to_legacy_feature_path_on_bulk_error(self, app: Flask): """Test fallback to FeatureService when bulk billing returns empty result. BillingService.get_plan_bulk catches exceptions internally and returns empty dict, @@ -197,7 +198,7 @@ class TestTenantListApi: assert get_features_mock.call_count == 2 logger_warning_mock.assert_called_once() - def test_get_billing_disabled_community_path(self, app): + def test_get_billing_disabled_community_path(self, app: Flask): api = TenantListApi() method = unwrap(api.get) @@ -236,7 +237,7 @@ class TestTenantListApi: assert result["workspaces"][0]["plan"] == CloudPlan.SANDBOX get_features_mock.assert_called_once_with("t1") - def test_get_enterprise_only_skips_feature_service(self, app): + def test_get_enterprise_only_skips_feature_service(self, app: Flask): api = TenantListApi() method = unwrap(api.get) @@ -276,7 +277,7 @@ class TestTenantListApi: assert result["workspaces"][1]["current"] is True get_features_mock.assert_not_called() - def test_get_enterprise_only_with_empty_tenants(self, app): + def test_get_enterprise_only_with_empty_tenants(self, app: Flask): api = TenantListApi() method = unwrap(api.get) @@ -302,7 +303,7 @@ class TestTenantListApi: class TestWorkspaceListApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = WorkspaceListApi() method = unwrap(api.get) @@ -324,7 +325,7 @@ class TestWorkspaceListApi: assert result["total"] == 1 assert result["has_more"] is False - def test_get_has_next_true(self, app): + def test_get_has_next_true(self, app: Flask): api = WorkspaceListApi() method = unwrap(api.get) @@ -355,7 +356,7 @@ class 
TestWorkspaceListApi: class TestTenantApi: - def test_post_active_tenant(self, app): + def test_post_active_tenant(self, app: Flask): api = TenantApi() method = unwrap(api.post) @@ -375,7 +376,7 @@ class TestTenantApi: assert status == 200 assert result["id"] == "t1" - def test_post_archived_with_switch(self, app): + def test_post_archived_with_switch(self, app: Flask): api = TenantApi() method = unwrap(api.post) @@ -397,7 +398,7 @@ class TestTenantApi: assert result["id"] == "new" - def test_post_archived_no_tenant(self, app): + def test_post_archived_no_tenant(self, app: Flask): api = TenantApi() method = unwrap(api.post) @@ -411,7 +412,7 @@ class TestTenantApi: with pytest.raises(Unauthorized): method(api) - def test_post_info_path(self, app): + def test_post_info_path(self, app: Flask): api = TenantApi() method = unwrap(api.post) @@ -454,7 +455,7 @@ class TestTenantInfoResponse: class TestSwitchWorkspaceApi: - def test_switch_success(self, app): + def test_switch_success(self, app: Flask): api = SwitchWorkspaceApi() method = unwrap(api.post) @@ -477,7 +478,7 @@ class TestSwitchWorkspaceApi: assert result["result"] == "success" - def test_switch_not_linked(self, app): + def test_switch_not_linked(self, app: Flask): api = SwitchWorkspaceApi() method = unwrap(api.post) @@ -493,7 +494,7 @@ class TestSwitchWorkspaceApi: with pytest.raises(AccountNotLinkTenantError): method(api) - def test_switch_tenant_not_found(self, app): + def test_switch_tenant_not_found(self, app: Flask): api = SwitchWorkspaceApi() method = unwrap(api.post) @@ -515,7 +516,7 @@ class TestSwitchWorkspaceApi: class TestCustomConfigWorkspaceApi: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = CustomConfigWorkspaceApi() method = unwrap(api.post) @@ -538,7 +539,7 @@ class TestCustomConfigWorkspaceApi: assert result["result"] == "success" - def test_logo_fallback(self, app): + def test_logo_fallback(self, app: Flask): api = CustomConfigWorkspaceApi() method = 
unwrap(api.post) @@ -569,7 +570,7 @@ class TestCustomConfigWorkspaceApi: class TestWebappLogoWorkspaceApi: - def test_no_file(self, app): + def test_no_file(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -582,7 +583,7 @@ class TestWebappLogoWorkspaceApi: with pytest.raises(NoFileUploadedError): method(api) - def test_too_many_files(self, app): + def test_too_many_files(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -601,7 +602,7 @@ class TestWebappLogoWorkspaceApi: with pytest.raises(TooManyFilesError): method(api) - def test_invalid_extension(self, app): + def test_invalid_extension(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -616,7 +617,7 @@ class TestWebappLogoWorkspaceApi: with pytest.raises(UnsupportedFileTypeError): method(api) - def test_upload_success(self, app): + def test_upload_success(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -648,7 +649,7 @@ class TestWebappLogoWorkspaceApi: assert status == 201 assert result["id"] == "file1" - def test_filename_missing(self, app): + def test_filename_missing(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -672,7 +673,7 @@ class TestWebappLogoWorkspaceApi: with pytest.raises(FilenameNotExistsError): method(api) - def test_file_too_large(self, app): + def test_file_too_large(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -701,7 +702,7 @@ class TestWebappLogoWorkspaceApi: with pytest.raises(FileTooLargeError): method(api) - def test_service_unsupported_file(self, app): + def test_service_unsupported_file(self, app: Flask): api = WebappLogoWorkspaceApi() method = unwrap(api.post) @@ -732,7 +733,7 @@ class TestWebappLogoWorkspaceApi: class TestWorkspaceInfoApi: - def test_post_success(self, app): + def test_post_success(self, app: Flask): api = WorkspaceInfoApi() method = unwrap(api.post) @@ -756,7 +757,7 @@ class 
TestWorkspaceInfoApi: assert result["result"] == "success" - def test_no_current_tenant(self, app): + def test_no_current_tenant(self, app: Flask): api = WorkspaceInfoApi() method = unwrap(api.post) @@ -774,7 +775,7 @@ class TestWorkspaceInfoApi: class TestWorkspacePermissionApi: - def test_get_success(self, app): + def test_get_success(self, app: Flask): api = WorkspacePermissionApi() method = unwrap(api.get) @@ -799,7 +800,7 @@ class TestWorkspacePermissionApi: assert status == 200 assert result["workspace_id"] == "t1" - def test_no_current_tenant(self, app): + def test_no_current_tenant(self, app: Flask): api = WorkspacePermissionApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/files/test_upload.py b/api/tests/unit_tests/controllers/files/test_upload.py index e8f3cd4b66..ff6ba0e9a1 100644 --- a/api/tests/unit_tests/controllers/files/test_upload.py +++ b/api/tests/unit_tests/controllers/files/test_upload.py @@ -1,3 +1,4 @@ +import io import types from unittest.mock import patch @@ -30,9 +31,10 @@ class DummyFile: self.filename = filename self.mimetype = mimetype self._content = content + self.stream = io.BytesIO(content) def read(self): - return self._content + return self.stream.read() class DummyToolFile: diff --git a/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py b/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py index d1b09c3a58..598677faff 100644 --- a/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py +++ b/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py @@ -189,7 +189,7 @@ class TestGetUserTenant: """Test get_user_tenant decorator""" @patch("controllers.inner_api.plugin.wraps.Tenant") - def test_should_inject_tenant_and_user_models(self, mock_tenant_class, app: Flask, monkeypatch): + def test_should_inject_tenant_and_user_models(self, mock_tenant_class, app: Flask, monkeypatch: pytest.MonkeyPatch): """Test that decorator injects tenant_model 
and user_model into kwargs""" # Arrange @@ -244,7 +244,9 @@ class TestGetUserTenant: protected_view() @patch("controllers.inner_api.plugin.wraps.Tenant") - def test_should_use_default_session_id_when_user_id_empty(self, mock_tenant_class, app: Flask, monkeypatch): + def test_should_use_default_session_id_when_user_id_empty( + self, mock_tenant_class, app: Flask, monkeypatch: pytest.MonkeyPatch + ): """Test that default session ID is used when user_id is empty string""" # Arrange diff --git a/api/tests/unit_tests/controllers/service_api/app/test_app.py b/api/tests/unit_tests/controllers/service_api/app/test_app.py index f5d93b5ac3..ae0edcf382 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_app.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_app.py @@ -41,7 +41,7 @@ class TestAppParameterApi: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") def test_get_parameters_for_chat_app( - self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app, mock_app_model + self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask, mock_app_model ): """Test retrieving parameters for a chat app.""" # Arrange @@ -91,7 +91,7 @@ class TestAppParameterApi: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") def test_get_parameters_for_workflow_app( - self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app, mock_app_model + self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask, mock_app_model ): """Test retrieving parameters for a workflow app.""" # Arrange @@ -136,7 +136,7 @@ class TestAppParameterApi: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") def test_get_parameters_raises_error_when_chat_config_missing( - self, mock_db, mock_validate_token, mock_current_app, 
mock_user_logged_in, app, mock_app_model + self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask, mock_app_model ): """Test that AppUnavailableError is raised when chat app has no config.""" # Arrange @@ -174,7 +174,7 @@ class TestAppParameterApi: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") def test_get_parameters_raises_error_when_workflow_missing( - self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app, mock_app_model + self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask, mock_app_model ): """Test that AppUnavailableError is raised when workflow app has no workflow.""" # Arrange @@ -234,7 +234,14 @@ class TestAppMetaApi: @patch("controllers.service_api.wraps.db") @patch("controllers.service_api.app.app.AppService") def test_get_app_meta( - self, mock_app_service, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app, mock_app_model + self, + mock_app_service, + mock_db, + mock_validate_token, + mock_current_app, + mock_user_logged_in, + app: Flask, + mock_app_model, ): """Test retrieving app metadata via AppService.""" # Arrange @@ -310,7 +317,7 @@ class TestAppInfoApi: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") def test_get_app_info( - self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app, mock_app_model + self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask, mock_app_model ): """Test retrieving basic app information.""" mock_current_app.login_manager = Mock() @@ -402,7 +409,9 @@ class TestAppInfoApi: @patch("controllers.service_api.wraps.current_app") @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") - def test_get_app_info_with_no_tags(self, mock_db, mock_validate_token, mock_current_app, 
mock_user_logged_in, app): + def test_get_app_info_with_no_tags( + self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask + ): """Test retrieving app info when app has no tags.""" # Arrange mock_current_app.login_manager = Mock() @@ -453,7 +462,7 @@ class TestAppInfoApi: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.db") def test_get_app_info_returns_correct_mode( - self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app, app_mode + self, mock_db, mock_validate_token, mock_current_app, mock_user_logged_in, app: Flask, app_mode ): """Test that all app modes are correctly returned.""" # Arrange diff --git a/api/tests/unit_tests/controllers/service_api/app/test_audio.py b/api/tests/unit_tests/controllers/service_api/app/test_audio.py index c16ebad739..4741481ef6 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_audio.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_audio.py @@ -13,6 +13,7 @@ from types import SimpleNamespace from unittest.mock import Mock, patch import pytest +from flask import Flask from werkzeug.datastructures import FileStorage from werkzeug.exceptions import InternalServerError @@ -190,7 +191,7 @@ class TestAudioServiceMockedBehavior: class TestAudioApi: - def test_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr(AudioService, "transcript_asr", lambda **_kwargs: {"text": "ok"}) api = AudioApi() handler = _unwrap(api.post) @@ -216,7 +217,7 @@ class TestAudioApi: (InvokeError("invoke"), CompletionRequestError), ], ) - def test_error_mapping(self, app, monkeypatch: pytest.MonkeyPatch, exc, expected) -> None: + def test_error_mapping(self, app: Flask, monkeypatch: pytest.MonkeyPatch, exc, expected) -> None: monkeypatch.setattr(AudioService, "transcript_asr", lambda **_kwargs: (_ for _ in 
()).throw(exc)) api = AudioApi() handler = _unwrap(api.post) @@ -227,7 +228,7 @@ class TestAudioApi: with pytest.raises(expected): handler(api, app_model=app_model, end_user=end_user) - def test_unhandled_error(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_unhandled_error(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( AudioService, "transcript_asr", lambda **_kwargs: (_ for _ in ()).throw(RuntimeError("boom")) ) @@ -242,7 +243,7 @@ class TestAudioApi: class TestTextApi: - def test_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr(AudioService, "transcript_tts", lambda **_kwargs: {"audio": "ok"}) api = TextApi() @@ -259,7 +260,7 @@ class TestTextApi: assert response == {"audio": "ok"} - def test_error_mapping(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_error_mapping(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( AudioService, "transcript_tts", lambda **_kwargs: (_ for _ in ()).throw(QuotaExceededError()) ) diff --git a/api/tests/unit_tests/controllers/service_api/app/test_completion.py b/api/tests/unit_tests/controllers/service_api/app/test_completion.py index 3364c07e62..259741937f 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_completion.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_completion.py @@ -16,6 +16,7 @@ from types import SimpleNamespace from unittest.mock import Mock, patch import pytest +from flask import Flask from pydantic import ValidationError from werkzeug.exceptions import BadRequest, NotFound @@ -295,7 +296,7 @@ class TestCompletionControllerLogic: @patch("controllers.service_api.app.completion.service_api_ns") @patch("controllers.service_api.app.completion.AppGenerateService") - def test_completion_api_post_success(self, mock_generate_service, mock_service_api_ns, app): + def 
test_completion_api_post_success(self, mock_generate_service, mock_service_api_ns, app: Flask): """Test CompletionApi.post success path.""" from controllers.service_api.app.completion import CompletionApi @@ -320,7 +321,7 @@ class TestCompletionControllerLogic: mock_generate_service.generate.assert_called_once() @patch("controllers.service_api.app.completion.service_api_ns") - def test_completion_api_post_wrong_app_mode(self, mock_service_api_ns, app): + def test_completion_api_post_wrong_app_mode(self, mock_service_api_ns, app: Flask): """Test CompletionApi.post with wrong app mode.""" from controllers.service_api.app.completion import CompletionApi @@ -334,7 +335,7 @@ class TestCompletionControllerLogic: @patch("controllers.service_api.app.completion.service_api_ns") @patch("controllers.service_api.app.completion.AppGenerateService") - def test_chat_api_post_success(self, mock_generate_service, mock_service_api_ns, app): + def test_chat_api_post_success(self, mock_generate_service, mock_service_api_ns, app: Flask): """Test ChatApi.post success path.""" from controllers.service_api.app.completion import ChatApi @@ -355,7 +356,7 @@ class TestCompletionControllerLogic: assert response == {"text": "compacted"} @patch("controllers.service_api.app.completion.service_api_ns") - def test_chat_api_post_wrong_app_mode(self, mock_service_api_ns, app): + def test_chat_api_post_wrong_app_mode(self, mock_service_api_ns, app: Flask): """Test ChatApi.post with wrong app mode.""" from controllers.service_api.app.completion import ChatApi @@ -368,7 +369,7 @@ class TestCompletionControllerLogic: ChatApi().post.__wrapped__(ChatApi(), mock_app_model, mock_end_user) @patch("controllers.service_api.app.completion.AppTaskService") - def test_completion_stop_api_success(self, mock_task_service, app): + def test_completion_stop_api_success(self, mock_task_service, app: Flask): """Test CompletionStopApi.post success.""" from controllers.service_api.app.completion import CompletionStopApi 
@@ -385,7 +386,7 @@ class TestCompletionControllerLogic: mock_task_service.stop_task.assert_called_once() @patch("controllers.service_api.app.completion.AppTaskService") - def test_chat_stop_api_success(self, mock_task_service, app): + def test_chat_stop_api_success(self, mock_task_service, app: Flask): """Test ChatStopApi.post success.""" from controllers.service_api.app.completion import ChatStopApi diff --git a/api/tests/unit_tests/controllers/service_api/app/test_conversation.py b/api/tests/unit_tests/controllers/service_api/app/test_conversation.py index 4fb8ecf784..74c13d50f6 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_conversation.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_conversation.py @@ -20,6 +20,7 @@ from types import SimpleNamespace from unittest.mock import Mock, patch import pytest +from flask import Flask from werkzeug.exceptions import BadRequest, NotFound import services @@ -339,7 +340,7 @@ class TestConversationAppModeValidation: @pytest.mark.parametrize( "mode", [ - AppMode.CHAT.value, + AppMode.CHAT, AppMode.AGENT_CHAT.value, AppMode.ADVANCED_CHAT.value, ], @@ -364,7 +365,7 @@ class TestConversationAppModeValidation: app raises NotChatAppError. 
""" app = Mock(spec=App) - app.mode = AppMode.COMPLETION.value + app.mode = AppMode.COMPLETION app_mode = AppMode.value_of(app.mode) assert app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT} @@ -497,14 +498,14 @@ class TestConversationApiController: def test_list_not_chat(self, app) -> None: api = ConversationApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations", method="GET"): with pytest.raises(NotChatAppError): handler(api, app_model=app_model, end_user=end_user) - def test_list_last_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_list_last_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: class _BeginStub: def __enter__(self): return SimpleNamespace() @@ -530,7 +531,7 @@ class TestConversationApiController: api = ConversationApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -545,14 +546,14 @@ class TestConversationDetailApiController: def test_delete_not_chat(self, app) -> None: api = ConversationDetailApi() handler = _unwrap(api.delete) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations/1", method="DELETE"): with pytest.raises(NotChatAppError): handler(api, app_model=app_model, end_user=end_user, c_id="00000000-0000-0000-0000-000000000001") - def test_delete_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_delete_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( ConversationService, "delete", @@ -561,7 +562,7 @@ class TestConversationDetailApiController: api = 
ConversationDetailApi() handler = _unwrap(api.delete) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context("/conversations/1", method="DELETE"): @@ -570,7 +571,7 @@ class TestConversationDetailApiController: class TestConversationRenameApiController: - def test_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( ConversationService, "rename", @@ -579,7 +580,7 @@ class TestConversationRenameApiController: api = ConversationRenameApi() handler = _unwrap(api.post) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -595,14 +596,14 @@ class TestConversationVariablesApiController: def test_not_chat(self, app) -> None: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations/1/variables", method="GET"): with pytest.raises(NotChatAppError): handler(api, app_model=app_model, end_user=end_user, c_id="00000000-0000-0000-0000-000000000001") - def test_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( ConversationService, "get_conversational_variable", @@ -611,7 +612,7 @@ class TestConversationVariablesApiController: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -621,7 +622,7 @@ class TestConversationVariablesApiController: with pytest.raises(NotFound): 
handler(api, app_model=app_model, end_user=end_user, c_id="00000000-0000-0000-0000-000000000001") - def test_success_serializes_response(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success_serializes_response(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: created_at = datetime(2026, 1, 2, 3, 4, 5, tzinfo=UTC) monkeypatch.setattr( ConversationService, @@ -644,7 +645,7 @@ class TestConversationVariablesApiController: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -661,7 +662,7 @@ class TestConversationVariablesApiController: class TestConversationVariableDetailApiController: - def test_update_type_mismatch(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_update_type_mismatch(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( ConversationService, "update_conversation_variable", @@ -670,7 +671,7 @@ class TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -687,7 +688,7 @@ class TestConversationVariableDetailApiController: variable_id="00000000-0000-0000-0000-000000000002", ) - def test_update_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_update_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( ConversationService, "update_conversation_variable", @@ -696,7 +697,7 @@ class TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with 
app.test_request_context( @@ -713,7 +714,7 @@ class TestConversationVariableDetailApiController: variable_id="00000000-0000-0000-0000-000000000002", ) - def test_update_success_serializes_response(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_update_success_serializes_response(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: created_at = datetime(2026, 1, 2, 3, 4, 5, tzinfo=UTC) monkeypatch.setattr( ConversationService, @@ -730,7 +731,7 @@ class TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( diff --git a/api/tests/unit_tests/controllers/service_api/app/test_file.py b/api/tests/unit_tests/controllers/service_api/app/test_file.py index 7060bd79df..2615c3edac 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_file.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_file.py @@ -16,6 +16,7 @@ import uuid from unittest.mock import Mock, patch import pytest +from flask import Flask from controllers.common.errors import ( FilenameNotExistsError, @@ -282,7 +283,7 @@ class TestFileApiPost: assert status == 201 mock_file_svc_cls.return_value.upload_file.assert_called_once() - def test_upload_no_file(self, app, mock_app_model, mock_end_user): + def test_upload_no_file(self, app: Flask, mock_app_model, mock_end_user): """Test NoFileUploadedError when no file in request.""" from controllers.service_api.app.file import FileApi @@ -296,7 +297,7 @@ class TestFileApiPost: with pytest.raises(NoFileUploadedError): _unwrap(api.post)(api, app_model=mock_app_model, end_user=mock_end_user) - def test_upload_too_many_files(self, app, mock_app_model, mock_end_user): + def test_upload_too_many_files(self, app: Flask, mock_app_model, mock_end_user): """Test TooManyFilesError when multiple files uploaded.""" from io 
import BytesIO @@ -317,7 +318,7 @@ class TestFileApiPost: with pytest.raises(TooManyFilesError): _unwrap(api.post)(api, app_model=mock_app_model, end_user=mock_end_user) - def test_upload_no_mimetype(self, app, mock_app_model, mock_end_user): + def test_upload_no_mimetype(self, app: Flask, mock_app_model, mock_end_user): """Test UnsupportedFileTypeError when file has no mimetype.""" from io import BytesIO diff --git a/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py b/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py index 846d5368f3..510d4a9470 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py @@ -11,6 +11,7 @@ from types import SimpleNamespace from unittest.mock import ANY, MagicMock, Mock import pytest +from flask import Flask import services.app_generate_service as ags_module from controllers.service_api.app.workflow_events import WorkflowEventsApi @@ -281,7 +282,7 @@ class TestHitlServiceApi: workflow_generator.convert_to_event_stream.assert_called_once_with(["raw-event"]) def test_workflow_events_snapshot_continue_on_pause_keeps_pause_open( - self, app, monkeypatch: pytest.MonkeyPatch + self, app: Flask, monkeypatch: pytest.MonkeyPatch ) -> None: workflow_run = SimpleNamespace( id="run-1", diff --git a/api/tests/unit_tests/controllers/service_api/app/test_message.py b/api/tests/unit_tests/controllers/service_api/app/test_message.py index c2b8aed1ae..2bc9771862 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_message.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_message.py @@ -19,6 +19,7 @@ from types import SimpleNamespace from unittest.mock import Mock, patch import pytest +from flask import Flask from werkzeug.exceptions import BadRequest, InternalServerError, NotFound from controllers.service_api.app.error import NotChatAppError @@ -390,7 +391,7 @@ class 
TestMessageListApi: with pytest.raises(NotChatAppError): handler(api, app_model=app_model, end_user=end_user) - def test_conversation_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_conversation_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( MessageService, "pagination_by_first_id", @@ -409,7 +410,7 @@ class TestMessageListApi: with pytest.raises(NotFound): handler(api, app_model=app_model, end_user=end_user) - def test_first_message_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_first_message_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( MessageService, "pagination_by_first_id", @@ -430,7 +431,7 @@ class TestMessageListApi: class TestMessageFeedbackApi: - def test_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( MessageService, "create_feedback", @@ -452,7 +453,7 @@ class TestMessageFeedbackApi: class TestAppGetFeedbacksApi: - def test_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr(MessageService, "get_all_messages_feedbacks", lambda *_args, **_kwargs: ["f1"]) api = AppGetFeedbacksApi() @@ -476,7 +477,7 @@ class TestMessageSuggestedApi: with pytest.raises(NotChatAppError): handler(api, app_model=app_model, end_user=end_user, message_id="m1") - def test_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( MessageService, "get_suggested_questions_after_answer", @@ -492,7 +493,7 @@ class TestMessageSuggestedApi: with pytest.raises(NotFound): handler(api, app_model=app_model, end_user=end_user, message_id="m1") - def test_disabled(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def 
test_disabled(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( MessageService, "get_suggested_questions_after_answer", @@ -508,7 +509,7 @@ class TestMessageSuggestedApi: with pytest.raises(BadRequest): handler(api, app_model=app_model, end_user=end_user, message_id="m1") - def test_internal_error(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_internal_error(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( MessageService, "get_suggested_questions_after_answer", @@ -524,7 +525,7 @@ class TestMessageSuggestedApi: with pytest.raises(InternalServerError): handler(api, app_model=app_model, end_user=end_user, message_id="m1") - def test_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( MessageService, "get_suggested_questions_after_answer", diff --git a/api/tests/unit_tests/controllers/service_api/app/test_workflow.py b/api/tests/unit_tests/controllers/service_api/app/test_workflow.py index da09ec13ce..7115ea1e12 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_workflow.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_workflow.py @@ -20,6 +20,7 @@ from types import SimpleNamespace from unittest.mock import Mock, patch import pytest +from flask import Flask from werkzeug.exceptions import BadRequest, NotFound from controllers.service_api.app.error import NotWorkflowAppError @@ -366,7 +367,7 @@ class TestWorkflowRunRepository: class TestWorkflowRunDetailApi: - def test_not_workflow_app(self, app) -> None: + def test_not_workflow_app(self, app: Flask) -> None: api = WorkflowRunDetailApi() handler = _unwrap(api.get) app_model = SimpleNamespace(mode=AppMode.CHAT.value) @@ -397,7 +398,7 @@ class TestWorkflowRunDetailApi: class TestWorkflowRunApi: - def test_not_workflow_app(self, app) -> None: + def test_not_workflow_app(self, app: Flask) -> None: api = 
WorkflowRunApi() handler = _unwrap(api.post) app_model = SimpleNamespace(mode=AppMode.CHAT.value) @@ -407,7 +408,7 @@ class TestWorkflowRunApi: with pytest.raises(NotWorkflowAppError): handler(api, app_model=app_model, end_user=end_user) - def test_rate_limit(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_rate_limit(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( AppGenerateService, "generate", @@ -425,7 +426,7 @@ class TestWorkflowRunApi: class TestWorkflowRunByIdApi: - def test_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( AppGenerateService, "generate", @@ -441,7 +442,7 @@ class TestWorkflowRunByIdApi: with pytest.raises(NotFound): handler(api, app_model=app_model, end_user=end_user, workflow_id="w1") - def test_draft_workflow(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_draft_workflow(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr( AppGenerateService, "generate", @@ -459,7 +460,7 @@ class TestWorkflowRunByIdApi: class TestWorkflowTaskStopApi: - def test_wrong_mode(self, app) -> None: + def test_wrong_mode(self, app: Flask) -> None: api = WorkflowTaskStopApi() handler = _unwrap(api.post) app_model = SimpleNamespace(mode=AppMode.CHAT.value) @@ -469,7 +470,7 @@ class TestWorkflowTaskStopApi: with pytest.raises(NotWorkflowAppError): handler(api, app_model=app_model, end_user=end_user, task_id="t1") - def test_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: stop_mock = Mock() send_mock = Mock() monkeypatch.setattr(AppQueueManager, "set_stop_flag_no_user_check", stop_mock) @@ -489,7 +490,7 @@ class TestWorkflowTaskStopApi: class TestWorkflowAppLogApi: - def test_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_success(self, app: Flask, 
monkeypatch: pytest.MonkeyPatch) -> None: class _BeginStub: def __enter__(self): return SimpleNamespace() @@ -557,7 +558,7 @@ class TestWorkflowRunDetailApiGet: self, mock_db, mock_repo_factory, - app, + app: Flask, mock_workflow_app, ): """Test successful workflow run detail retrieval.""" @@ -579,7 +580,7 @@ class TestWorkflowRunDetailApiGet: assert result["status"] == "succeeded" @patch("controllers.service_api.app.workflow.db") - def test_get_workflow_run_wrong_app_mode(self, mock_db, app): + def test_get_workflow_run_wrong_app_mode(self, mock_db, app: Flask): """Test NotWorkflowAppError when app mode is not workflow or advanced_chat.""" from controllers.service_api.app.workflow import WorkflowRunDetailApi @@ -604,7 +605,7 @@ class TestWorkflowTaskStopApiPost: self, mock_queue_mgr, mock_graph_mgr, - app, + app: Flask, mock_workflow_app, ): """Test successful workflow task stop.""" @@ -624,7 +625,7 @@ class TestWorkflowTaskStopApiPost: mock_graph_mgr.assert_called_once() mock_graph_mgr.return_value.send_stop_command.assert_called_once_with("task-1") - def test_stop_workflow_task_wrong_app_mode(self, app): + def test_stop_workflow_task_wrong_app_mode(self, app: Flask): """Test NotWorkflowAppError when app mode is not workflow.""" from controllers.service_api.app.workflow import WorkflowTaskStopApi @@ -649,7 +650,7 @@ class TestWorkflowAppLogApiGet: self, mock_db, mock_wf_svc_cls, - app, + app: Flask, mock_workflow_app, ): """Test successful workflow log retrieval.""" diff --git a/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py b/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py index f45a7f9632..b3edc2ecd8 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py @@ -9,6 +9,7 @@ from types import SimpleNamespace from unittest.mock import Mock import pytest +from flask import Flask from werkzeug.exceptions import 
NotFound from controllers.service_api.app.error import NotWorkflowAppError @@ -41,7 +42,7 @@ class TestWorkflowEventsApi: with pytest.raises(NotWorkflowAppError): handler(api, app_model=app_model, end_user=end_user, task_id="run-1") - def test_workflow_run_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_workflow_run_not_found(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: _mock_repo_for_run(monkeypatch, workflow_run=None) api = WorkflowEventsApi() handler = _unwrap(api.get) @@ -52,7 +53,7 @@ class TestWorkflowEventsApi: with pytest.raises(NotFound): handler(api, app_model=app_model, end_user=end_user, task_id="run-1") - def test_workflow_run_permission_denied(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_workflow_run_permission_denied(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: workflow_run = SimpleNamespace( id="run-1", app_id="app-1", @@ -70,7 +71,7 @@ class TestWorkflowEventsApi: with pytest.raises(NotFound): handler(api, app_model=app_model, end_user=end_user, task_id="run-1") - def test_finished_run_returns_sse(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_finished_run_returns_sse(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: workflow_run = SimpleNamespace( id="run-1", app_id="app-1", @@ -103,7 +104,7 @@ class TestWorkflowEventsApi: assert payload["task_id"] == "run-1" assert payload["event"] == "workflow_finished" - def test_running_run_streams_events(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_running_run_streams_events(self, app: Flask, monkeypatch: pytest.MonkeyPatch) -> None: workflow_run = SimpleNamespace( id="run-1", app_id="app-1", @@ -135,7 +136,7 @@ class TestWorkflowEventsApi: ) workflow_generator.convert_to_event_stream.assert_called_once_with(["raw-event"]) - def test_running_run_with_snapshot(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + def test_running_run_with_snapshot(self, app: Flask, monkeypatch: 
pytest.MonkeyPatch) -> None: workflow_run = SimpleNamespace( id="run-1", app_id="app-1", diff --git a/api/tests/unit_tests/controllers/service_api/dataset/rag_pipeline/test_rag_pipeline_workflow.py b/api/tests/unit_tests/controllers/service_api/dataset/rag_pipeline/test_rag_pipeline_workflow.py index f33c482d04..362af883ed 100644 --- a/api/tests/unit_tests/controllers/service_api/dataset/rag_pipeline/test_rag_pipeline_workflow.py +++ b/api/tests/unit_tests/controllers/service_api/dataset/rag_pipeline/test_rag_pipeline_workflow.py @@ -23,6 +23,7 @@ from datetime import UTC, datetime from unittest.mock import Mock, patch import pytest +from flask import Flask from werkzeug.datastructures import FileStorage from werkzeug.exceptions import Forbidden, NotFound @@ -373,7 +374,7 @@ class TestDatasourcePluginsApiGet: @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.RagPipelineService") - def test_get_plugins_success(self, mock_svc_cls, mock_db, app): + def test_get_plugins_success(self, mock_svc_cls, mock_db, app: Flask): """Test successful retrieval of datasource plugins.""" tenant_id = str(uuid.uuid4()) dataset_id = str(uuid.uuid4()) @@ -396,7 +397,7 @@ class TestDatasourcePluginsApiGet: ) @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") - def test_get_plugins_not_found(self, mock_db, app): + def test_get_plugins_not_found(self, mock_db, app: Flask): """Test NotFound when dataset check fails.""" mock_db.session.scalar.return_value = None @@ -407,7 +408,7 @@ class TestDatasourcePluginsApiGet: @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.RagPipelineService") - def test_get_plugins_empty_list(self, mock_svc_cls, mock_db, app): + def test_get_plugins_empty_list(self, mock_svc_cls, mock_db, app: Flask): """Test empty plugin list.""" 
mock_db.session.scalar.return_value = Mock() mock_svc_instance = Mock() @@ -439,7 +440,7 @@ class TestDatasourceNodeRunApiPost: @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.RagPipelineService") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.service_api_ns") - def test_post_success(self, mock_ns, mock_db, mock_svc_cls, mock_current_user, mock_gen, mock_helper, app): + def test_post_success(self, mock_ns, mock_db, mock_svc_cls, mock_current_user, mock_gen, mock_helper, app: Flask): """Test successful datasource node run.""" tenant_id = str(uuid.uuid4()) dataset_id = str(uuid.uuid4()) @@ -473,7 +474,7 @@ class TestDatasourceNodeRunApiPost: mock_svc_instance.run_datasource_workflow_node.assert_called_once() @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") - def test_post_not_found(self, mock_db, app): + def test_post_not_found(self, mock_db, app: Flask): """Test NotFound when dataset check fails.""" mock_db.session.scalar.return_value = None @@ -488,7 +489,7 @@ class TestDatasourceNodeRunApiPost: ) @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.service_api_ns") - def test_post_fails_when_current_user_not_account(self, mock_ns, mock_db, app): + def test_post_fails_when_current_user_not_account(self, mock_ns, mock_db, app: Flask): """Test AssertionError when current_user is not an Account instance.""" mock_db.session.scalar.return_value = Mock() mock_ns.payload = { @@ -549,7 +550,7 @@ class TestPipelineRunApiPost: mock_gen_svc.generate.assert_called_once() @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") - def test_post_not_found(self, mock_db, app): + def test_post_not_found(self, mock_db, app: Flask): """Test NotFound when dataset check fails.""" 
mock_db.session.scalar.return_value = None @@ -561,7 +562,7 @@ class TestPipelineRunApiPost: @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.current_user", new="not_account") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.service_api_ns") - def test_post_forbidden_non_account_user(self, mock_ns, mock_db, app): + def test_post_forbidden_non_account_user(self, mock_ns, mock_db, app: Flask): """Test Forbidden when current_user is not an Account.""" mock_db.session.scalar.return_value = Mock() mock_ns.payload = { @@ -585,7 +586,7 @@ class TestFileUploadApiPost: @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.FileService") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.current_user") @patch("controllers.service_api.dataset.rag_pipeline.rag_pipeline_workflow.db") - def test_upload_success(self, mock_db, mock_current_user, mock_file_svc_cls, app): + def test_upload_success(self, mock_db, mock_current_user, mock_file_svc_cls, app: Flask): """Test successful file upload.""" mock_current_user.__bool__ = Mock(return_value=True) @@ -621,7 +622,7 @@ class TestFileUploadApiPost: assert response["name"] == "doc.pdf" assert response["extension"] == "pdf" - def test_upload_no_file(self, app): + def test_upload_no_file(self, app: Flask): """Test error when no file is uploaded.""" with app.test_request_context( "/datasets/pipeline/file-upload", diff --git a/api/tests/unit_tests/controllers/service_api/dataset/test_dataset_segment.py b/api/tests/unit_tests/controllers/service_api/dataset/test_dataset_segment.py index e9c3e6d376..fe8fc02548 100644 --- a/api/tests/unit_tests/controllers/service_api/dataset/test_dataset_segment.py +++ b/api/tests/unit_tests/controllers/service_api/dataset/test_dataset_segment.py @@ -18,6 +18,7 @@ import uuid from unittest.mock import Mock, patch import pytest +from 
flask import Flask from werkzeug.exceptions import NotFound from controllers.service_api.dataset.segment import ( @@ -782,7 +783,7 @@ class TestSegmentApiGet: mock_seg_svc, mock_marshal, mock_summary_svc, - app, + app: Flask, mock_tenant, mock_dataset, mock_segment, @@ -893,7 +894,7 @@ class TestSegmentApiPost: mock_seg_svc, mock_marshal, mock_summary_svc, - app, + app: Flask, mock_tenant, mock_dataset, mock_segment, @@ -946,7 +947,7 @@ class TestSegmentApiPost: mock_db, mock_account_fn, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -989,7 +990,7 @@ class TestSegmentApiPost: mock_db, mock_account_fn, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1041,7 +1042,7 @@ class TestDatasetSegmentApiDelete: mock_doc_svc, mock_dataset_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, mock_segment, @@ -1086,7 +1087,7 @@ class TestDatasetSegmentApiDelete: mock_account_fn, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1128,7 +1129,7 @@ class TestDatasetSegmentApiDelete: mock_account_fn, mock_doc_svc, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1162,7 +1163,7 @@ class TestDatasetSegmentApiDelete: mock_account_fn, mock_dataset_svc, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1232,7 +1233,7 @@ class TestDatasetSegmentApiUpdate: mock_seg_svc, mock_marshal, mock_summary_svc, - app, + app: Flask, mock_tenant, mock_dataset, mock_segment, @@ -1282,7 +1283,7 @@ class TestDatasetSegmentApiUpdate: mock_account_fn, mock_dataset_svc, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1322,7 +1323,7 @@ class TestDatasetSegmentApiUpdate: mock_dataset_svc, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1374,7 +1375,7 @@ class TestDatasetSegmentApiGetSingle: mock_seg_svc, mock_marshal, mock_summary_svc, - app, + app: Flask, mock_tenant, mock_dataset, mock_segment, @@ -1421,7 +1422,7 @@ class 
TestDatasetSegmentApiGetSingle: mock_seg_svc, mock_marshal, mock_summary_svc, - app, + app: Flask, mock_tenant, mock_dataset, mock_segment, @@ -1460,7 +1461,7 @@ class TestDatasetSegmentApiGetSingle: self, mock_db, mock_account_fn, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1491,7 +1492,7 @@ class TestDatasetSegmentApiGetSingle: mock_account_fn, mock_dataset_svc, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1526,7 +1527,7 @@ class TestDatasetSegmentApiGetSingle: mock_dataset_svc, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1570,7 +1571,7 @@ class TestChildChunkApiGet: mock_doc_svc, mock_seg_svc, mock_marshal, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1609,7 +1610,7 @@ class TestChildChunkApiGet: self, mock_db, mock_account_fn, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1638,7 +1639,7 @@ class TestChildChunkApiGet: mock_db, mock_account_fn, mock_doc_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1670,7 +1671,7 @@ class TestChildChunkApiGet: mock_account_fn, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1729,7 +1730,7 @@ class TestChildChunkApiPost: mock_doc_svc, mock_seg_svc, mock_marshal, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1771,7 +1772,7 @@ class TestChildChunkApiPost: mock_feature_svc, mock_db, mock_account_fn, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1809,7 +1810,7 @@ class TestChildChunkApiPost: mock_account_fn, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1863,7 +1864,7 @@ class TestDatasetChildChunkApiDelete: mock_account_fn, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1913,7 +1914,7 @@ class TestDatasetChildChunkApiDelete: mock_account_fn, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1954,7 +1955,7 @@ class TestDatasetChildChunkApiDelete: mock_account_fn, 
mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -1994,7 +1995,7 @@ class TestDatasetChildChunkApiDelete: mock_account_fn, mock_doc_svc, mock_seg_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): diff --git a/api/tests/unit_tests/controllers/service_api/dataset/test_hit_testing.py b/api/tests/unit_tests/controllers/service_api/dataset/test_hit_testing.py index 9be8e56f56..a26cdf6563 100644 --- a/api/tests/unit_tests/controllers/service_api/dataset/test_hit_testing.py +++ b/api/tests/unit_tests/controllers/service_api/dataset/test_hit_testing.py @@ -171,6 +171,62 @@ class TestHitTestingApiPost: assert passed_retrieval_model["search_method"] == "semantic_search" assert passed_retrieval_model["top_k"] == 10 + @patch("controllers.service_api.dataset.hit_testing.service_api_ns") + @patch("controllers.console.datasets.hit_testing_base.marshal") + @patch("controllers.console.datasets.hit_testing_base.HitTestingService") + @patch("controllers.console.datasets.hit_testing_base.DatasetService") + @patch("controllers.console.datasets.hit_testing_base.current_user", new_callable=lambda: Mock(spec=Account)) + def test_post_preserves_retrieval_model_metadata_filtering_conditions( + self, + mock_current_user, + mock_dataset_svc, + mock_hit_svc, + mock_marshal, + mock_ns, + app, + ): + """Service API retrieval payload should not drop metadata filters.""" + dataset_id = str(uuid.uuid4()) + tenant_id = str(uuid.uuid4()) + + mock_dataset = Mock() + mock_dataset.id = dataset_id + + mock_dataset_svc.get_dataset.return_value = mock_dataset + mock_dataset_svc.check_dataset_permission.return_value = None + mock_hit_svc.retrieve.return_value = {"query": "filtered query", "records": []} + mock_hit_svc.hit_testing_args_check.return_value = None + mock_marshal.return_value = [] + + metadata_filtering_conditions = { + "logical_operator": "and", + "conditions": [ + { + "name": "category", + "comparison_operator": "is", + "value": "finance", + } + ], + } + 
mock_ns.payload = { + "query": "filtered query", + "retrieval_model": { + "search_method": "semantic_search", + "reranking_enable": False, + "score_threshold_enabled": False, + "top_k": 4, + "metadata_filtering_conditions": metadata_filtering_conditions, + }, + } + + with app.test_request_context(): + api = HitTestingApi() + HitTestingApi.post.__wrapped__(api, tenant_id, dataset_id) + + passed_retrieval_model = mock_hit_svc.retrieve.call_args.kwargs.get("retrieval_model") + assert passed_retrieval_model is not None + assert passed_retrieval_model["metadata_filtering_conditions"] == metadata_filtering_conditions + @patch("controllers.service_api.dataset.hit_testing.service_api_ns") @patch("controllers.console.datasets.hit_testing_base.marshal") @patch("controllers.console.datasets.hit_testing_base.HitTestingService") diff --git a/api/tests/unit_tests/controllers/service_api/dataset/test_metadata.py b/api/tests/unit_tests/controllers/service_api/dataset/test_metadata.py index b93a1cf14b..b7e24f9201 100644 --- a/api/tests/unit_tests/controllers/service_api/dataset/test_metadata.py +++ b/api/tests/unit_tests/controllers/service_api/dataset/test_metadata.py @@ -19,6 +19,7 @@ import uuid from unittest.mock import Mock, patch import pytest +from flask import Flask from werkzeug.exceptions import NotFound from controllers.service_api.dataset.metadata import ( @@ -76,7 +77,7 @@ class TestDatasetMetadataCreatePost: mock_dataset_svc, mock_meta_svc, mock_marshal, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -106,7 +107,7 @@ class TestDatasetMetadataCreatePost: def test_create_metadata_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -136,7 +137,7 @@ class TestDatasetMetadataCreateGet: self, mock_dataset_svc, mock_meta_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -160,7 +161,7 @@ class TestDatasetMetadataCreateGet: def test_get_metadata_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, 
mock_tenant, mock_dataset, ): @@ -201,7 +202,7 @@ class TestDatasetMetadataServiceApiPatch: mock_dataset_svc, mock_meta_svc, mock_marshal, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -232,7 +233,7 @@ class TestDatasetMetadataServiceApiPatch: def test_update_metadata_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -273,7 +274,7 @@ class TestDatasetMetadataServiceApiDelete: mock_current_user, mock_dataset_svc, mock_meta_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -302,7 +303,7 @@ class TestDatasetMetadataServiceApiDelete: def test_delete_metadata_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -336,7 +337,7 @@ class TestDatasetMetadataBuiltInFieldGet: def test_get_built_in_fields_success( self, mock_meta_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -382,7 +383,7 @@ class TestDatasetMetadataBuiltInFieldAction: mock_current_user, mock_dataset_svc, mock_meta_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -414,7 +415,7 @@ class TestDatasetMetadataBuiltInFieldAction: mock_current_user, mock_dataset_svc, mock_meta_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -441,7 +442,7 @@ class TestDatasetMetadataBuiltInFieldAction: def test_action_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -485,7 +486,7 @@ class TestDocumentMetadataEditPost: mock_current_user, mock_dataset_svc, mock_meta_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): @@ -513,7 +514,7 @@ class TestDocumentMetadataEditPost: def test_update_documents_metadata_dataset_not_found( self, mock_dataset_svc, - app, + app: Flask, mock_tenant, mock_dataset, ): diff --git a/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py b/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py index 3cc444e467..9c310a4f45 100644 --- 
a/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py +++ b/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py @@ -3,6 +3,7 @@ from unittest.mock import Mock from uuid import UUID, uuid4 import pytest +from pytest_mock import MockerFixture from controllers.service_api.end_user.end_user import EndUserApi from controllers.service_api.end_user.error import EndUserNotFoundError @@ -21,7 +22,9 @@ class TestEndUserApi: app.tenant_id = str(uuid4()) return app - def test_get_end_user_returns_all_attributes(self, mocker, resource: EndUserApi, app_model: App) -> None: + def test_get_end_user_returns_all_attributes( + self, mocker: MockerFixture, resource: EndUserApi, app_model: App + ) -> None: end_user = Mock(spec=EndUser) end_user.id = str(uuid4()) end_user.tenant_id = app_model.tenant_id @@ -54,7 +57,7 @@ class TestEndUserApi: assert result["created_at"].startswith("2024-01-01T00:00:00") assert result["updated_at"].startswith("2024-01-02T00:00:00") - def test_get_end_user_not_found(self, mocker, resource: EndUserApi, app_model: App) -> None: + def test_get_end_user_not_found(self, mocker: MockerFixture, resource: EndUserApi, app_model: App) -> None: mocker.patch("controllers.service_api.end_user.end_user.EndUserService.get_end_user_by_id", return_value=None) with pytest.raises(EndUserNotFoundError): diff --git a/api/tests/unit_tests/controllers/service_api/test_index.py b/api/tests/unit_tests/controllers/service_api/test_index.py index c560a3c698..8441118181 100644 --- a/api/tests/unit_tests/controllers/service_api/test_index.py +++ b/api/tests/unit_tests/controllers/service_api/test_index.py @@ -5,6 +5,7 @@ Unit tests for Service API Index endpoint from unittest.mock import MagicMock, patch import pytest +from flask import Flask from controllers.service_api.index import IndexApi @@ -13,7 +14,7 @@ class TestIndexApi: """Test suite for IndexApi resource.""" @patch("controllers.service_api.index.dify_config", autospec=True) - def 
test_get_returns_api_info(self, mock_config, app): + def test_get_returns_api_info(self, mock_config, app: Flask): """Test that GET returns API metadata with correct structure.""" # Arrange mock_config.project.version = "1.0.0-test" @@ -32,7 +33,7 @@ class TestIndexApi: assert response["api_version"] == "v1" assert response["server_version"] == "1.0.0-test" - def test_get_response_has_required_fields(self, app): + def test_get_response_has_required_fields(self, app: Flask): """Test that response contains all required fields.""" # Arrange mock_config = MagicMock() diff --git a/api/tests/unit_tests/controllers/service_api/test_wraps.py b/api/tests/unit_tests/controllers/service_api/test_wraps.py index 6dfbdcf98e..30d7b92913 100644 --- a/api/tests/unit_tests/controllers/service_api/test_wraps.py +++ b/api/tests/unit_tests/controllers/service_api/test_wraps.py @@ -39,7 +39,7 @@ class TestValidateAndGetApiToken: app.config["TESTING"] = True return app - def test_missing_authorization_header(self, app): + def test_missing_authorization_header(self, app: Flask): """Test that Unauthorized is raised when Authorization header is missing.""" # Arrange with app.test_request_context("/", method="GET"): @@ -50,7 +50,7 @@ class TestValidateAndGetApiToken: validate_and_get_api_token("app") assert "Authorization header must be provided" in str(exc_info.value) - def test_invalid_auth_scheme(self, app): + def test_invalid_auth_scheme(self, app: Flask): """Test that Unauthorized is raised when auth scheme is not Bearer.""" # Arrange with app.test_request_context("/", method="GET", headers={"Authorization": "Basic token123"}): @@ -62,7 +62,7 @@ class TestValidateAndGetApiToken: @patch("controllers.service_api.wraps.record_token_usage") @patch("controllers.service_api.wraps.ApiTokenCache") @patch("controllers.service_api.wraps.fetch_token_with_single_flight") - def test_valid_token_returns_api_token(self, mock_fetch_token, mock_cache_cls, mock_record_usage, app): + def 
test_valid_token_returns_api_token(self, mock_fetch_token, mock_cache_cls, mock_record_usage, app: Flask): """Test that valid token returns the ApiToken object.""" # Arrange mock_api_token = Mock(spec=ApiToken) @@ -84,7 +84,7 @@ class TestValidateAndGetApiToken: @patch("controllers.service_api.wraps.record_token_usage") @patch("controllers.service_api.wraps.ApiTokenCache") @patch("controllers.service_api.wraps.fetch_token_with_single_flight") - def test_invalid_token_raises_unauthorized(self, mock_fetch_token, mock_cache_cls, mock_record_usage, app): + def test_invalid_token_raises_unauthorized(self, mock_fetch_token, mock_cache_cls, mock_record_usage, app: Flask): """Test that invalid token raises Unauthorized.""" # Arrange from werkzeug.exceptions import Unauthorized @@ -161,7 +161,7 @@ class TestValidateAppToken: @patch("controllers.service_api.wraps.db") @patch("controllers.service_api.wraps.validate_and_get_api_token") - def test_app_not_found_raises_forbidden(self, mock_validate_token, mock_db, app): + def test_app_not_found_raises_forbidden(self, mock_validate_token, mock_db, app: Flask): """Test that Forbidden is raised when app no longer exists.""" # Arrange mock_api_token = Mock() @@ -182,7 +182,7 @@ class TestValidateAppToken: @patch("controllers.service_api.wraps.db") @patch("controllers.service_api.wraps.validate_and_get_api_token") - def test_app_status_abnormal_raises_forbidden(self, mock_validate_token, mock_db, app): + def test_app_status_abnormal_raises_forbidden(self, mock_validate_token, mock_db, app: Flask): """Test that Forbidden is raised when app status is abnormal.""" # Arrange mock_api_token = Mock() @@ -205,7 +205,7 @@ class TestValidateAppToken: @patch("controllers.service_api.wraps.db") @patch("controllers.service_api.wraps.validate_and_get_api_token") - def test_app_api_disabled_raises_forbidden(self, mock_validate_token, mock_db, app): + def test_app_api_disabled_raises_forbidden(self, mock_validate_token, mock_db, app: Flask): 
"""Test that Forbidden is raised when app API is disabled.""" # Arrange mock_api_token = Mock() @@ -240,7 +240,7 @@ class TestCloudEditionBillingResourceCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.FeatureService.get_features") - def test_allows_when_under_limit(self, mock_get_features, mock_validate_token, app): + def test_allows_when_under_limit(self, mock_get_features, mock_validate_token, app: Flask): """Test that request is allowed when under resource limit.""" # Arrange mock_validate_token.return_value = Mock(tenant_id="tenant123") @@ -264,7 +264,7 @@ class TestCloudEditionBillingResourceCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.FeatureService.get_features") - def test_rejects_when_at_limit(self, mock_get_features, mock_validate_token, app): + def test_rejects_when_at_limit(self, mock_get_features, mock_validate_token, app: Flask): """Test that Forbidden is raised when at resource limit.""" # Arrange mock_validate_token.return_value = Mock(tenant_id="tenant123") @@ -287,7 +287,7 @@ class TestCloudEditionBillingResourceCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.FeatureService.get_features") - def test_allows_when_billing_disabled(self, mock_get_features, mock_validate_token, app): + def test_allows_when_billing_disabled(self, mock_get_features, mock_validate_token, app: Flask): """Test that request is allowed when billing is disabled.""" # Arrange mock_validate_token.return_value = Mock(tenant_id="tenant123") @@ -320,7 +320,7 @@ class TestCloudEditionBillingKnowledgeLimitCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.FeatureService.get_features") - def test_rejects_add_segment_in_sandbox(self, mock_get_features, mock_validate_token, app): + def test_rejects_add_segment_in_sandbox(self, 
mock_get_features, mock_validate_token, app: Flask): """Test that add_segment is rejected in SANDBOX plan.""" # Arrange mock_validate_token.return_value = Mock(tenant_id="tenant123") @@ -342,7 +342,7 @@ class TestCloudEditionBillingKnowledgeLimitCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.FeatureService.get_features") - def test_allows_other_operations_in_sandbox(self, mock_get_features, mock_validate_token, app): + def test_allows_other_operations_in_sandbox(self, mock_get_features, mock_validate_token, app: Flask): """Test that non-add_segment operations are allowed in SANDBOX.""" # Arrange mock_validate_token.return_value = Mock(tenant_id="tenant123") @@ -376,7 +376,7 @@ class TestCloudEditionBillingRateLimitCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.FeatureService.get_knowledge_rate_limit") - def test_allows_within_rate_limit(self, mock_get_rate_limit, mock_validate_token, app): + def test_allows_within_rate_limit(self, mock_get_rate_limit, mock_validate_token, app: Flask): """Test that request is allowed when within rate limit.""" # Arrange mock_validate_token.return_value = Mock(tenant_id="tenant123") @@ -406,7 +406,7 @@ class TestCloudEditionBillingRateLimitCheck: @patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.FeatureService.get_knowledge_rate_limit") @patch("controllers.service_api.wraps.db") - def test_rejects_over_rate_limit(self, mock_db, mock_get_rate_limit, mock_validate_token, app): + def test_rejects_over_rate_limit(self, mock_db, mock_get_rate_limit, mock_validate_token, app: Flask): """Test that Forbidden is raised when over rate limit.""" # Arrange mock_validate_token.return_value = Mock(tenant_id="tenant123") @@ -445,7 +445,7 @@ class TestValidateDatasetToken: @patch("controllers.service_api.wraps.db") 
@patch("controllers.service_api.wraps.validate_and_get_api_token") @patch("controllers.service_api.wraps.current_app") - def test_valid_dataset_token(self, mock_current_app, mock_validate_token, mock_db, mock_user_logged_in, app): + def test_valid_dataset_token(self, mock_current_app, mock_validate_token, mock_db, mock_user_logged_in, app: Flask): """Test that valid dataset token allows access.""" # Arrange # Use standard Mock for login_manager @@ -487,7 +487,7 @@ class TestValidateDatasetToken: @patch("controllers.service_api.wraps.db") @patch("controllers.service_api.wraps.validate_and_get_api_token") - def test_dataset_not_found_raises_not_found(self, mock_validate_token, mock_db, app): + def test_dataset_not_found_raises_not_found(self, mock_validate_token, mock_db, app: Flask): """Test that NotFound is raised when dataset doesn't exist.""" # Arrange mock_api_token = Mock() diff --git a/api/tests/unit_tests/controllers/test_swagger.py b/api/tests/unit_tests/controllers/test_swagger.py new file mode 100644 index 0000000000..999f1ae78d --- /dev/null +++ b/api/tests/unit_tests/controllers/test_swagger.py @@ -0,0 +1,72 @@ +"""Swagger JSON rendering tests for Flask-RESTX API blueprints.""" + +import pytest +from flask import Flask + + +def _definition_refs(value: object) -> set[str]: + refs: set[str] = set() + if isinstance(value, dict): + ref = value.get("$ref") + if isinstance(ref, str) and ref.startswith("#/definitions/"): + refs.add(ref.removeprefix("#/definitions/")) + for item in value.values(): + refs.update(_definition_refs(item)) + elif isinstance(value, list): + for item in value: + refs.update(_definition_refs(item)) + return refs + + +@pytest.mark.parametrize( + ("first_kwargs", "second_kwargs"), + [ + ({"min_items": 1}, {"min_items": 2}), + ({"max_items": 1}, {"max_items": 2}), + ({"unique": True}, {"unique": False}), + ], +) +def test_inline_model_name_includes_list_constraints( + first_kwargs: dict[str, object], + second_kwargs: dict[str, object], +): 
+ from flask_restx import fields + + from libs.flask_restx_compat import _inline_model_name + + first_inline_model: dict[object, object] = {"items": fields.List(fields.String, **first_kwargs)} + second_inline_model: dict[object, object] = {"items": fields.List(fields.String, **second_kwargs)} + + assert _inline_model_name(first_inline_model) != _inline_model_name(second_inline_model) + + +def test_swagger_json_endpoints_render(monkeypatch: pytest.MonkeyPatch): + from configs import dify_config + from controllers.console import bp as console_bp + from controllers.service_api import bp as service_api_bp + from controllers.web import bp as web_bp + + monkeypatch.setattr(dify_config, "SWAGGER_UI_ENABLED", True) + + app = Flask(__name__) + app.config["TESTING"] = True + app.config["RESTX_INCLUDE_ALL_MODELS"] = True + app.register_blueprint(console_bp) + app.register_blueprint(web_bp) + app.register_blueprint(service_api_bp) + + client = app.test_client() + + for route in ("/console/api/swagger.json", "/api/swagger.json", "/v1/swagger.json"): + response = client.get(route) + + assert response.status_code == 200 + payload = response.get_json() + assert payload["swagger"] == "2.0" + assert "paths" in payload + assert "definitions" in payload + assert isinstance(payload["definitions"], dict) + missing_refs = _definition_refs(payload) - set(payload["definitions"]) + assert not sorted(ref for ref in missing_refs if ref.startswith("_AnonymousInlineModel")) + + assert app.config["RESTX_INCLUDE_ALL_MODELS"] is True diff --git a/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py b/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py index 9073ae1044..c1a4da8cd3 100644 --- a/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py +++ b/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py @@ -12,12 +12,13 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock 
import MockerFixture from core.agent.output_parser.cot_output_parser import CotAgentOutputParser @pytest.fixture -def mock_action_class(mocker): +def mock_action_class(mocker: MockerFixture): mock_action = MagicMock() mocker.patch( "core.agent.output_parser.cot_output_parser.AgentScratchpadUnit.Action", diff --git a/api/tests/unit_tests/core/agent/strategy/test_plugin.py b/api/tests/unit_tests/core/agent/strategy/test_plugin.py index e0894f1e90..0fea04845d 100644 --- a/api/tests/unit_tests/core/agent/strategy/test_plugin.py +++ b/api/tests/unit_tests/core/agent/strategy/test_plugin.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.strategy.plugin import PluginAgentStrategy @@ -213,7 +214,9 @@ class TestInvoke: (None, None, "msg"), ], ) - def test_invoke_optional_arguments(self, strategy, mocker, conversation_id, app_id, message_id) -> None: + def test_invoke_optional_arguments( + self, strategy, mocker: MockerFixture, conversation_id, app_id, message_id + ) -> None: mock_manager = MagicMock() mock_manager.invoke = MagicMock(return_value=iter([])) diff --git a/api/tests/unit_tests/core/agent/test_base_agent_runner.py b/api/tests/unit_tests/core/agent/test_base_agent_runner.py index db4b293b16..d5fb853ee3 100644 --- a/api/tests/unit_tests/core/agent/test_base_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_base_agent_runner.py @@ -3,6 +3,7 @@ from decimal import Decimal from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.agent.base_agent_runner as module from core.agent.base_agent_runner import BaseAgentRunner @@ -13,7 +14,7 @@ from core.agent.base_agent_runner import BaseAgentRunner @pytest.fixture -def mock_db_session(mocker): +def mock_db_session(mocker: MockerFixture): session = mocker.MagicMock() mocker.patch.object(module.db, "session", session) return session @@ -41,13 +42,13 @@ def runner(mocker, mock_db_session): class 
TestRepack: - def test_sets_empty_if_none(self, runner, mocker): + def test_sets_empty_if_none(self, runner, mocker: MockerFixture): entity = mocker.MagicMock() entity.app_config.prompt_template.simple_prompt_template = None result = runner._repack_app_generate_entity(entity) assert result.app_config.prompt_template.simple_prompt_template == "" - def test_keeps_existing(self, runner, mocker): + def test_keeps_existing(self, runner, mocker: MockerFixture): entity = mocker.MagicMock() entity.app_config.prompt_template.simple_prompt_template = "abc" result = runner._repack_app_generate_entity(entity) @@ -60,7 +61,7 @@ class TestRepack: class TestUpdatePromptTool: - def build_param(self, mocker, **kwargs): + def build_param(self, mocker: MockerFixture, **kwargs): p = mocker.MagicMock() p.form = kwargs.get("form") @@ -75,7 +76,7 @@ class TestUpdatePromptTool: p.required = kwargs.get("required", False) return p - def test_skip_non_llm(self, runner, mocker): + def test_skip_non_llm(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param(mocker, form="NOT_LLM") tool.get_runtime_parameters.return_value = [param] @@ -86,7 +87,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"] == {} - def test_enum_and_required(self, runner, mocker): + def test_enum_and_required(self, runner, mocker: MockerFixture): option = mocker.MagicMock(value="opt1") param = self.build_param( mocker, @@ -104,7 +105,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert "p1" in result.parameters["required"] - def test_skip_file_type_param(self, runner, mocker): + def test_skip_file_type_param(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param(mocker, form=module.ToolParameter.ToolParameterForm.LLM) param.type = module.ToolParameter.ToolParameterType.FILE @@ -116,7 +117,7 @@ class TestUpdatePromptTool: result 
= runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"] == {} - def test_duplicate_required_not_duplicated(self, runner, mocker): + def test_duplicate_required_not_duplicated(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param( @@ -141,7 +142,7 @@ class TestUpdatePromptTool: class TestCreateAgentThought: - def test_with_files(self, runner, mock_db_session, mocker): + def test_with_files(self, runner, mock_db_session, mocker: MockerFixture): mock_thought = mocker.MagicMock(id=10) mocker.patch.object(module, "MessageAgentThought", return_value=mock_thought) @@ -149,7 +150,7 @@ class TestCreateAgentThought: assert result == "10" assert runner.agent_thought_count == 1 - def test_without_files(self, runner, mock_db_session, mocker): + def test_without_files(self, runner, mock_db_session, mocker: MockerFixture): mock_thought = mocker.MagicMock(id=11) mocker.patch.object(module, "MessageAgentThought", return_value=mock_thought) @@ -163,7 +164,7 @@ class TestCreateAgentThought: class TestSaveAgentThought: - def setup_agent(self, mocker): + def setup_agent(self, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1;tool2" agent.tool_labels = {} @@ -175,7 +176,7 @@ class TestSaveAgentThought: with pytest.raises(ValueError): runner.save_agent_thought("id", None, None, None, None, None, None, [], None) - def test_full_update(self, runner, mock_db_session, mocker): + def test_full_update(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -210,7 +211,7 @@ class TestSaveAgentThought: assert agent.tokens == 3 assert "tool1" in json.loads(agent.tool_labels_str) - def test_label_fallback_when_none(self, runner, mock_db_session, mocker): + def test_label_fallback_when_none(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) agent.tool = "unknown_tool" 
mock_db_session.scalar.return_value = agent @@ -220,7 +221,7 @@ class TestSaveAgentThought: labels = json.loads(agent.tool_labels_str) assert "unknown_tool" in labels - def test_json_failure_paths(self, runner, mock_db_session, mocker): + def test_json_failure_paths(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -241,13 +242,13 @@ class TestSaveAgentThought: assert mock_db_session.commit.called - def test_messages_ids_none(self, runner, mock_db_session, mocker): + def test_messages_ids_none(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent runner.save_agent_thought("id", None, None, None, None, None, None, None, None) assert mock_db_session.commit.called - def test_success_dict_serialization(self, runner, mock_db_session, mocker): + def test_success_dict_serialization(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -273,19 +274,19 @@ class TestSaveAgentThought: class TestOrganizeUserPrompt: - def test_no_files(self, runner, mock_db_session, mocker): + def test_no_files(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [] msg = mocker.MagicMock(id="1", query="hello", app_model_config=None) result = runner.organize_agent_user_prompt(msg) assert result.content == "hello" - def test_with_files_no_config(self, runner, mock_db_session, mocker): + def test_with_files_no_config(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] msg = mocker.MagicMock(id="1", query="hello", app_model_config=None) result = runner.organize_agent_user_prompt(msg) assert result.content == "hello" - def test_image_detail_low_fallback(self, runner, mock_db_session, mocker): + def 
test_image_detail_low_fallback(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] file_config = mocker.MagicMock() file_config.image_config = mocker.MagicMock(detail=None) @@ -305,27 +306,27 @@ class TestOrganizeUserPrompt: class TestOrganizeHistory: - def test_empty(self, runner, mock_db_session, mocker): + def test_empty(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.execute.return_value.scalars.return_value.all.return_value = [] mocker.patch.object(module, "extract_thread_messages", return_value=[]) result = runner.organize_agent_history([]) assert result == [] - def test_with_answer_only(self, runner, mock_db_session, mocker): + def test_with_answer_only(self, runner, mock_db_session, mocker: MockerFixture): msg = mocker.MagicMock(id="m1", answer="ans", agent_thoughts=[], app_model_config=None) mock_db_session.execute.return_value.scalars.return_value.all.return_value = [msg] mocker.patch.object(module, "extract_thread_messages", return_value=[msg]) result = runner.organize_agent_history([]) assert any(isinstance(x, module.AssistantPromptMessage) for x in result) - def test_skip_current_message(self, runner, mock_db_session, mocker): + def test_skip_current_message(self, runner, mock_db_session, mocker: MockerFixture): msg = mocker.MagicMock(id="msg_current", agent_thoughts=[], answer="ans", app_model_config=None) mock_db_session.execute.return_value.scalars.return_value.all.return_value = [msg] mocker.patch.object(module, "extract_thread_messages", return_value=[msg]) result = runner.organize_agent_history([]) assert result == [] - def test_with_tool_calls_invalid_json(self, runner, mock_db_session, mocker): + def test_with_tool_calls_invalid_json(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input="invalid", @@ -341,7 +342,7 @@ class TestOrganizeHistory: result = runner.organize_agent_history([]) 
assert isinstance(result, list) - def test_empty_tool_name_split(self, runner, mock_db_session, mocker): + def test_empty_tool_name_split(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock(tool=";", thought="thinking") msg = mocker.MagicMock(id="m5", agent_thoughts=[thought], answer=None, app_model_config=None) @@ -350,7 +351,7 @@ class TestOrganizeHistory: result = runner.organize_agent_history([]) assert isinstance(result, list) - def test_valid_json_tool_flow(self, runner, mock_db_session, mocker): + def test_valid_json_tool_flow(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input=json.dumps({"tool1": {"x": 1}}), @@ -379,7 +380,7 @@ class TestOrganizeHistory: class TestConvertToolToPromptMessageTool: - def test_basic_conversion(self, runner, mocker): + def test_basic_conversion(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") runtime_param = mocker.MagicMock() @@ -404,7 +405,7 @@ class TestConvertToolToPromptMessageTool: prompt_tool, entity = runner._convert_tool_to_prompt_message_tool(tool) assert entity == tool_entity - def test_full_conversion_multiple_params(self, runner, mocker): + def test_full_conversion_multiple_params(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") # LLM param with input_schema override @@ -441,7 +442,7 @@ class TestConvertToolToPromptMessageTool: class TestInitPromptToolsExtended: - def test_agent_tool_branch(self, runner, mocker): + def test_agent_tool_branch(self, runner, mocker: MockerFixture): agent_tool = mocker.MagicMock(tool_name="agent_tool") runner.app_config.agent = mocker.MagicMock(tools=[agent_tool]) mocker.patch.object(runner, "_convert_tool_to_prompt_message_tool", return_value=(MagicMock(), "entity")) @@ -449,7 +450,7 @@ class TestInitPromptToolsExtended: tools, prompts = runner._init_prompt_tools() assert "agent_tool" in tools - def test_exception_in_conversion(self, 
runner, mocker): + def test_exception_in_conversion(self, runner, mocker: MockerFixture): agent_tool = mocker.MagicMock(tool_name="bad_tool") runner.app_config.agent = mocker.MagicMock(tools=[agent_tool]) mocker.patch.object(runner, "_convert_tool_to_prompt_message_tool", side_effect=Exception) @@ -464,7 +465,7 @@ class TestInitPromptToolsExtended: class TestAdditionalCoverage: - def test_update_prompt_with_input_schema(self, runner, mocker): + def test_update_prompt_with_input_schema(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = mocker.MagicMock() @@ -487,7 +488,7 @@ class TestAdditionalCoverage: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"]["p1"]["type"] == "number" - def test_save_agent_thought_existing_labels(self, runner, mock_db_session, mocker): + def test_save_agent_thought_existing_labels(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {"tool1": {"en_US": "existing"}} @@ -498,7 +499,7 @@ class TestAdditionalCoverage: labels = json.loads(agent.tool_labels_str) assert labels["tool1"]["en_US"] == "existing" - def test_save_agent_thought_tool_meta_string(self, runner, mock_db_session, mocker): + def test_save_agent_thought_tool_meta_string(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {} @@ -508,7 +509,7 @@ class TestAdditionalCoverage: runner.save_agent_thought("id", None, None, None, None, "meta_string", None, [], None) assert agent.tool_meta_str == "meta_string" - def test_convert_dataset_retriever_tool(self, runner, mocker): + def test_convert_dataset_retriever_tool(self, runner, mocker: MockerFixture): ds_tool = mocker.MagicMock() ds_tool.entity.identity.name = "ds" ds_tool.entity.description.llm = "desc" @@ -525,7 +526,7 @@ class TestAdditionalCoverage: prompt = 
runner._convert_dataset_retriever_tool_to_prompt_message_tool(ds_tool) assert prompt is not None - def test_organize_user_prompt_with_file_objects(self, runner, mock_db_session, mocker): + def test_organize_user_prompt_with_file_objects(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] file_config = mocker.MagicMock() @@ -544,7 +545,7 @@ class TestAdditionalCoverage: result = runner.organize_agent_user_prompt(msg) assert result is not None - def test_organize_history_without_tool_names(self, runner, mock_db_session, mocker): + def test_organize_history_without_tool_names(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock(tool=None, thought="thinking") msg = mocker.MagicMock(id="m3", agent_thoughts=[thought], answer=None, app_model_config=None) @@ -554,7 +555,7 @@ class TestAdditionalCoverage: result = runner.organize_agent_history([]) assert isinstance(result, list) - def test_organize_history_multiple_tools_split(self, runner, mock_db_session, mocker): + def test_organize_history_multiple_tools_split(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1;tool2", tool_input=json.dumps({"tool1": {}, "tool2": {}}), @@ -572,7 +573,7 @@ class TestAdditionalCoverage: # ================= Additional Surgical Coverage ================= - def test_convert_tool_select_enum_branch(self, runner, mocker): + def test_convert_tool_select_enum_branch(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") param = mocker.MagicMock() @@ -599,7 +600,7 @@ class TestAdditionalCoverage: class TestConvertDatasetRetrieverTool: - def test_required_param_added(self, runner, mocker): + def test_required_param_added(self, runner, mocker: MockerFixture): ds_tool = mocker.MagicMock() ds_tool.entity.identity.name = "ds" ds_tool.entity.description.llm = "desc" @@ -619,7 +620,7 @@ class TestConvertDatasetRetrieverTool: 
class TestBaseAgentRunnerInit: - def test_init_sets_stream_tool_call_and_files(self, mocker): + def test_init_sets_stream_tool_call_and_files(self, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = 2 mocker.patch.object(module.db, "session", session) @@ -662,7 +663,7 @@ class TestBaseAgentRunnerInit: class TestBaseAgentRunnerCoverage: - def test_convert_tool_skips_non_llm_param(self, runner, mocker): + def test_convert_tool_skips_non_llm_param(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") param = mocker.MagicMock() @@ -680,7 +681,7 @@ class TestBaseAgentRunnerCoverage: assert prompt_tool.parameters["properties"] == {} - def test_init_prompt_tools_adds_dataset_tools(self, runner, mocker): + def test_init_prompt_tools_adds_dataset_tools(self, runner, mocker: MockerFixture): dataset_tool = mocker.MagicMock() dataset_tool.entity.identity.name = "ds" runner.dataset_tools = [dataset_tool] @@ -692,7 +693,7 @@ class TestBaseAgentRunnerCoverage: assert tools["ds"] == dataset_tool assert len(prompt_tools) == 1 - def test_update_prompt_message_tool_select_enum(self, runner, mocker): + def test_update_prompt_message_tool_select_enum(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() option1 = mocker.MagicMock(value="A") @@ -716,7 +717,7 @@ class TestBaseAgentRunnerCoverage: assert result.parameters["properties"]["select_param"]["enum"] == ["A", "B"] - def test_save_agent_thought_json_dumps_fallbacks(self, runner, mock_db_session, mocker): + def test_save_agent_thought_json_dumps_fallbacks(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {} @@ -754,7 +755,7 @@ class TestBaseAgentRunnerCoverage: assert isinstance(agent.observation, str) assert isinstance(agent.tool_meta_str, str) - def test_save_agent_thought_skips_empty_tool_name(self, runner, mock_db_session, mocker): + def test_save_agent_thought_skips_empty_tool_name(self, 
runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1;;" agent.tool_labels = {} @@ -768,7 +769,7 @@ class TestBaseAgentRunnerCoverage: labels = json.loads(agent.tool_labels_str) assert "" not in labels - def test_organize_history_includes_system_prompt(self, runner, mock_db_session, mocker): + def test_organize_history_includes_system_prompt(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.execute.return_value.scalars.return_value.all.return_value = [] mocker.patch.object(module, "extract_thread_messages", return_value=[]) @@ -778,7 +779,7 @@ class TestBaseAgentRunnerCoverage: assert system_message in result - def test_organize_history_tool_inputs_and_observation_none(self, runner, mock_db_session, mocker): + def test_organize_history_tool_inputs_and_observation_none(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input=None, diff --git a/api/tests/unit_tests/core/agent/test_cot_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_agent_runner.py index cde8820e00..314305d371 100644 --- a/api/tests/unit_tests/core/agent/test_cot_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_agent_runner.py @@ -2,6 +2,7 @@ import json from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.cot_agent_runner import CotAgentRunner from core.agent.entities import AgentScratchpadUnit @@ -25,7 +26,7 @@ class DummyRunner(CotAgentRunner): @pytest.fixture -def runner(mocker): +def runner(mocker: MockerFixture): # Prevent BaseAgentRunner __init__ from hitting database mocker.patch( "core.agent.base_agent_runner.BaseAgentRunner.organize_agent_history", @@ -165,7 +166,7 @@ class TestHandleInvokeAction: response, meta = runner._handle_invoke_action(action, {}, []) assert "there is not a tool named" in response - def test_tool_with_json_string_args(self, runner, mocker): + def 
test_tool_with_json_string_args(self, runner, mocker: MockerFixture): action = AgentScratchpadUnit.Action(action_name="tool", action_input=json.dumps({"a": 1})) tool_instance = MagicMock() tool_instances = {"tool": tool_instance} @@ -180,7 +181,7 @@ class TestHandleInvokeAction: class TestOrganizeHistoricPromptMessages: - def test_empty_history(self, runner, mocker): + def test_empty_history(self, runner, mocker: MockerFixture): mocker.patch( "core.agent.cot_agent_runner.AgentHistoryPromptTransform.get_prompt", return_value=[], @@ -190,7 +191,7 @@ class TestOrganizeHistoricPromptMessages: class TestRun: - def test_run_handles_empty_parser_output(self, runner, mocker): + def test_run_handles_empty_parser_output(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -202,7 +203,7 @@ class TestRun: results = list(runner.run(message, "query", {})) assert isinstance(results, list) - def test_run_with_action_and_tool_invocation(self, runner, mocker): + def test_run_with_action_and_tool_invocation(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -223,7 +224,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {"tool": MagicMock()})) - def test_run_respects_max_iteration_boundary(self, runner, mocker): + def test_run_respects_max_iteration_boundary(self, runner, mocker: MockerFixture): runner.app_config.agent.max_iteration = 1 message = MagicMock() message.id = "msg-id" @@ -245,7 +246,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {"tool": MagicMock()})) - def test_run_basic_flow(self, runner, mocker): + def test_run_basic_flow(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -257,7 +258,7 @@ class TestRun: results = list(runner.run(message, "query", {"name": "John"})) assert results - def test_run_max_iteration_error(self, runner, mocker): + def 
test_run_max_iteration_error(self, runner, mocker: MockerFixture): runner.app_config.agent.max_iteration = 0 message = MagicMock() message.id = "msg-id" @@ -272,7 +273,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {})) - def test_run_increase_usage_aggregation(self, runner, mocker): + def test_run_increase_usage_aggregation(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" runner.app_config.agent.max_iteration = 2 @@ -329,7 +330,7 @@ class TestRun: assert final_usage.completion_price == 2 assert final_usage.total_price == 4 - def test_run_when_no_action_branch(self, runner, mocker): + def test_run_when_no_action_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -341,7 +342,7 @@ class TestRun: results = list(runner.run(message, "query", {})) assert results[-1].delta.message.content == "" - def test_run_usage_missing_key_branch(self, runner, mocker): + def test_run_usage_missing_key_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -354,7 +355,7 @@ class TestRun: list(runner.run(message, "query", {})) - def test_run_prompt_tool_update_branch(self, runner, mocker): + def test_run_prompt_tool_update_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -410,7 +411,7 @@ class TestRun: class TestInitReactState: - def test_init_react_state_resets_state(self, runner, mocker): + def test_init_react_state_resets_state(self, runner, mocker: MockerFixture): mocker.patch.object(runner, "_organize_historic_prompt_messages", return_value=["historic"]) runner._agent_scratchpad = ["old"] runner._query = "old" @@ -423,7 +424,7 @@ class TestInitReactState: class TestHandleInvokeActionExtended: - def test_tool_with_invalid_json_string_args(self, runner, mocker): + def test_tool_with_invalid_json_string_args(self, runner, mocker: MockerFixture): action = 
AgentScratchpadUnit.Action(action_name="tool", action_input="not-json") tool_instance = MagicMock() tool_instances = {"tool": tool_instance} @@ -457,7 +458,7 @@ class TestFillInputsEdgeCases: class TestOrganizeHistoricPromptMessagesExtended: - def test_user_message_flushes_scratchpad(self, runner, mocker): + def test_user_message_flushes_scratchpad(self, runner, mocker: MockerFixture): from graphon.model_runtime.entities.message_entities import UserPromptMessage user_message = UserPromptMessage(content="Hi") @@ -480,7 +481,7 @@ class TestOrganizeHistoricPromptMessagesExtended: with pytest.raises(NotImplementedError): runner._organize_historic_prompt_messages([]) - def test_agent_history_transform_invocation(self, runner, mocker): + def test_agent_history_transform_invocation(self, runner, mocker: MockerFixture): mock_transform = MagicMock() mock_transform.get_prompt.return_value = [] @@ -495,7 +496,7 @@ class TestOrganizeHistoricPromptMessagesExtended: class TestRunAdditionalBranches: - def test_run_with_no_action_final_answer_empty(self, runner, mocker): + def test_run_with_no_action_final_answer_empty(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -507,7 +508,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert any(hasattr(r, "delta") for r in results) - def test_run_with_final_answer_action_string(self, runner, mocker): + def test_run_with_final_answer_action_string(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -521,7 +522,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert results[-1].delta.message.content == "done" - def test_run_with_final_answer_action_dict(self, runner, mocker): + def test_run_with_final_answer_action_dict(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -535,7 +536,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", 
{})) assert json.loads(results[-1].delta.message.content) == {"a": 1} - def test_run_with_string_final_answer(self, runner, mocker): + def test_run_with_string_final_answer(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" diff --git a/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py index ea8cc8aa86..8e7093fd12 100644 --- a/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from pytest_mock import MockerFixture from core.agent.cot_chat_agent_runner import CotChatAgentRunner from graphon.model_runtime.entities.message_entities import TextPromptMessageContent @@ -55,7 +56,7 @@ def runner(): class TestOrganizeSystemPrompt: - def test_organize_system_prompt_success(self, runner, mocker): + def test_organize_system_prompt_success(self, runner, mocker: MockerFixture): first_prompt = "Instruction: {{instruction}}, Tools: {{tools}}, Names: {{tool_names}}" runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt(first_prompt))) @@ -154,7 +155,7 @@ class TestOrganizeUserQuery: class TestOrganizePromptMessages: - def test_no_scratchpad(self, runner, mocker): + def test_no_scratchpad(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -164,7 +165,7 @@ class TestOrganizePromptMessages: assert "query" in result runner._organize_historic_prompt_messages.assert_called_once() - def test_with_final_scratchpad(self, runner, mocker): + def test_with_final_scratchpad(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = 
MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -177,7 +178,7 @@ class TestOrganizePromptMessages: combined = "".join([m.content for m in assistant_msgs if isinstance(m.content, str)]) assert "Final Answer: done" in combined - def test_with_thought_action_observation(self, runner, mocker): + def test_with_thought_action_observation(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -197,7 +198,7 @@ class TestOrganizePromptMessages: assert "Action: action" in combined assert "Observation: observe" in combined - def test_multiple_units_mixed(self, runner, mocker): + def test_multiple_units_mixed(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) diff --git a/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py index 2f5873d865..0d949c357d 100644 --- a/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py @@ -1,6 +1,7 @@ import json import pytest +from pytest_mock import MockerFixture from core.agent.cot_completion_agent_runner import CotCompletionAgentRunner from graphon.model_runtime.entities.message_entities import ( @@ -74,7 +75,7 @@ class TestOrganizeInstructionPrompt: class TestOrganizeHistoricPrompt: - def test_with_user_and_assistant_string(self, runner, mocker): + def test_with_user_and_assistant_string(self, runner, mocker: MockerFixture): user_msg = UserPromptMessage(content="Hello") assistant_msg = AssistantPromptMessage(content="Hi 
there") @@ -89,7 +90,7 @@ class TestOrganizeHistoricPrompt: assert "Question: Hello" in result assert "Hi there" in result - def test_assistant_list_with_text_content(self, runner, mocker): + def test_assistant_list_with_text_content(self, runner, mocker: MockerFixture): text_content = TextPromptMessageContent(data="Partial answer") assistant_msg = AssistantPromptMessage(content=[text_content]) @@ -103,7 +104,7 @@ class TestOrganizeHistoricPrompt: assert "Partial answer" in result - def test_assistant_list_with_non_text_content_ignored(self, runner, mocker): + def test_assistant_list_with_non_text_content_ignored(self, runner, mocker: MockerFixture): non_text_content = ImagePromptMessageContent(format="url", mime_type="image/png") assistant_msg = AssistantPromptMessage(content=[non_text_content]) @@ -116,7 +117,7 @@ class TestOrganizeHistoricPrompt: result = runner._organize_historic_prompt() assert result == "" - def test_empty_history(self, runner, mocker): + def test_empty_history(self, runner, mocker: MockerFixture): mocker.patch.object( runner, "_organize_historic_prompt_messages", @@ -136,7 +137,7 @@ class TestOrganizePromptMessages: def test_full_flow_with_scratchpad( self, runner, - mocker, + mocker: MockerFixture, dummy_app_config_factory, dummy_agent_config_factory, dummy_prompt_entity_factory, @@ -171,7 +172,12 @@ class TestOrganizePromptMessages: assert "Question: What is Python?" 
in content def test_no_scratchpad( - self, runner, mocker, dummy_app_config_factory, dummy_agent_config_factory, dummy_prompt_entity_factory + self, + runner, + mocker: MockerFixture, + dummy_app_config_factory, + dummy_agent_config_factory, + dummy_prompt_entity_factory, ): template = "SYS {{historic_messages}} {{agent_scratchpad}} {{query}}" @@ -198,7 +204,7 @@ class TestOrganizePromptMessages: def test_partial_scratchpad_units( self, runner, - mocker, + mocker: MockerFixture, thought, action, observation, diff --git a/api/tests/unit_tests/core/agent/test_fc_agent_runner.py b/api/tests/unit_tests/core/agent/test_fc_agent_runner.py index 17ab5babcb..3a4347e723 100644 --- a/api/tests/unit_tests/core/agent/test_fc_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_fc_agent_runner.py @@ -3,6 +3,7 @@ from typing import Any from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.errors import AgentMaxIterationError from core.agent.fc_agent_runner import FunctionCallAgentRunner @@ -68,7 +69,7 @@ class DummyResult: @pytest.fixture -def runner(mocker): +def runner(mocker: MockerFixture): # Completely bypass BaseAgentRunner __init__ to avoid DB / Flask context mocker.patch( "core.agent.base_agent_runner.BaseAgentRunner.__init__", @@ -230,7 +231,7 @@ class TestOrganizeUserQuery: result = runner._organize_user_query(None, []) assert len(result) == 1 - def test_with_files_uses_image_detail_config(self, runner, mocker): + def test_with_files_uses_image_detail_config(self, runner, mocker: MockerFixture): file_content = TextPromptMessageContent(data="file-content") mock_to_prompt = mocker.patch( "core.agent.fc_agent_runner.file_manager.to_prompt_message_content", @@ -352,7 +353,7 @@ class TestRunMethod: assert len(outputs) == 1 assert runner.save_agent_thought.call_args.kwargs["thought"] == "hi" - def test_run_streaming_tool_call_inputs_type_error(self, runner, mocker): + def 
test_run_streaming_tool_call_inputs_type_error(self, runner, mocker: MockerFixture): message = MagicMock(id="m1") runner.stream_tool_call = True @@ -398,7 +399,7 @@ class TestRunMethod: outputs = list(runner.run(message, "query")) assert len(outputs) >= 1 - def test_run_with_tool_instance_and_files(self, runner, mocker): + def test_run_with_tool_instance_and_files(self, runner, mocker: MockerFixture): message = MagicMock(id="m1") tool_call = MagicMock() diff --git a/api/tests/unit_tests/core/agent/test_plugin_entities.py b/api/tests/unit_tests/core/agent/test_plugin_entities.py index 9955190aca..aa3098a2a1 100644 --- a/api/tests/unit_tests/core/agent/test_plugin_entities.py +++ b/api/tests/unit_tests/core/agent/test_plugin_entities.py @@ -9,6 +9,7 @@ mocking; ensure entity invariants and validation rules remain stable. import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture from core.agent.plugin_entities import ( AgentFeature, @@ -28,12 +29,12 @@ from core.tools.entities.tool_entities import ToolIdentity, ToolProviderIdentity @pytest.fixture -def mock_identity(mocker): +def mock_identity(mocker: MockerFixture): return mocker.MagicMock(spec=AgentStrategyIdentity) @pytest.fixture -def mock_provider_identity(mocker): +def mock_provider_identity(mocker: MockerFixture): return mocker.MagicMock(spec=AgentStrategyProviderIdentity) @@ -47,7 +48,7 @@ class TestAgentStrategyParameterType: "enum_member", list(AgentStrategyParameter.AgentStrategyParameterType), ) - def test_as_normal_type_calls_external_function(self, mocker, enum_member) -> None: + def test_as_normal_type_calls_external_function(self, mocker: MockerFixture, enum_member) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.as_normal_type", return_value="normalized", @@ -58,7 +59,7 @@ class TestAgentStrategyParameterType: mock_func.assert_called_once_with(enum_member) assert result == "normalized" - def test_as_normal_type_propagates_exception(self, mocker) -> None: 
+ def test_as_normal_type_propagates_exception(self, mocker: MockerFixture) -> None: enum_member = AgentStrategyParameter.AgentStrategyParameterType.STRING mocker.patch( "core.agent.plugin_entities.as_normal_type", @@ -79,7 +80,7 @@ class TestAgentStrategyParameterType: (AgentStrategyParameter.AgentStrategyParameterType.FILES, []), ], ) - def test_cast_value_calls_external_function(self, mocker, enum_member, value) -> None: + def test_cast_value_calls_external_function(self, mocker: MockerFixture, enum_member, value) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.cast_parameter_value", return_value="casted", @@ -90,7 +91,7 @@ class TestAgentStrategyParameterType: mock_func.assert_called_once_with(enum_member, value) assert result == "casted" - def test_cast_value_propagates_exception(self, mocker) -> None: + def test_cast_value_propagates_exception(self, mocker: MockerFixture) -> None: enum_member = AgentStrategyParameter.AgentStrategyParameterType.STRING mocker.patch( "core.agent.plugin_entities.cast_parameter_value", @@ -136,7 +137,7 @@ class TestAgentStrategyParameter: assert any(error["loc"] == ("type",) for error in exc_info.value.errors()) - def test_init_frontend_parameter_calls_external(self, mocker) -> None: + def test_init_frontend_parameter_calls_external(self, mocker: MockerFixture) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.init_frontend_parameter", return_value="frontend", @@ -153,7 +154,7 @@ class TestAgentStrategyParameter: mock_func.assert_called_once_with(param, param.type, "value") assert result == "frontend" - def test_init_frontend_parameter_propagates_exception(self, mocker) -> None: + def test_init_frontend_parameter_propagates_exception(self, mocker: MockerFixture) -> None: mocker.patch( "core.agent.plugin_entities.init_frontend_parameter", side_effect=RuntimeError("error"), diff --git a/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py 
b/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py index 1c5b6ed944..6dbf301f65 100644 --- a/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py +++ b/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py @@ -10,7 +10,7 @@ class TestGetParametersFromFeatureDict: """Test suite for get_parameters_from_feature_dict""" @pytest.fixture - def mock_config(self, monkeypatch): + def mock_config(self, monkeypatch: pytest.MonkeyPatch): """Mock dify_config values""" mock = MagicMock() mock.UPLOAD_IMAGE_FILE_SIZE_LIMIT = 1 @@ -23,7 +23,7 @@ class TestGetParametersFromFeatureDict: return mock @pytest.fixture - def mock_default_file_limits(self, monkeypatch): + def mock_default_file_limits(self, monkeypatch: pytest.MonkeyPatch): """Mock DEFAULT_FILE_NUMBER_LIMITS constant""" monkeypatch.setattr(parameters_mapping, "DEFAULT_FILE_NUMBER_LIMITS", 99) return 99 diff --git a/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py b/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py index 013ed0cbc4..bd4ca5ff85 100644 --- a/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py +++ b/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.common.sensitive_word_avoidance.manager import ( SensitiveWordAvoidanceConfigManager, @@ -26,7 +27,7 @@ class TestSensitiveWordAvoidanceConfigManagerConvert: # Assert assert result is None - def test_convert_returns_entity_when_enabled(self, mocker): + def test_convert_returns_entity_when_enabled(self, mocker: MockerFixture): # Arrange mock_entity = MagicMock() mocker.patch( @@ -48,7 +49,7 @@ class TestSensitiveWordAvoidanceConfigManagerConvert: # Assert assert result == mock_entity - def 
test_convert_enabled_without_type_or_config(self, mocker): + def test_convert_enabled_without_type_or_config(self, mocker: MockerFixture): # Arrange mock_entity = MagicMock() patched = mocker.patch( @@ -135,7 +136,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: with pytest.raises(ValueError, match="must be a dict"): SensitiveWordAvoidanceConfigManager.validate_and_set_defaults(tenant_id="tenant1", config=config) - def test_validate_calls_moderation_factory(self, mocker): + def test_validate_calls_moderation_factory(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" @@ -159,7 +160,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: assert result_config["sensitive_word_avoidance"]["enabled"] is True assert fields == ["sensitive_word_avoidance"] - def test_validate_sets_empty_dict_when_config_none(self, mocker): + def test_validate_sets_empty_dict_when_config_none(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" @@ -179,7 +180,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: # Assert mock_validate.assert_called_once_with(name="mock_type", tenant_id="tenant1", config={}) - def test_validate_only_structure_validate_skips_factory(self, mocker): + def test_validate_only_structure_validate_skips_factory(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py index 992b580376..359b04070b 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py +++ 
b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.agent.manager import AgentConfigManager @@ -84,7 +85,7 @@ class TestAgentConfigManagerConvert: assert result.strategy.name == "CHAIN_OF_THOUGHT" - def test_convert_skips_disabled_tools(self, mocker, base_config): + def test_convert_skips_disabled_tools(self, mocker: MockerFixture, base_config): # Patch AgentEntity to bypass pydantic validation mock_agent_entity = mocker.patch( "core.app.app_config.easy_ui_based_app.agent.manager.AgentEntity", @@ -128,7 +129,7 @@ class TestAgentConfigManagerConvert: mock_validate.assert_called_once() mock_agent_entity.assert_called_once() - def test_convert_tool_requires_minimum_keys(self, mocker, base_config): + def test_convert_tool_requires_minimum_keys(self, mocker: MockerFixture, base_config): mock_validate = mocker.patch( "core.app.app_config.easy_ui_based_app.agent.manager.AgentToolEntity.model_validate", return_value=MagicMock(), diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py index a688e2a5c5..3a239eac0e 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py @@ -2,6 +2,7 @@ import uuid from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.dataset.manager import DatasetConfigManager from core.entities.agent_entities import PlanningStrategy @@ -69,7 +70,7 @@ class TestDatasetConfigManagerConvert: assert result.dataset_ids == [valid_uuid] assert result.retrieve_config.query_variable == "query" - def test_convert_single_with_metadata_configs(self, valid_uuid, 
mocker): + def test_convert_single_with_metadata_configs(self, valid_uuid, mocker: MockerFixture): mock_retrieve_config = MagicMock() mock_entity = MagicMock() mock_entity.dataset_ids = [valid_uuid] @@ -258,7 +259,7 @@ class TestExtractDatasetConfig: with pytest.raises(ValueError): DatasetConfigManager.extract_dataset_config_for_legacy_compatibility("tenant1", AppMode.CHAT, config) - def test_extract_invalid_uuid(self, mocker): + def test_extract_invalid_uuid(self, mocker: MockerFixture): invalid_uuid = "not-a-uuid" config = { "agent_mode": { @@ -270,7 +271,7 @@ class TestExtractDatasetConfig: with pytest.raises(ValueError): DatasetConfigManager.extract_dataset_config_for_legacy_compatibility("tenant1", AppMode.CHAT, config) - def test_extract_dataset_not_exists(self, valid_uuid, mocker): + def test_extract_dataset_not_exists(self, valid_uuid, mocker: MockerFixture): mocker.patch( "core.app.app_config.easy_ui_based_app.dataset.manager.DatasetService.get_dataset", return_value=None, @@ -292,7 +293,7 @@ class TestExtractDatasetConfig: class TestIsDatasetExists: - def test_dataset_exists_true(self, mocker, valid_uuid): + def test_dataset_exists_true(self, mocker: MockerFixture, valid_uuid): mock_dataset = MagicMock() mock_dataset.tenant_id = "tenant1" mocker.patch( @@ -302,14 +303,14 @@ class TestIsDatasetExists: assert DatasetConfigManager.is_dataset_exists("tenant1", valid_uuid) - def test_dataset_exists_false_when_not_found(self, mocker, valid_uuid): + def test_dataset_exists_false_when_not_found(self, mocker: MockerFixture, valid_uuid): mocker.patch( "core.app.app_config.easy_ui_based_app.dataset.manager.DatasetService.get_dataset", return_value=None, ) assert not DatasetConfigManager.is_dataset_exists("tenant1", valid_uuid) - def test_dataset_exists_false_when_tenant_mismatch(self, mocker, valid_uuid): + def test_dataset_exists_false_when_tenant_mismatch(self, mocker: MockerFixture, valid_uuid): mock_dataset = MagicMock() mock_dataset.tenant_id = "other" 
mocker.patch( diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py index 186b4a501d..e5b581b6a0 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter from core.entities.model_entities import ModelStatus @@ -16,7 +17,7 @@ from graphon.model_runtime.entities.model_entities import ModelPropertyKey class TestModelConfigConverter: @pytest.fixture(autouse=True) - def patch_response_entity(self, mocker): + def patch_response_entity(self, mocker: MockerFixture): """ Patch ModelConfigWithCredentialsEntity to bypass Pydantic validation and return a simple namespace object instead. 
@@ -69,7 +70,7 @@ class TestModelConfigConverter: return bundle @pytest.fixture - def patch_provider_manager(self, mocker, mock_provider_bundle): + def patch_provider_manager(self, mocker: MockerFixture, mock_provider_bundle): mock_manager = MagicMock() mock_manager.get_provider_model_bundle.return_value = mock_provider_bundle mocker.patch( @@ -99,7 +100,7 @@ class TestModelConfigConverter: assert result.parameters == {"temperature": 0.7} assert result.stop == ["\n"] - def test_convert_mode_from_schema_valid(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_mode_from_schema_valid(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_app_config.model.mode = None mock_provider_bundle.model_type_instance.get_model_schema.return_value.model_properties = { @@ -116,7 +117,9 @@ class TestModelConfigConverter: result = ModelConfigConverter.convert(mock_app_config) assert result.mode == LLMMode.COMPLETION - def test_convert_mode_from_schema_invalid_fallback(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_mode_from_schema_invalid_fallback( + self, mock_app_config, mock_provider_bundle, mocker: MockerFixture + ): mock_provider_bundle.model_type_instance.get_model_schema.return_value.model_properties = { ModelPropertyKey.MODE: "invalid" } @@ -135,7 +138,7 @@ class TestModelConfigConverter: # Credential Errors # ============================= - def test_convert_credentials_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_credentials_none_raises(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.configuration.get_current_credentials.return_value = None mock_manager = MagicMock() @@ -152,7 +155,7 @@ class TestModelConfigConverter: # Provider Model Errors # ============================= - def test_convert_provider_model_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_provider_model_none_raises(self, 
mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.configuration.get_provider_model.return_value = None mock_manager = MagicMock() @@ -174,7 +177,7 @@ class TestModelConfigConverter: ], ) def test_convert_provider_model_status_errors( - self, mock_app_config, mock_provider_bundle, mocker, status, expected_exception + self, mock_app_config, mock_provider_bundle, mocker: MockerFixture, status, expected_exception ): mock_provider = MagicMock() mock_provider.status = status @@ -194,7 +197,7 @@ class TestModelConfigConverter: # Schema Errors # ============================= - def test_convert_model_schema_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_model_schema_none_raises(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.model_type_instance.get_model_schema.return_value = None mock_manager = MagicMock() diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py index 68bca485bb..72e334004e 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture # Target from core.app.app_config.easy_ui_based_app.model_config.manager import ModelConfigManager @@ -107,7 +108,9 @@ class TestModelConfigManager: # validate_and_set_defaults # ========================================================== - def test_validate_and_set_defaults_success(self, mocker, valid_config, provider_entities, valid_model_list): + def test_validate_and_set_defaults_success( + self, mocker: MockerFixture, valid_config, provider_entities, valid_model_list + ): self._patch_model_assembly( mocker, provider_entities=provider_entities, 
@@ -127,35 +130,37 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="object type"): ModelConfigManager.validate_and_set_defaults("tenant1", {"model": "invalid"}) - def test_validate_and_set_defaults_missing_provider(self, mocker, provider_entities): + def test_validate_and_set_defaults_missing_provider(self, mocker: MockerFixture, provider_entities): config = {"model": {"name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.provider is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_invalid_provider(self, mocker, provider_entities): + def test_validate_and_set_defaults_invalid_provider(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "invalid/provider", "name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.provider is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_missing_name(self, mocker, provider_entities): + def test_validate_and_set_defaults_missing_name(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "openai/gpt", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.name is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_empty_models(self, mocker, provider_entities): + def test_validate_and_set_defaults_empty_models(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "openai/gpt", "name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with 
pytest.raises(ValueError, match="must be in the specified model list"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_invalid_model_name(self, mocker, provider_entities, valid_model_list): + def test_validate_and_set_defaults_invalid_model_name( + self, mocker: MockerFixture, provider_entities, valid_model_list + ): config = {"model": {"provider": "openai/gpt", "name": "invalid", "completion_params": {}}} self._patch_model_assembly( mocker, @@ -166,7 +171,7 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="must be in the specified model list"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_default_mode_when_missing(self, mocker, provider_entities): + def test_validate_and_set_defaults_default_mode_when_missing(self, mocker: MockerFixture, provider_entities): model = MagicMock() model.model = "gpt-4" model.model_properties = {} @@ -178,7 +183,9 @@ class TestModelConfigManager: assert updated_config["model"]["mode"] == "completion" - def test_validate_and_set_defaults_missing_completion_params(self, mocker, provider_entities, valid_model_list): + def test_validate_and_set_defaults_missing_completion_params( + self, mocker: MockerFixture, provider_entities, valid_model_list + ): config = {"model": {"provider": "openai/gpt", "name": "gpt-4"}} self._patch_model_assembly( mocker, @@ -189,7 +196,7 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="completion_params is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_provider_without_slash_converted(self, mocker, valid_model_list): + def test_validate_and_set_defaults_provider_without_slash_converted(self, mocker: MockerFixture, valid_model_list): """ Covers branch where provider does not contain '/' and ModelProviderID conversion is triggered (line 64). 
diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py index fd49072cd5..62e1d22129 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py @@ -1,6 +1,8 @@ +from collections import UserString from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.prompt_template.manager import ( PromptTemplateConfigManager, @@ -11,21 +13,25 @@ from core.app.app_config.easy_ui_based_app.prompt_template.manager import ( # ----------------------------- -class DummyEnumValue: +class DummyEnumValue(UserString): def __init__(self, value): + super().__init__(value) self.value = value class DummyPromptType: def __init__(self): - self.SIMPLE = "simple" - self.ADVANCED = "advanced" + self.SIMPLE = DummyEnumValue("simple") + self.ADVANCED = DummyEnumValue("advanced") def value_of(self, value): - return value + for enum_value in self: + if enum_value.value == value: + return enum_value + raise ValueError(f"invalid prompt type value {value}") def __iter__(self): - return iter([DummyEnumValue("simple"), DummyEnumValue("advanced")]) + return iter([self.SIMPLE, self.ADVANCED]) # ----------------------------- @@ -38,7 +44,7 @@ class TestPromptTemplateConfigManagerConvert: with pytest.raises(ValueError, match="prompt_type is required"): PromptTemplateConfigManager.convert({}) - def test_convert_simple_prompt(self, mocker): + def test_convert_simple_prompt(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() @@ -56,7 +62,7 @@ class TestPromptTemplateConfigManagerConvert: assert result == "simple_entity" mock_prompt_entity_cls.assert_called_once_with(prompt_type="simple", 
simple_prompt_template="hello") - def test_convert_advanced_chat_valid(self, mocker): + def test_convert_advanced_chat_valid(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mock_prompt_entity_cls.return_value = "advanced_entity" @@ -97,7 +103,7 @@ class TestPromptTemplateConfigManagerConvert: {"text": "hi", "role": 123}, ], ) - def test_convert_advanced_invalid_message_fields(self, mocker, message): + def test_convert_advanced_invalid_message_fields(self, mocker: MockerFixture, message): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() @@ -114,7 +120,7 @@ class TestPromptTemplateConfigManagerConvert: with pytest.raises(ValueError): PromptTemplateConfigManager.convert(config) - def test_convert_advanced_completion_with_roles(self, mocker): + def test_convert_advanced_completion_with_roles(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mock_prompt_entity_cls.return_value = "advanced_entity" @@ -154,7 +160,7 @@ class TestValidateAndSetDefaults: def setup_method(self): self.valid_model = {"mode": "chat"} - def _patch_prompt_type(self, mocker): + def _patch_prompt_type(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mocker.patch( @@ -163,7 +169,7 @@ class TestValidateAndSetDefaults: ) return mock_prompt_entity_cls - def test_default_prompt_type_set(self, mocker): + def test_default_prompt_type_set(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = {"model": self.valid_model} @@ -173,7 +179,7 @@ class TestValidateAndSetDefaults: assert result["prompt_type"] == "simple" assert isinstance(keys, list) - def test_invalid_prompt_type_raises(self, mocker): + def test_invalid_prompt_type_raises(self, mocker: MockerFixture): class InvalidEnum(DummyPromptType): def __iter__(self): return 
iter([DummyEnumValue("valid")]) @@ -191,7 +197,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_invalid_chat_prompt_config_type(self, mocker): + def test_invalid_chat_prompt_config_type(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -203,7 +209,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_simple_mode_invalid_pre_prompt_type(self, mocker): + def test_simple_mode_invalid_pre_prompt_type(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -215,7 +221,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_requires_one_config(self, mocker): + def test_advanced_requires_one_config(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -228,7 +234,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_invalid_model_mode(self, mocker): + def test_advanced_invalid_model_mode(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -240,7 +246,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_chat_prompt_length_exceeds(self, mocker): + def test_advanced_chat_prompt_length_exceeds(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -252,7 +258,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_completion_prefix_defaults_set_when_empty(self, mocker): + def test_completion_prefix_defaults_set_when_empty(self, mocker: 
MockerFixture): self._patch_prompt_type(mocker) config = { diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py index d9fe7004ff..b82417cfed 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.variables.manager import ( BasicVariablesConfigManager, @@ -15,7 +16,7 @@ class TestBasicVariablesConfigManagerConvert: assert variables == [] assert external == [] - def test_convert_external_data_tools_enabled_and_disabled(self, mocker): + def test_convert_external_data_tools_enabled_and_disabled(self, mocker: MockerFixture): config = { "external_data_tools": [ {"enabled": False}, @@ -232,7 +233,7 @@ class TestValidateExternalDataToolsAndSetDefaults: with pytest.raises(ValueError): BasicVariablesConfigManager.validate_external_data_tools_and_set_defaults("tenant", config) - def test_validate_disabled_tool_skipped(self, mocker): + def test_validate_disabled_tool_skipped(self, mocker: MockerFixture): config = {"external_data_tools": [{"enabled": False}]} spy = mocker.patch( @@ -250,7 +251,7 @@ class TestValidateExternalDataToolsAndSetDefaults: with pytest.raises(ValueError): BasicVariablesConfigManager.validate_external_data_tools_and_set_defaults("tenant", config) - def test_validate_enabled_tool_calls_factory(self, mocker): + def test_validate_enabled_tool_calls_factory(self, mocker: MockerFixture): config = {"external_data_tools": [{"enabled": True, "type": "tool", "config": {"a": 1}}]} spy = mocker.patch( @@ -263,7 +264,7 @@ class TestValidateExternalDataToolsAndSetDefaults: class TestValidateAndSetDefaultsIntegration: - def test_validate_and_set_defaults_calls_both(self, mocker): + def 
test_validate_and_set_defaults_calls_both(self, mocker: MockerFixture): config = {} spy_var = mocker.patch.object( diff --git a/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py b/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py index e99852cf76..e2ab3e2192 100644 --- a/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py +++ b/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py @@ -2,6 +2,7 @@ from collections import UserDict from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.base_app_config_manager import BaseAppConfigManager @@ -12,7 +13,7 @@ class TestBaseAppConfigManager: return {"key": "value", "another": 123} @pytest.fixture - def mock_app_additional_features(self, mocker): + def mock_app_additional_features(self, mocker: MockerFixture): mock_instance = MagicMock() mocker.patch( "core.app.app_config.base_app_config_manager.AppAdditionalFeatures", @@ -21,7 +22,7 @@ class TestBaseAppConfigManager: return mock_instance @pytest.fixture - def mock_managers(self, mocker): + def mock_managers(self, mocker: MockerFixture): retrieval = mocker.patch( "core.app.app_config.base_app_config_manager.RetrievalResourceConfigManager.convert", return_value="retrieval_result", @@ -72,7 +73,7 @@ class TestBaseAppConfigManager: ) def test_convert_features_all_modes( self, - mocker, + mocker: MockerFixture, mock_config_dict, mock_app_additional_features, mock_managers, @@ -107,7 +108,7 @@ class TestBaseAppConfigManager: mock_managers["speech_to_text"].assert_called_once_with(config=dict(mock_config_dict.items())) mock_managers["text_to_speech"].assert_called_once_with(config=dict(mock_config_dict.items())) - def test_convert_features_empty_config(self, mocker, mock_app_additional_features, mock_managers): + def test_convert_features_empty_config(self, mocker: MockerFixture, mock_app_additional_features, mock_managers): # Arrange 
empty_config = {} mock_app_mode = MagicMock() @@ -143,7 +144,7 @@ class TestBaseAppConfigManager: with pytest.raises((TypeError, AttributeError)): BaseAppConfigManager.convert_features(invalid_config, "CHAT") - def test_convert_features_manager_exception_propagates(self, mocker, mock_config_dict): + def test_convert_features_manager_exception_propagates(self, mocker: MockerFixture, mock_config_dict): # Arrange mocker.patch( "core.app.app_config.base_app_config_manager.RetrievalResourceConfigManager.convert", @@ -154,7 +155,9 @@ class TestBaseAppConfigManager: with pytest.raises(RuntimeError): BaseAppConfigManager.convert_features(mock_config_dict, "CHAT") - def test_convert_features_mapping_subclass(self, mocker, mock_app_additional_features, mock_managers): + def test_convert_features_mapping_subclass( + self, mocker: MockerFixture, mock_app_additional_features, mock_managers + ): # Arrange class CustomMapping(UserDict): pass diff --git a/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py b/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py index fa128aca87..dacd69a578 100644 --- a/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py +++ b/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.app_config.workflow_ui_based_app.variables.manager import ( WorkflowVariablesConfigManager, @@ -10,19 +11,19 @@ from core.app.app_config.workflow_ui_based_app.variables.manager import ( @pytest.fixture -def mock_workflow(mocker): +def mock_workflow(mocker: MockerFixture): workflow = mocker.MagicMock() workflow.graph_dict = {"nodes": []} return workflow @pytest.fixture -def mock_variable_entity(mocker): +def mock_variable_entity(mocker: MockerFixture): return 
mocker.patch("core.app.app_config.workflow_ui_based_app.variables.manager.VariableEntity") @pytest.fixture -def mock_rag_entity(mocker): +def mock_rag_entity(mocker: MockerFixture): return mocker.patch("core.app.app_config.workflow_ui_based_app.variables.manager.RagPipelineVariableEntity") diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py index af5d203f12..bc3b06cd1b 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py @@ -111,7 +111,7 @@ class TestAdvancedChatAppGeneratorInternals: workflow_id="workflow-id", ) - def test_generate_loads_conversation_and_files(self, monkeypatch): + def test_generate_loads_conversation_and_files(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() @@ -195,7 +195,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["application_generate_entity"].files == built_files assert build_files_called["called"] is True - def test_resume_delegates_to_generate(self, monkeypatch): + def test_resume_delegates_to_generate(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() application_generate_entity = AdvancedChatAppGenerateEntity.model_construct( task_id="task", @@ -235,7 +235,7 @@ class TestAdvancedChatAppGeneratorInternals: assert result == {"resumed": True} assert captured["graph_runtime_state"] is not None - def test_single_iteration_generate_builds_debug_task(self, monkeypatch): + def test_single_iteration_generate_builds_debug_task(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() captured: dict[str, object] = {} @@ -293,7 +293,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["variable_loader"] is var_loader assert 
captured["application_generate_entity"].single_iteration_run.node_id == "node-1" - def test_single_loop_generate_builds_debug_task(self, monkeypatch): + def test_single_loop_generate_builds_debug_task(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() captured: dict[str, object] = {} @@ -351,7 +351,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["variable_loader"] is var_loader assert captured["application_generate_entity"].single_loop_run.node_id == "node-2" - def test_generate_internal_flow_initial_conversation_with_pause_layer(self, monkeypatch): + def test_generate_internal_flow_initial_conversation_with_pause_layer(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 0 app_config = self._build_app_config() @@ -449,7 +449,7 @@ class TestAdvancedChatAppGeneratorInternals: assert isinstance(captured["conversation"], ConversationSnapshot) assert isinstance(captured["message"], MessageSnapshot) - def test_generate_internal_flow_with_existing_records_skips_init(self, monkeypatch): + def test_generate_internal_flow_with_existing_records_skips_init(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 0 app_config = self._build_app_config() @@ -535,7 +535,7 @@ class TestAdvancedChatAppGeneratorInternals: db_session.refresh.assert_not_called() db_session.close.assert_called_once() - def test_generate_worker_raises_when_workflow_not_found(self, monkeypatch): + def test_generate_worker_raises_when_workflow_not_found(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -594,7 +594,7 @@ class TestAdvancedChatAppGeneratorInternals: graph_runtime_state=None, ) - def test_generate_worker_raises_when_app_not_found_for_internal_call(self, monkeypatch): + def 
test_generate_worker_raises_when_app_not_found_for_internal_call(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -658,7 +658,7 @@ class TestAdvancedChatAppGeneratorInternals: graph_runtime_state=None, ) - def test_generate_worker_handles_stopped_error(self, monkeypatch): + def test_generate_worker_handles_stopped_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -732,7 +732,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_not_called() - def test_generate_worker_handles_validation_error(self, monkeypatch): + def test_generate_worker_handles_validation_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -816,7 +816,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_called_once() - def test_generate_worker_handles_value_and_unknown_errors(self, monkeypatch): + def test_generate_worker_handles_value_and_unknown_errors(self, monkeypatch: pytest.MonkeyPatch): app_config = self._build_app_config() @contextmanager @@ -897,7 +897,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_called_once() - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 @@ -953,7 +953,7 @@ class TestAdvancedChatAppGeneratorInternals: stream=False, ) - def test_handle_response_re_raises_value_error(self, monkeypatch): + def test_handle_response_re_raises_value_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() 
@@ -1002,7 +1002,7 @@ class TestAdvancedChatAppGeneratorInternals: logger_exception.assert_called_once() - def test_generate_worker_handles_invoke_auth_error(self, monkeypatch): + def test_generate_worker_handles_invoke_auth_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 @@ -1088,7 +1088,7 @@ class TestAdvancedChatAppGeneratorInternals: assert queue_manager.publish_error.called - def test_generate_debugger_enables_retrieve_source(self, monkeypatch): + def test_generate_debugger_enables_retrieve_source(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = WorkflowUIBasedAppConfig( @@ -1167,7 +1167,7 @@ class TestAdvancedChatAppGeneratorInternals: assert app_config.additional_features.show_retrieve_source is True assert captured["application_generate_entity"].query == "hello" - def test_generate_service_api_sets_parent_message_id(self, monkeypatch): + def test_generate_service_api_sets_parent_message_id(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = WorkflowUIBasedAppConfig( diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py index 64bcfa9a18..d8f794b483 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py @@ -132,7 +132,9 @@ class TestAdvancedChatGenerateTaskPipeline: pipeline._task_state.answer = "partial answer" pipeline._workflow_run_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=build_test_variable_pool( + variables=build_system_variables(workflow_execution_id="run-id"), + ), start_at=0.0, total_tokens=7, 
node_run_steps=3, @@ -224,7 +226,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert isinstance(responses[0], ValueError) - def test_handle_workflow_started_event_sets_run_id(self, monkeypatch): + def test_handle_workflow_started_event_sets_run_id(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( variable_pool=build_test_variable_pool(variables=build_system_variables(workflow_execution_id="run-id")), @@ -368,11 +370,13 @@ class TestAdvancedChatGenerateTaskPipeline: assert list(pipeline._handle_loop_next_event(loop_next)) == ["loop_next"] assert list(pipeline._handle_loop_completed_event(loop_done)) == ["loop_done"] - def test_workflow_finish_handlers(self, monkeypatch): + def test_workflow_finish_handlers(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_run_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._workflow_response_converter.workflow_finish_to_stream_response = lambda **kwargs: "finish" @@ -583,7 +587,9 @@ class TestAdvancedChatGenerateTaskPipeline: self.items = items graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) @@ -593,7 +599,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert message.answer == "hello" assert message.message_metadata - def test_handle_stop_event_saves_message_for_moderation(self, monkeypatch): + def test_handle_stop_event_saves_message_for_moderation(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() 
pipeline._message_end_to_stream_response = lambda: "end" saved: list[str] = [] @@ -614,10 +620,12 @@ class TestAdvancedChatGenerateTaskPipeline: assert responses == ["end"] assert saved == ["saved"] - def test_handle_message_end_event_applies_output_moderation(self, monkeypatch): + def test_handle_message_end_event_applies_output_moderation(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._base_task_pipeline.handle_output_moderation_when_task_finished = lambda answer: "safe" diff --git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py index a871e8d93b..d47b70e950 100644 --- a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py +++ b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py @@ -2,6 +2,7 @@ import uuid from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from core.app.app_config.entities import EasyUIBasedAppModelConfigFrom from core.app.apps.agent_chat.app_config_manager import ( @@ -11,7 +12,7 @@ from core.entities.agent_entities import PlanningStrategy class TestAgentChatAppConfigManagerGetAppConfig: - def test_get_app_config_override_config(self, mocker): + def test_get_app_config_override_config(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"ignored": True} @@ -45,7 +46,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: assert result.variables == "variables" assert 
result.external_data_variables == "external" - def test_get_app_config_conversation_specific(self, mocker): + def test_get_app_config_conversation_specific(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -76,7 +77,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: assert result.app_model_config_dict == app_model_config.to_dict.return_value assert result.app_model_config_from.value == "conversation-specific-config" - def test_get_app_config_latest_config(self, mocker): + def test_get_app_config_latest_config(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -107,7 +108,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: class TestAgentChatAppConfigManagerConfigValidate: - def test_config_validate_filters_related_keys(self, mocker): + def test_config_validate_filters_related_keys(self, mocker: MockerFixture): config = { "model": {}, "user_input_form": {}, @@ -247,7 +248,7 @@ class TestValidateAgentModeAndSetDefaults: {"agent_mode": {"enabled": True, "tools": [{"dataset": {"enabled": True, "id": "bad"}}]}}, ) - def test_old_tool_dataset_id_not_exists(self, mocker): + def test_old_tool_dataset_id_not_exists(self, mocker: MockerFixture): mocker.patch( "core.app.apps.agent_chat.app_config_manager.DatasetConfigManager.is_dataset_exists", return_value=False, @@ -275,7 +276,7 @@ class TestValidateAgentModeAndSetDefaults: "tenant", {"agent_mode": {"enabled": True, "tools": [tool]}} ) - def test_valid_old_and_new_style_tools(self, mocker): + def test_valid_old_and_new_style_tools(self, mocker: MockerFixture): mocker.patch( "core.app.apps.agent_chat.app_config_manager.DatasetConfigManager.is_dataset_exists", 
return_value=True, diff --git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py index 80f7f94b1a..6cd62c933a 100644 --- a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py @@ -2,6 +2,7 @@ import contextlib import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture from core.app.apps.agent_chat.app_generator import AgentChatAppGenerator from core.app.apps.exc import GenerateTaskStoppedError @@ -16,7 +17,7 @@ class DummyAccount: @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = AgentChatAppGenerator() mocker.patch( "core.app.apps.agent_chat.app_generator.current_app", @@ -27,19 +28,19 @@ def generator(mocker): class TestAgentChatAppGeneratorGenerate: - def test_generate_rejects_blocking_mode(self, generator, mocker): + def test_generate_rejects_blocking_mode(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): generator.generate(app_model=app_model, user=user, args={}, invoke_from=mocker.MagicMock(), streaming=False) - def test_generate_requires_query(self, generator, mocker): + def test_generate_requires_query(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): generator.generate(app_model=app_model, user=user, args={"inputs": {}}, invoke_from=mocker.MagicMock()) - def test_generate_rejects_non_string_query(self, generator, mocker): + def test_generate_rejects_non_string_query(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): @@ -50,7 +51,7 @@ class TestAgentChatAppGeneratorGenerate: invoke_from=mocker.MagicMock(), ) - def 
test_generate_override_requires_debugger(self, generator, mocker): + def test_generate_override_requires_debugger(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") @@ -62,7 +63,7 @@ class TestAgentChatAppGeneratorGenerate: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_success_with_debugger_override(self, generator, mocker): + def test_generate_success_with_debugger_override(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -142,7 +143,7 @@ class TestAgentChatAppGeneratorGenerate: assert result == {"result": "ok"} thread_obj.start.assert_called_once() - def test_generate_without_file_config(self, generator, mocker): + def test_generate_without_file_config(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -213,14 +214,14 @@ class TestAgentChatAppGeneratorGenerate: class TestAgentChatAppGeneratorWorker: @pytest.fixture(autouse=True) - def patch_context(self, mocker): + def patch_context(self, mocker: MockerFixture): @contextlib.contextmanager def ctx_manager(*args, **kwargs): yield mocker.patch("core.app.apps.agent_chat.app_generator.preserve_flask_contexts", ctx_manager) - def test_generate_worker_handles_generate_task_stopped(self, generator, mocker): + def test_generate_worker_handles_generate_task_stopped(self, generator, mocker: MockerFixture): queue_manager = mocker.MagicMock() generator._get_conversation = mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) @@ -250,7 +251,7 @@ class TestAgentChatAppGeneratorWorker: Exception("bad"), ], ) - def 
test_generate_worker_publishes_errors(self, generator, mocker, error): + def test_generate_worker_publishes_errors(self, generator, mocker: MockerFixture, error): queue_manager = mocker.MagicMock() generator._get_conversation = mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) @@ -271,7 +272,7 @@ class TestAgentChatAppGeneratorWorker: assert queue_manager.publish_error.called - def test_generate_worker_logs_value_error_when_debug(self, generator, mocker): + def test_generate_worker_logs_value_error_when_debug(self, generator, mocker: MockerFixture): queue_manager = mocker.MagicMock() generator._get_conversation = mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) diff --git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py index 4567b35480..0260235b03 100644 --- a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.agent.entities import AgentEntity from core.app.apps.agent_chat.app_runner import AgentChatAppRunner @@ -13,7 +14,7 @@ def runner(): class TestAgentChatAppRunnerRun: - def test_run_app_not_found(self, runner, mocker): + def test_run_app_not_found(self, runner, mocker: MockerFixture): app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", agent=mocker.MagicMock()) generate_entity = mocker.MagicMock(app_config=app_config, inputs={}, query="q", files=[], stream=True) @@ -22,7 +23,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock()) - def test_run_moderation_error_direct_output(self, runner, mocker): + def 
test_run_moderation_error_direct_output(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -45,7 +46,7 @@ class TestAgentChatAppRunnerRun: runner.direct_output.assert_called_once() - def test_run_annotation_reply_short_circuits(self, runner, mocker): + def test_run_annotation_reply_short_circuits(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -74,7 +75,7 @@ class TestAgentChatAppRunnerRun: queue_manager.publish.assert_called_once() runner.direct_output.assert_called_once() - def test_run_hosting_moderation_short_circuits(self, runner, mocker): + def test_run_hosting_moderation_short_circuits(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -98,7 +99,7 @@ class TestAgentChatAppRunnerRun: runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock()) - def test_run_model_schema_missing(self, runner, mocker): + def test_run_model_schema_missing(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -140,7 +141,7 @@ class TestAgentChatAppRunnerRun: (LLMMode.COMPLETION, "CotCompletionAgentRunner"), ], ) - def test_run_chain_of_thought_modes(self, runner, mocker, mode, expected_runner): + def test_run_chain_of_thought_modes(self, 
runner, mocker: MockerFixture, mode, expected_runner): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -196,7 +197,7 @@ class TestAgentChatAppRunnerRun: runner_instance.run.assert_called_once() runner._handle_invoke_result.assert_called_once() - def test_run_invalid_llm_mode_raises(self, runner, mocker): + def test_run_invalid_llm_mode_raises(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -242,7 +243,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), conversation, message) - def test_run_function_calling_strategy_selected_by_features(self, runner, mocker): + def test_run_function_calling_strategy_selected_by_features(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -298,7 +299,7 @@ class TestAgentChatAppRunnerRun: assert app_config.agent.strategy == AgentEntity.Strategy.FUNCTION_CALLING runner_instance.run.assert_called_once() - def test_run_conversation_not_found(self, runner, mocker): + def test_run_conversation_not_found(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", 
strategy=AgentEntity.Strategy.FUNCTION_CALLING) @@ -332,7 +333,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(id="conv"), mocker.MagicMock(id="msg")) - def test_run_message_not_found(self, runner, mocker): + def test_run_message_not_found(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.FUNCTION_CALLING) @@ -366,7 +367,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(id="conv"), mocker.MagicMock(id="msg")) - def test_run_invalid_agent_strategy_raises(self, runner, mocker): + def test_run_invalid_agent_strategy_raises(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock(strategy="invalid", provider="p", model="m") diff --git a/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py b/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py index aa2085177e..8dcf6e9193 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.app.apps.completion.app_runner as module from core.app.apps.completion.app_runner import CompletionAppRunner @@ -47,7 +48,7 @@ def _build_generate_entity(app_config, file_upload_config=None): class TestCompletionAppRunner: - def test_run_app_not_found(self, runner, mocker): + def test_run_app_not_found(self, 
runner, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = None mocker.patch.object(module.db, "session", session) @@ -58,7 +59,7 @@ class TestCompletionAppRunner: with pytest.raises(ValueError): runner.run(app_generate_entity, MagicMock(), MagicMock()) - def test_run_moderation_error_outputs_direct(self, runner, mocker): + def test_run_moderation_error_outputs_direct(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -78,7 +79,7 @@ class TestCompletionAppRunner: runner.direct_output.assert_called_once() runner._handle_invoke_result.assert_not_called() - def test_run_hosting_moderation_stops(self, runner, mocker): + def test_run_hosting_moderation_stops(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -97,7 +98,7 @@ class TestCompletionAppRunner: runner._handle_invoke_result.assert_not_called() - def test_run_dataset_and_external_tools_flow(self, runner, mocker): + def test_run_dataset_and_external_tools_flow(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -140,7 +141,7 @@ class TestCompletionAppRunner: assert dataset_retrieval.retrieve.call_args.kwargs["query"] == "query_from_input" runner._handle_invoke_result.assert_called_once() - def test_run_uses_low_image_detail_default(self, runner, mocker): + def test_run_uses_low_image_detail_default(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() diff --git a/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py b/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py index 024bd8f302..353162be8c 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py +++ 
b/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py @@ -1,6 +1,8 @@ from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + import core.app.apps.completion.app_config_manager as module from core.app.app_config.entities import EasyUIBasedAppModelConfigFrom from core.app.apps.completion.app_config_manager import CompletionAppConfigManager @@ -8,7 +10,7 @@ from models.model import AppMode class TestCompletionAppConfigManager: - def test_get_app_config_with_override(self, mocker): + def test_get_app_config_with_override(self, mocker: MockerFixture): app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION.value) app_model_config = MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "x"}} @@ -35,8 +37,8 @@ class TestCompletionAppConfigManager: assert result.external_data_variables == ["ext1"] assert result.app_mode == AppMode.COMPLETION - def test_get_app_config_without_override_uses_model_config(self, mocker): - app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION.value) + def test_get_app_config_without_override_uses_model_config(self, mocker: MockerFixture): + app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION) app_model_config = MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "x"}} @@ -53,7 +55,7 @@ class TestCompletionAppConfigManager: assert result.app_model_config_from == EasyUIBasedAppModelConfigFrom.APP_LATEST_CONFIG assert result.app_model_config_dict == {"model": {"provider": "x"}} - def test_config_validate_filters_related_keys(self, mocker): + def test_config_validate_filters_related_keys(self, mocker: MockerFixture): config = { "model": {"provider": "x"}, "variables": ["v"], diff --git a/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py 
b/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py index f2e35f9900..de20dde677 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py @@ -4,6 +4,7 @@ from unittest.mock import MagicMock import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture import core.app.apps.completion.app_generator as module from core.app.apps.completion.app_generator import CompletionAppGenerator @@ -15,7 +16,7 @@ from services.errors.message import MessageNotExistsError @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = CompletionAppGenerator() mocker.patch.object(module, "copy_current_request_context", side_effect=lambda fn: fn) @@ -69,7 +70,7 @@ class TestCompletionAppGenerator: streaming=False, ) - def test_generate_success_no_file_config(self, generator, mocker): + def test_generate_success_no_file_config(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) mocker.patch.object(module.FileUploadConfigManager, "convert", return_value=None) @@ -99,7 +100,7 @@ class TestCompletionAppGenerator: assert result == "converted" module.file_factory.build_from_mappings.assert_not_called() - def test_generate_success_with_files(self, generator, mocker): + def test_generate_success_with_files(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) @@ -131,7 +132,7 @@ class TestCompletionAppGenerator: assert result == "converted" module.file_factory.build_from_mappings.assert_called_once() - def test_generate_override_model_config_debugger(self, generator, mocker): + def test_generate_override_model_config_debugger(self, 
generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) @@ -165,7 +166,7 @@ class TestCompletionAppGenerator: assert get_app_config.call_args.kwargs["override_config_dict"] == override_config - def test_generate_more_like_this_message_not_found(self, generator, mocker): + def test_generate_more_like_this_message_not_found(self, generator, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = None mocker.patch.object(module.db, "session", session) @@ -178,7 +179,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_disabled(self, generator, mocker): + def test_generate_more_like_this_disabled(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = MagicMock(more_like_this=False, more_like_this_dict={"enabled": False}) @@ -195,7 +196,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_app_model_config_missing(self, generator, mocker): + def test_generate_more_like_this_app_model_config_missing(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = None @@ -212,7 +213,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_message_config_none(self, generator, mocker): + def test_generate_more_like_this_message_config_none(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = MagicMock(more_like_this=True, more_like_this_dict={"enabled": True}) @@ -229,7 +230,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_success(self, generator, mocker): + def test_generate_more_like_this_success(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config 
= MagicMock(more_like_this=True, more_like_this_dict={"enabled": True}) @@ -297,7 +298,7 @@ class TestCompletionAppGenerator: (RuntimeError("boom"), True), ], ) - def test_generate_worker_error_handling(self, generator, mocker, error, should_publish): + def test_generate_worker_error_handling(self, generator, mocker: MockerFixture, error, should_publish): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py index 5d4c9bcde0..6c1ee20ffb 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py @@ -1,12 +1,14 @@ from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + import core.app.apps.pipeline.pipeline_config_manager as module from core.app.apps.pipeline.pipeline_config_manager import PipelineConfigManager from models.model import AppMode -def test_get_pipeline_config(mocker): +def test_get_pipeline_config(mocker: MockerFixture): pipeline = MagicMock(tenant_id="tenant", id="pipe1") workflow = MagicMock(id="wf1") @@ -26,7 +28,7 @@ def test_get_pipeline_config(mocker): assert result.rag_pipeline_variables == ["var1"] -def test_config_validate_filters_related_keys(mocker): +def test_config_validate_filters_related_keys(mocker: MockerFixture): config = { "file_upload": {"enabled": True}, "tts": {"enabled": True}, diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py index c36edf48fc..dd91243a37 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py @@ -3,6 +3,7 @@ from types import SimpleNamespace from 
unittest.mock import MagicMock, PropertyMock import pytest +from pytest_mock import MockerFixture import core.app.apps.pipeline.pipeline_generator as module from core.app.apps.exc import GenerateTaskStoppedError @@ -23,7 +24,7 @@ class FakeRagPipelineGenerateEntity(SimpleNamespace): @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = module.PipelineGenerator() mocker.patch.object(module, "RagPipelineGenerateEntity", FakeRagPipelineGenerateEntity) @@ -88,7 +89,7 @@ class DummySession: return False -def test_generate_dataset_missing(generator, mocker): +def test_generate_dataset_missing(generator, mocker: MockerFixture): pipeline = _build_pipeline() pipeline.retrieve_dataset.return_value = None @@ -106,7 +107,7 @@ def test_generate_dataset_missing(generator, mocker): ) -def test_generate_debugger_calls_generate(generator, mocker): +def test_generate_debugger_calls_generate(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -150,7 +151,7 @@ def test_generate_debugger_calls_generate(generator, mocker): assert result == {"result": "ok"} -def test_generate_published_pipeline_creates_documents_and_delay(generator, mocker): +def test_generate_published_pipeline_creates_documents_and_delay(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -228,7 +229,7 @@ def test_generate_published_pipeline_creates_documents_and_delay(generator, mock task_proxy.delay.assert_called_once() -def test_generate_is_retry_calls_generate(generator, mocker): +def test_generate_is_retry_calls_generate(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -273,7 +274,7 @@ def test_generate_is_retry_calls_generate(generator, mocker): assert result == {"result": "ok"} -def test_generate_worker_handles_errors(generator, mocker): +def test_generate_worker_handles_errors(generator, mocker: MockerFixture): flask_app = MagicMock() 
flask_app.app_context.return_value = contextlib.nullcontext() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -308,7 +309,7 @@ def test_generate_worker_handles_errors(generator, mocker): queue_manager.publish_error.assert_called_once() -def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker): +def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker: MockerFixture): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -341,7 +342,7 @@ def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker assert module.PipelineRunner.call_args.kwargs["system_user_id"] == "session" -def test_generate_raises_when_workflow_not_found(generator, mocker): +def test_generate_raises_when_workflow_not_found(generator, mocker: MockerFixture): flask_app = MagicMock() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -369,7 +370,7 @@ def test_generate_raises_when_workflow_not_found(generator, mocker): ) -def test_generate_success_returns_converted(generator, mocker): +def test_generate_success_returns_converted(generator, mocker: MockerFixture): flask_app = MagicMock() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -409,7 +410,7 @@ def test_generate_success_returns_converted(generator, mocker): assert result == "converted" -def test_single_iteration_generate_validates_inputs(generator, mocker): +def test_single_iteration_generate_validates_inputs(generator, mocker: MockerFixture): with pytest.raises(ValueError): generator.single_iteration_generate(_build_pipeline(), _build_workflow(), "", _build_user(), {}) @@ -419,7 +420,7 @@ def test_single_iteration_generate_validates_inputs(generator, mocker): ) -def test_single_iteration_generate_dataset_required(generator, mocker): +def test_single_iteration_generate_dataset_required(generator, 
mocker: MockerFixture): pipeline = _build_pipeline() pipeline.retrieve_dataset.return_value = None @@ -436,7 +437,7 @@ def test_single_iteration_generate_dataset_required(generator, mocker): ) -def test_single_iteration_generate_success(generator, mocker): +def test_single_iteration_generate_success(generator, mocker: MockerFixture): pipeline = _build_pipeline() session = DummySession() @@ -476,7 +477,7 @@ def test_single_iteration_generate_success(generator, mocker): assert result == {"ok": True} -def test_single_loop_generate_success(generator, mocker): +def test_single_loop_generate_success(generator, mocker: MockerFixture): pipeline = _build_pipeline() session = DummySession() @@ -516,7 +517,7 @@ def test_single_loop_generate_success(generator, mocker): assert result == {"ok": True} -def test_handle_response_value_error_triggers_generate_task_stopped(generator, mocker): +def test_handle_response_value_error_triggers_generate_task_stopped(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() app_entity = FakeRagPipelineGenerateEntity(task_id="t") @@ -536,7 +537,7 @@ def test_handle_response_value_error_triggers_generate_task_stopped(generator, m ) -def test_build_document_sets_metadata_for_builtin_fields(generator, mocker): +def test_build_document_sets_metadata_for_builtin_fields(generator, mocker: MockerFixture): class DummyDocument(SimpleNamespace): pass @@ -620,7 +621,7 @@ def test_format_datasource_info_list_missing_node_data(generator): ) -def test_format_datasource_info_list_online_drive_folder(generator, mocker): +def test_format_datasource_info_list_online_drive_folder(generator, mocker: MockerFixture): workflow = MagicMock( graph_dict={ "nodes": [ diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py index 9db83f5531..abfc76afa0 100644 --- 
a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture import core.app.apps.pipeline.pipeline_queue_manager as module from core.app.apps.base_app_queue_manager import PublishFrom @@ -16,7 +17,7 @@ from core.app.entities.queue_entities import ( from graphon.model_runtime.entities.llm_entities import LLMResult -def test_publish_sets_stop_listen_and_raises_on_stopped(mocker): +def test_publish_sets_stop_listen_and_raises_on_stopped(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() @@ -28,7 +29,7 @@ def test_publish_sets_stop_listen_and_raises_on_stopped(mocker): manager.stop_listen.assert_called_once() -def test_publish_stop_events_trigger_stop_listen(mocker): +def test_publish_stop_events_trigger_stop_listen(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() @@ -46,7 +47,7 @@ def test_publish_stop_events_trigger_stop_listen(mocker): manager.stop_listen.assert_called_once() -def test_publish_non_stop_event_no_stop_listen(mocker): +def test_publish_non_stop_event_no_stop_listen(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py index 603062a51c..1eed76cf84 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py +++ 
b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py @@ -22,6 +22,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.app.apps.pipeline.pipeline_runner as module from core.app.apps.pipeline.pipeline_runner import PipelineRunner @@ -126,7 +127,7 @@ def test_update_document_status_on_failure(mocker, runner): session.commit.assert_called_once() -def test_run_pipeline_not_found(mocker): +def test_run_pipeline_not_found(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() app_generate_entity.invoke_from = InvokeFrom.WEB_APP app_generate_entity.single_iteration_run = None @@ -150,7 +151,7 @@ def test_run_pipeline_not_found(mocker): runner.run() -def test_run_workflow_not_initialized(mocker): +def test_run_workflow_not_initialized(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() pipeline = MagicMock(id="pipe") @@ -174,7 +175,7 @@ def test_run_workflow_not_initialized(mocker): runner.run() -def test_run_single_iteration_path(mocker): +def test_run_single_iteration_path(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() app_generate_entity.single_iteration_run = MagicMock() @@ -223,7 +224,7 @@ def test_run_single_iteration_path(mocker): runner._handle_event.assert_called() -def test_run_normal_path_builds_graph(mocker): +def test_run_normal_path_builds_graph(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() pipeline = MagicMock(id="pipe") diff --git a/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py b/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py index f48a7fb38e..835c9a8576 100644 --- a/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py @@ -45,7 +45,7 @@ def _make_generate_entity(app_config: WorkflowUIBasedAppConfig) -> AdvancedChatA 
@pytest.fixture(autouse=True) -def _mock_db_session(monkeypatch): +def _mock_db_session(monkeypatch: pytest.MonkeyPatch): session = MagicMock() def refresh_side_effect(obj): @@ -108,7 +108,7 @@ def test_init_generate_records_marks_existing_conversation(): assert entity.is_new_conversation is False -def test_message_cycle_manager_uses_new_conversation_flag(monkeypatch): +def test_message_cycle_manager_uses_new_conversation_flag(monkeypatch: pytest.MonkeyPatch): app_config = _make_app_config() entity = _make_generate_entity(app_config) entity.conversation_id = "existing-conversation-id" diff --git a/api/tests/unit_tests/core/app/apps/test_base_app_generator.py b/api/tests/unit_tests/core/app/apps/test_base_app_generator.py index b0f8b423e1..f2a1700664 100644 --- a/api/tests/unit_tests/core/app/apps/test_base_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_base_app_generator.py @@ -369,7 +369,7 @@ def test_validate_inputs_optional_file_with_empty_string_ignores_default(): class TestBaseAppGeneratorExtras: - def test_prepare_user_inputs_converts_files_and_lists(self, monkeypatch): + def test_prepare_user_inputs_converts_files_and_lists(self, monkeypatch: pytest.MonkeyPatch): base_app_generator = BaseAppGenerator() variables = [ diff --git a/api/tests/unit_tests/core/app/apps/test_base_app_runner.py b/api/tests/unit_tests/core/app/apps/test_base_app_runner.py index 17de39ca99..c6eedf7be7 100644 --- a/api/tests/unit_tests/core/app/apps/test_base_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/test_base_app_runner.py @@ -42,7 +42,7 @@ class _QueueRecorder: class TestAppRunner: - def test_recalc_llm_max_tokens_updates_parameters(self, monkeypatch): + def test_recalc_llm_max_tokens_updates_parameters(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_schema = SimpleNamespace( @@ -65,7 +65,7 @@ class TestAppRunner: assert model_config.parameters["max_tokens"] == 20 - def test_recalc_llm_max_tokens_returns_minus_one_when_no_context(self, 
monkeypatch): + def test_recalc_llm_max_tokens_returns_minus_one_when_no_context(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_schema = SimpleNamespace( @@ -86,7 +86,7 @@ class TestAppRunner: assert runner.recalc_llm_max_tokens(model_config, prompt_messages=[]) == -1 - def test_direct_output_streaming_publishes_chunks_and_end(self, monkeypatch): + def test_direct_output_streaming_publishes_chunks_and_end(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() app_generate_entity = SimpleNamespace(model_conf=SimpleNamespace(model="mock"), stream=True) @@ -133,7 +133,7 @@ class TestAppRunner: stream=True, ) - def test_organize_prompt_messages_simple_template(self, monkeypatch): + def test_organize_prompt_messages_simple_template(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="chat", stop=["STOP"]) prompt_template_entity = PromptTemplateEntity( @@ -158,7 +158,7 @@ class TestAppRunner: assert prompt_messages == ["simple-message"] assert stop == ["simple-stop"] - def test_organize_prompt_messages_advanced_completion_template(self, monkeypatch): + def test_organize_prompt_messages_advanced_completion_template(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="completion", stop=[""]) captured: dict[str, object] = {} @@ -191,7 +191,7 @@ class TestAppRunner: assert memory_config.role_prefix.user == "U" assert memory_config.role_prefix.assistant == "A" - def test_organize_prompt_messages_advanced_chat_template(self, monkeypatch): + def test_organize_prompt_messages_advanced_chat_template(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="chat", stop=[""]) captured: dict[str, object] = {} @@ -245,7 +245,7 @@ class TestAppRunner: files=[], ) - def test_handle_invoke_result_stream_routes_chunks_and_builds_message(self, monkeypatch): + def 
test_handle_invoke_result_stream_routes_chunks_and_builds_message(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() warning_logger = MagicMock() @@ -284,7 +284,7 @@ class TestAppRunner: assert queue.events[-1].llm_result.message.content == "abc" warning_logger.assert_called_once() - def test_handle_invoke_result_stream_agent_mode_handles_multimodal_errors(self, monkeypatch): + def test_handle_invoke_result_stream_agent_mode_handles_multimodal_errors(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() exception_logger = MagicMock() @@ -331,7 +331,7 @@ class TestAppRunner: assert queue.events[-1].llm_result.usage == usage exception_logger.assert_called_once() - def test_handle_multimodal_image_content_fallback_return_branch(self, monkeypatch): + def test_handle_multimodal_image_content_fallback_return_branch(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() class _ToggleBool: @@ -367,7 +367,7 @@ class TestAppRunner: db_session.add.assert_not_called() queue_manager.publish.assert_not_called() - def test_check_hosting_moderation_direct_output_called(self, monkeypatch): + def test_check_hosting_moderation_direct_output_called(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() app_generate_entity = SimpleNamespace(stream=False) @@ -388,7 +388,7 @@ class TestAppRunner: assert result is True assert direct_output.called - def test_fill_in_inputs_from_external_data_tools(self, monkeypatch): + def test_fill_in_inputs_from_external_data_tools(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() monkeypatch.setattr( "core.app.apps.base_app_runner.ExternalDataFetch.fetch", @@ -405,7 +405,7 @@ class TestAppRunner: assert result == {"foo": "bar"} - def test_moderation_for_inputs_returns_result(self, monkeypatch): + def test_moderation_for_inputs_returns_result(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() monkeypatch.setattr( 
"core.app.apps.base_app_runner.InputModeration.check", @@ -424,7 +424,7 @@ class TestAppRunner: assert result == (True, {}, "") - def test_query_app_annotations_to_reply(self, monkeypatch): + def test_query_app_annotations_to_reply(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() monkeypatch.setattr( "core.app.apps.base_app_runner.AnnotationReplyFeature.query", diff --git a/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py b/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py index 1250ac5ecf..6a9b5e7619 100644 --- a/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py @@ -85,7 +85,7 @@ def _make_chat_generate_entity(app_config: EasyUIBasedAppConfig) -> ChatAppGener @pytest.fixture(autouse=True) -def _mock_db_session(monkeypatch): +def _mock_db_session(monkeypatch: pytest.MonkeyPatch): session = MagicMock() def refresh_side_effect(obj): @@ -130,7 +130,7 @@ def test_init_generate_records_sets_conversation_fields_for_chat_entity(): class TestMessageBasedAppGeneratorExtras: - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = MessageBasedAppGenerator() class _Pipeline: @@ -155,7 +155,7 @@ class TestMessageBasedAppGeneratorExtras: stream=False, ) - def test_get_app_model_config_requires_valid_config(self, monkeypatch): + def test_get_app_model_config_requires_valid_config(self, monkeypatch: pytest.MonkeyPatch): generator = MessageBasedAppGenerator() app_model = SimpleNamespace(id="app", app_model_config_id=None, app_model_config=None) diff --git a/api/tests/unit_tests/core/app/apps/test_pause_resume.py b/api/tests/unit_tests/core/app/apps/test_pause_resume.py index 6104b8d6ca..1acebfee17 100644 --- a/api/tests/unit_tests/core/app/apps/test_pause_resume.py +++ 
b/api/tests/unit_tests/core/app/apps/test_pause_resume.py @@ -3,6 +3,8 @@ import time from types import ModuleType, SimpleNamespace from typing import Any +from pytest_mock import MockerFixture + import graphon.nodes.human_input.entities # noqa: F401 from core.app.apps.advanced_chat import app_generator as adv_app_gen_module from core.app.apps.workflow import app_generator as wf_app_gen_module @@ -58,7 +60,7 @@ class _StubToolNode(Node[_StubToolNodeData]): def __init__( self, node_id: str, - config: _StubToolNodeData, + data: _StubToolNodeData, *, graph_init_params, graph_runtime_state, @@ -66,7 +68,7 @@ class _StubToolNode(Node[_StubToolNodeData]): ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) @@ -101,7 +103,7 @@ class _StubToolNode(Node[_StubToolNodeData]): yield self._convert_node_run_result_to_graph_node_event(result) -def _patch_tool_node(mocker): +def _patch_tool_node(mocker: MockerFixture): original_resolve_node_class = node_factory_module.resolve_workflow_node_class def _patched_resolve_node_class(*, node_type: NodeType, node_version: str) -> type[Node]: @@ -167,7 +169,7 @@ def _build_graph(runtime_state: GraphRuntimeState, *, pause_on: str | None) -> G def _build_runtime_state(run_id: str) -> GraphRuntimeState: - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="user", app_id="app", workflow_id="workflow"), user_inputs={}, conversation_variables=[], @@ -196,7 +198,7 @@ def _node_successes(events: list[GraphEngineEvent]) -> list[str]: return [evt.node_id for evt in events if isinstance(evt, NodeRunSucceededEvent)] -def test_workflow_app_pause_resume_matches_baseline(mocker): +def test_workflow_app_pause_resume_matches_baseline(mocker: MockerFixture): _patch_tool_node(mocker) baseline_state = _build_runtime_state("baseline") @@ -236,7 +238,7 @@ def 
test_workflow_app_pause_resume_matches_baseline(mocker): assert resumed_state.outputs == baseline_outputs -def test_advanced_chat_pause_resume_matches_baseline(mocker): +def test_advanced_chat_pause_resume_matches_baseline(mocker: MockerFixture): _patch_tool_node(mocker) baseline_state = _build_runtime_state("adv-baseline") diff --git a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py index 58f0e47a4b..12f3ed9f07 100644 --- a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py +++ b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py @@ -54,7 +54,7 @@ class FakeTopic: return self._state["subscribed"] -def test_retrieve_events_calls_on_subscribe_after_subscription(monkeypatch): +def test_retrieve_events_calls_on_subscribe_after_subscription(monkeypatch: pytest.MonkeyPatch): topic = FakeTopic() def fake_get_response_topic(cls, app_mode, workflow_run_id): @@ -92,7 +92,7 @@ def test_normalize_terminal_events_empty_values(): assert _normalize_terminal_events([]) == set({}) -def test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch): +def test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch: pytest.MonkeyPatch): topic = FakeTopic() times = [1000.0, 1000.0, 1001.0, 1001.0, 1002.0] diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py index 7e8367c6c4..2e4e469eb5 100644 --- a/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py @@ -1,6 +1,9 @@ +import contextlib from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + from core.app.apps.workflow.app_generator import SKIP_PREPARE_USER_INPUTS_KEY, WorkflowAppGenerator @@ -22,7 +25,77 @@ def test_should_prepare_user_inputs_keeps_validation_when_flag_false(): assert 
WorkflowAppGenerator()._should_prepare_user_inputs(args) -def test_resume_delegates_to_generate(mocker): +def test_generate_includes_parent_trace_context_in_extras(monkeypatch): + generator = WorkflowAppGenerator() + + monkeypatch.setattr( + "core.app.apps.workflow.app_generator.WorkflowAppGenerator._bind_file_access_scope", + lambda *args, **kwargs: contextlib.nullcontext(), + ) + monkeypatch.setattr( + "core.app.apps.workflow.app_generator.WorkflowAppConfigManager.get_app_config", + lambda *args, **kwargs: SimpleNamespace( + app_id="app-1", tenant_id="tenant-1", workflow_id="workflow-1", variables=[] + ), + ) + monkeypatch.setattr( + "core.app.apps.workflow.app_generator.file_factory.build_from_mappings", lambda *args, **kwargs: [] + ) + monkeypatch.setattr("core.app.apps.workflow.app_generator.TraceQueueManager", MagicMock()) + monkeypatch.setattr( + "core.app.apps.workflow.app_generator.DifyCoreRepositoryFactory.create_workflow_execution_repository", + MagicMock(return_value=MagicMock()), + ) + monkeypatch.setattr( + "core.app.apps.workflow.app_generator.DifyCoreRepositoryFactory.create_workflow_node_execution_repository", + MagicMock(return_value=MagicMock()), + ) + monkeypatch.setattr("core.app.apps.workflow.app_generator.db", SimpleNamespace(engine=MagicMock())) + monkeypatch.setattr(generator, "_prepare_user_inputs", lambda *, user_inputs, **kwargs: user_inputs) + + captured = {} + + def fake_workflow_app_generate_entity(**kwargs): + captured["workflow_app_generate_entity_kwargs"] = kwargs + return SimpleNamespace(**kwargs) + + def fake_generate(**kwargs): + captured["application_generate_entity"] = kwargs["application_generate_entity"] + return {"data": {}} + + monkeypatch.setattr( + "core.app.apps.workflow.app_generator.WorkflowAppGenerateEntity", fake_workflow_app_generate_entity + ) + monkeypatch.setattr(generator, "_generate", fake_generate) + + result = generator.generate( + app_model=SimpleNamespace(tenant_id="tenant-1", id="app-1"), + 
workflow=SimpleNamespace(features_dict={}), + user=SimpleNamespace(id="user-1", session_id="session-1"), + args={ + "inputs": {"query": "hello"}, + "files": [], + "external_trace_id": "trace-1", + "parent_trace_context": { + "parent_workflow_run_id": "outer-workflow-run-1", + "parent_node_execution_id": "outer-node-execution-1", + }, + }, + invoke_from="service-api", + streaming=False, + call_depth=0, + ) + + assert result == {"data": {}} + extras = captured["workflow_app_generate_entity_kwargs"]["extras"] + assert extras["external_trace_id"] == "trace-1" + assert extras["parent_trace_context"].model_dump() == { + "parent_workflow_run_id": "outer-workflow-run-1", + "parent_node_execution_id": "outer-node-execution-1", + } + + +def test_resume_delegates_to_generate(mocker: MockerFixture): generator = WorkflowAppGenerator() mock_generate = mocker.patch.object(generator, "_generate", return_value="ok") @@ -52,7 +125,7 @@ def test_resume_delegates_to_generate(mocker): assert kwargs["invoke_from"] == "debugger" -def test_generate_appends_pause_layer_and_forwards_state(mocker): +def test_generate_appends_pause_layer_and_forwards_state(mocker: MockerFixture): generator = WorkflowAppGenerator() mock_queue_manager = MagicMock() @@ -124,7 +197,7 @@ def test_generate_appends_pause_layer_and_forwards_state(mocker): assert worker_kwargs["kwargs"]["graph_runtime_state"] is graph_runtime_state -def test_resume_path_runs_worker_with_runtime_state(mocker): +def test_resume_path_runs_worker_with_runtime_state(mocker: MockerFixture): generator = WorkflowAppGenerator() runtime_state = MagicMock(name="runtime-state") diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py index 58c7bfa4bc..3949c41eae 100644 --- a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py @@ -54,7 +54,7 @@ class 
TestWorkflowBasedAppRunner: runner = WorkflowBasedAppRunner(queue_manager=SimpleNamespace(), app_id="app") runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) @@ -90,10 +90,10 @@ class TestWorkflowBasedAppRunner: with pytest.raises(ValueError, match="Neither single_iteration_run nor single_loop_run"): runner._prepare_single_node_execution(workflow, None, None, user_id="00000000-0000-0000-0000-000000000001") - def test_get_graph_and_variable_pool_for_single_node_run(self, monkeypatch): + def test_get_graph_and_variable_pool_for_single_node_run(self, monkeypatch: pytest.MonkeyPatch): runner = WorkflowBasedAppRunner(queue_manager=SimpleNamespace(), app_id="app") graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) @@ -142,7 +142,9 @@ class TestWorkflowBasedAppRunner: assert graph is not None assert variable_pool is graph_runtime_state.variable_pool - def test_get_graph_and_variable_pool_preloads_constructor_variables_before_graph_init(self, monkeypatch): + def test_get_graph_and_variable_pool_preloads_constructor_variables_before_graph_init( + self, monkeypatch: pytest.MonkeyPatch + ): variable_loader = SimpleNamespace( load_variables=lambda selectors: ( [ @@ -162,7 +164,7 @@ class TestWorkflowBasedAppRunner: app_id="app", ) graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) @@ -232,7 +234,7 @@ class TestWorkflowBasedAppRunner: assert graph is not None assert variable_pool.get(["sys", "conversation_id"]).value == "conv-1" - def 
test_handle_graph_run_events_and_pause_notifications(self, monkeypatch): + def test_handle_graph_run_events_and_pause_notifications(self, monkeypatch: pytest.MonkeyPatch): published: list[object] = [] class _QueueManager: @@ -241,7 +243,7 @@ class TestWorkflowBasedAppRunner: runner = WorkflowBasedAppRunner(queue_manager=_QueueManager(), app_id="app") graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) graph_runtime_state.register_paused_node("node-1") @@ -284,7 +286,7 @@ class TestWorkflowBasedAppRunner: runner = WorkflowBasedAppRunner(queue_manager=_QueueManager(), app_id="app") graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) workflow_entry = SimpleNamespace(graph_engine=SimpleNamespace(graph_runtime_state=graph_runtime_state)) @@ -423,7 +425,7 @@ class TestWorkflowBasedAppRunner: runner = WorkflowBasedAppRunner(queue_manager=_QueueManager(), app_id="app") graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) workflow_entry = SimpleNamespace(graph_engine=SimpleNamespace(graph_runtime_state=graph_runtime_state)) diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_single_node.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_single_node.py index 620a153204..248fed5388 100644 --- a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_single_node.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_single_node.py @@ -16,7 +16,7 @@ from models.workflow import Workflow def _make_graph_state(): - variable_pool 
= VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, environment_variables=[], diff --git a/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py b/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py index 09ad078a70..320189143e 100644 --- a/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py +++ b/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py @@ -67,7 +67,7 @@ class TestWorkflowAppGeneratorValidation: class TestWorkflowAppGeneratorHandleResponse: - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = WorkflowAppGenerator() app_config = WorkflowUIBasedAppConfig( @@ -116,7 +116,7 @@ class TestWorkflowAppGeneratorHandleResponse: class TestWorkflowAppGeneratorGenerate: - def test_generate_skips_prepare_inputs_when_flag_set(self, monkeypatch): + def test_generate_skips_prepare_inputs_when_flag_set(self, monkeypatch: pytest.MonkeyPatch): generator = WorkflowAppGenerator() app_config = WorkflowUIBasedAppConfig( diff --git a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py index 0bcc1029b0..ea21a1cc1a 100644 --- a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py @@ -95,7 +95,9 @@ class TestWorkflowGenerateTaskPipeline: def test_to_blocking_response_falls_back_to_human_input_required_when_pause_event_missing(self): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=build_test_variable_pool( + 
variables=build_system_variables(workflow_execution_id="run-id"), + ), start_at=0.0, total_tokens=5, node_run_steps=2, @@ -187,7 +189,7 @@ class TestWorkflowGenerateTaskPipeline: assert isinstance(responses[0], ValueError) - def test_handle_workflow_started_event_sets_run_id(self, monkeypatch): + def test_handle_workflow_started_event_sets_run_id(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( variable_pool=build_test_variable_pool(variables=build_system_variables(workflow_execution_id="run-id")), @@ -283,7 +285,9 @@ class TestWorkflowGenerateTaskPipeline: pipeline = _make_pipeline() pipeline._workflow_execution_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._workflow_response_converter.workflow_finish_to_stream_response = lambda **kwargs: "finish" @@ -408,7 +412,7 @@ class TestWorkflowGenerateTaskPipeline: assert list(pipeline._handle_human_input_form_timeout_event(timeout_event)) == ["timeout"] assert list(pipeline._handle_agent_log_event(agent_event)) == ["log"] - def test_wrapper_process_stream_response_emits_audio_end(self, monkeypatch): + def test_wrapper_process_stream_response_emits_audio_end(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": {"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -560,7 +564,7 @@ class TestWorkflowGenerateTaskPipeline: responses = list(pipeline._wrapper_process_stream_response()) assert responses == [PingStreamResponse(task_id="task")] - def test_wrapper_process_stream_response_final_audio_none_then_finish(self, monkeypatch): + def 
test_wrapper_process_stream_response_final_audio_none_then_finish(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": {"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -597,7 +601,7 @@ class TestWorkflowGenerateTaskPipeline: assert sleep_spy assert any(isinstance(item, MessageAudioEndStreamResponse) for item in responses) - def test_wrapper_process_stream_response_handles_audio_exception(self, monkeypatch): + def test_wrapper_process_stream_response_handles_audio_exception(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": {"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -633,7 +637,7 @@ class TestWorkflowGenerateTaskPipeline: assert logger_exception assert any(isinstance(item, MessageAudioEndStreamResponse) for item in responses) - def test_database_session_rolls_back_on_error(self, monkeypatch): + def test_database_session_rolls_back_on_error(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() calls = {"enter": 0, "exit_exc": None} @@ -725,7 +729,9 @@ class TestWorkflowGenerateTaskPipeline: pipeline = _make_pipeline() pipeline._workflow_execution_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) @@ -753,7 +759,9 @@ class TestWorkflowGenerateTaskPipeline: pipeline = _make_pipeline() pipeline._workflow_execution_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), 
start_at=0.0, ) pipeline._handle_ping_event = lambda event, **kwargs: iter(["ping"]) @@ -769,7 +777,9 @@ class TestWorkflowGenerateTaskPipeline: def test_process_stream_response_main_match_paths_and_cleanup(self): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._base_task_pipeline.queue_manager.listen = lambda: iter( diff --git a/api/tests/unit_tests/core/app/layers/test_trigger_post_layer.py b/api/tests/unit_tests/core/app/layers/test_trigger_post_layer.py index d3bd15b6f3..320a3bc42c 100644 --- a/api/tests/unit_tests/core/app/layers/test_trigger_post_layer.py +++ b/api/tests/unit_tests/core/app/layers/test_trigger_post_layer.py @@ -21,7 +21,9 @@ class TestTriggerPostLayer: ) runtime_state = SimpleNamespace( outputs={"answer": "ok"}, - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-1")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-1") + ), total_tokens=12, ) @@ -60,7 +62,9 @@ class TestTriggerPostLayer: def test_on_event_handles_missing_trigger_log(self): runtime_state = SimpleNamespace( outputs={}, - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-1")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-1") + ), total_tokens=0, ) @@ -91,7 +95,9 @@ class TestTriggerPostLayer: def test_on_event_ignores_non_status_events(self): runtime_state = SimpleNamespace( outputs={}, - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-1")), + variable_pool=VariablePool.from_bootstrap( + 
system_variables=build_system_variables(workflow_execution_id="run-1") + ), total_tokens=0, ) diff --git a/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py index a20d89d807..f10e0084d0 100644 --- a/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py @@ -143,7 +143,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert pipeline._listen_audio_msg(publisher=None, task_id="task") is None - def test_process_stream_response_handles_chunks_and_end(self, monkeypatch): + def test_process_stream_response_handles_chunks_and_end(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -245,7 +245,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert any(isinstance(event, QueueLLMChunkEvent) for event in events) assert any(isinstance(event, QueueStopEvent) for event in events) - def test_handle_stop_updates_usage(self, monkeypatch): + def test_handle_stop_updates_usage(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -313,7 +313,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert pipeline._task_state.llm_result.usage.prompt_tokens == 10 assert pipeline._task_state.llm_result.usage.completion_tokens == 5 - def test_record_files_builds_file_payloads(self, monkeypatch): + def test_record_files_builds_file_payloads(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -405,7 +405,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert files assert len(files) 
== 3 - def test_process_stream_response_handles_annotation_and_error(self, monkeypatch): + def test_process_stream_response_handles_annotation_and_error(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -472,7 +472,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert isinstance(responses[-1], ValueError) assert pipeline._task_state.llm_result.message.content == "annotatedagent" - def test_agent_thought_to_stream_response_returns_payload(self, monkeypatch): + def test_agent_thought_to_stream_response_returns_payload(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -681,7 +681,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert responses == ["payload"] - def test_wrapper_process_stream_response_with_tts_publisher(self, monkeypatch): + def test_wrapper_process_stream_response_with_tts_publisher(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) entity = _make_entity(ChatAppGenerateEntity, AppMode.CHAT) @@ -715,7 +715,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert responses[1] == "payload" assert isinstance(responses[-1], MessageAudioEndStreamResponse) - def test_wrapper_process_stream_response_timeout_yields_audio_chunk(self, monkeypatch): + def test_wrapper_process_stream_response_timeout_yields_audio_chunk(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) entity = _make_entity(ChatAppGenerateEntity, AppMode.CHAT) @@ -756,7 +756,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert any(isinstance(item, MessageAudioStreamResponse) for item in responses) assert isinstance(responses[-1], 
MessageAudioEndStreamResponse) - def test_process_stream_response_handles_stop_event_and_output_replacement(self, monkeypatch): + def test_process_stream_response_handles_stop_event_and_output_replacement(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -896,7 +896,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert list(pipeline._process_stream_response(publisher=None)) == [] - def test_save_message_persists_fields_and_emits_trace(self, monkeypatch): + def test_save_message_persists_fields_and_emits_trace(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -981,7 +981,7 @@ class TestEasyUiBasedGenerateTaskPipeline: with pytest.raises(ValueError, match="Conversation conv not found"): pipeline._save_message(session=session) - def test_message_end_to_stream_response_includes_usage_metadata(self, monkeypatch): + def test_message_end_to_stream_response_includes_usage_metadata(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1021,7 +1021,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert response.id == "msg" assert response.metadata["usage"]["prompt_tokens"] == 1 - def test_record_files_returns_none_when_message_has_no_files(self, monkeypatch): + def test_record_files_returns_none_when_message_has_no_files(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1059,7 +1059,7 @@ class TestEasyUiBasedGenerateTaskPipeline: 
assert response.files is None - def test_record_files_handles_local_fallback_and_tool_url_variants(self, monkeypatch): + def test_record_files_handles_local_fallback_and_tool_url_variants(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1155,7 +1155,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert response.id == "msg" assert response.answer == "hello" - def test_agent_thought_to_stream_response_returns_none_when_not_found(self, monkeypatch): + def test_agent_thought_to_stream_response_returns_none_when_not_found(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( diff --git a/api/tests/unit_tests/core/app/test_llm_quota.py b/api/tests/unit_tests/core/app/test_llm_quota.py new file mode 100644 index 0000000000..d9390a4a8f --- /dev/null +++ b/api/tests/unit_tests/core/app/test_llm_quota.py @@ -0,0 +1,617 @@ +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +import pytest +from sqlalchemy import create_engine, select + +from configs import dify_config +from core.app.llm.quota import ( + deduct_llm_quota, + deduct_llm_quota_for_model, + ensure_llm_quota_available, + ensure_llm_quota_available_for_model, +) +from core.entities.model_entities import ModelStatus +from core.entities.provider_entities import ProviderQuotaType, QuotaUnit +from core.errors.error import QuotaExceededError +from graphon.model_runtime.entities.llm_entities import LLMUsage +from graphon.model_runtime.entities.model_entities import ModelType +from models import TenantCreditPool +from models.enums import ProviderQuotaType as ModelProviderQuotaType +from models.provider import Provider, ProviderType + + +def 
test_ensure_llm_quota_available_for_model_raises_when_system_model_is_exhausted() -> None: + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + get_provider_model=MagicMock(return_value=SimpleNamespace(status=ModelStatus.QUOTA_EXCEEDED)), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + pytest.raises(QuotaExceededError, match="Model provider openai quota exceeded."), + ): + ensure_llm_quota_available_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) + + provider_configuration.get_provider_model.assert_called_once_with( + model_type=ModelType.LLM, + model="gpt-4o", + ) + + +def test_ensure_llm_quota_available_for_model_raises_when_provider_is_missing() -> None: + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = None + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + pytest.raises(ValueError, match="Provider openai does not exist."), + ): + ensure_llm_quota_available_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) + + +def test_ensure_llm_quota_available_for_model_ignores_custom_provider_configuration() -> None: + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.CUSTOM, + get_provider_model=MagicMock(), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager): + ensure_llm_quota_available_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) + + provider_configuration.get_provider_model.assert_not_called() + + +def 
test_deduct_llm_quota_for_model_uses_identity_based_trial_billing() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 42 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_called_once_with( + tenant_id="tenant-id", + credits_required=42, + ) + + +def test_deduct_llm_quota_for_model_caps_trial_pool_when_usage_exceeds_remaining() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 3 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + engine = create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + with engine.begin() as connection: + connection.execute( + TenantCreditPool.__table__.insert(), + { + "id": "trial-pool", + "tenant_id": "tenant-id", + "pool_type": ModelProviderQuotaType.TRIAL, + "quota_limit": 10, + "quota_used": 9, + }, + ) + + with ( + 
patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + with engine.connect() as connection: + quota_used = connection.scalar(select(TenantCreditPool.quota_used).where(TenantCreditPool.id == "trial-pool")) + + assert quota_used == 10 + + +def test_deduct_llm_quota_for_model_returns_for_unbounded_quota() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 42 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TOKENS, + quota_limit=-1, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_not_called() + + +def test_deduct_llm_quota_for_model_uses_credit_configuration() -> None: + usage = LLMUsage.empty_usage() + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.CREDITS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + 
patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch.object(type(dify_config), "get_model_credits", return_value=9) as mock_get_model_credits, + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_get_model_credits.assert_called_once_with("gpt-4o") + mock_deduct_credits.assert_called_once_with( + tenant_id="tenant-id", + credits_required=9, + ) + + +def test_deduct_llm_quota_for_model_uses_single_charge_for_times_quota() -> None: + usage = LLMUsage.empty_usage() + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TIMES, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_called_once_with( + tenant_id="tenant-id", + credits_required=1, + ) + + +def test_deduct_llm_quota_for_model_uses_paid_billing_pool() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 5 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.PAID, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.PAID, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, 
+ ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_called_once_with( + tenant_id="tenant-id", + credits_required=5, + pool_type="paid", + ) + + +def test_deduct_llm_quota_for_model_updates_free_quota_usage() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 3 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.FREE, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.FREE, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + engine = create_engine("sqlite:///:memory:") + Provider.__table__.create(engine) + with engine.begin() as connection: + connection.execute( + Provider.__table__.insert(), + [ + { + "id": "matching-provider", + "tenant_id": "tenant-id", + "provider_name": "openai", + "provider_type": ProviderType.SYSTEM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 100, + "quota_used": 10, + "is_valid": True, + }, + { + "id": "other-tenant", + "tenant_id": "other-tenant-id", + "provider_name": "openai", + "provider_type": ProviderType.SYSTEM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 100, + "quota_used": 20, + "is_valid": True, + }, + { + "id": "other-provider", + "tenant_id": "tenant-id", + "provider_name": "anthropic", + "provider_type": ProviderType.SYSTEM, + "quota_type": ProviderQuotaType.FREE, + 
"quota_limit": 100, + "quota_used": 30, + "is_valid": True, + }, + { + "id": "custom-provider", + "tenant_id": "tenant-id", + "provider_name": "openai", + "provider_type": ProviderType.CUSTOM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 100, + "quota_used": 40, + "is_valid": True, + }, + ], + ) + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("core.app.llm.quota.db", SimpleNamespace(engine=engine)), + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + with engine.connect() as connection: + quota_used_by_id = dict(connection.execute(select(Provider.id, Provider.quota_used)).all()) + + assert quota_used_by_id == { + "matching-provider": 13, + "other-tenant": 20, + "other-provider": 30, + "custom-provider": 40, + } + + with engine.begin() as connection: + connection.execute( + Provider.__table__.update().where(Provider.id == "matching-provider").values(quota_limit=13, quota_used=13) + ) + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("core.app.llm.quota.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="Model provider openai quota exceeded."), + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + with engine.connect() as connection: + exhausted_quota_used = connection.scalar(select(Provider.quota_used).where(Provider.id == "matching-provider")) + + assert exhausted_quota_used == 13 + + +def test_deduct_llm_quota_for_model_caps_free_quota_and_raises_when_usage_exceeds_remaining() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 3 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.FREE, + quota_configurations=[ + SimpleNamespace( + 
quota_type=ProviderQuotaType.FREE, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + engine = create_engine("sqlite:///:memory:") + Provider.__table__.create(engine) + with engine.begin() as connection: + connection.execute( + Provider.__table__.insert(), + { + "id": "matching-provider", + "tenant_id": "tenant-id", + "provider_name": "openai", + "provider_type": ProviderType.SYSTEM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 15, + "quota_used": 13, + "is_valid": True, + }, + ) + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("core.app.llm.quota.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="Model provider openai quota exceeded."), + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + with engine.connect() as connection: + quota_used = connection.scalar(select(Provider.quota_used).where(Provider.id == "matching-provider")) + + assert quota_used == 15 + + +def test_deduct_llm_quota_for_model_ignores_unknown_quota_type() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 2 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type="unexpected", + quota_configurations=[ + SimpleNamespace( + quota_type="unexpected", + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + 
patch("core.app.llm.quota.sessionmaker") as mock_sessionmaker, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_not_called() + mock_sessionmaker.assert_not_called() + + +def test_deduct_llm_quota_for_model_ignores_custom_provider_configuration() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 2 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.CUSTOM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + patch("core.app.llm.quota.sessionmaker") as mock_sessionmaker, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_not_called() + mock_sessionmaker.assert_not_called() + + +def test_ensure_llm_quota_available_wrapper_warns_and_delegates() -> None: + model_instance = SimpleNamespace( + provider="openai", + model_name="gpt-4o", + provider_model_bundle=SimpleNamespace(configuration=SimpleNamespace(tenant_id="tenant-id")), + model_type_instance=SimpleNamespace(model_type=ModelType.LLM), + ) + + with ( + pytest.deprecated_call(match="ensure_llm_quota_available\\(model_instance=.*deprecated"), + patch("core.app.llm.quota.ensure_llm_quota_available_for_model") as mock_ensure, + ): + ensure_llm_quota_available(model_instance=model_instance) + + mock_ensure.assert_called_once_with( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) + + +def test_ensure_llm_quota_available_wrapper_rejects_non_llm_model_instances() -> None: 
+ model_instance = SimpleNamespace( + provider="openai", + model_name="gpt-4o", + provider_model_bundle=SimpleNamespace(configuration=SimpleNamespace(tenant_id="tenant-id")), + model_type_instance=SimpleNamespace(model_type=ModelType.TEXT_EMBEDDING), + ) + + with ( + pytest.deprecated_call(match="ensure_llm_quota_available\\(model_instance=.*deprecated"), + pytest.raises(ValueError, match="only support LLM model instances"), + ): + ensure_llm_quota_available(model_instance=model_instance) + + +def test_deduct_llm_quota_wrapper_warns_and_delegates() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 7 + model_instance = SimpleNamespace( + provider="openai", + model_name="gpt-4o", + model_type_instance=SimpleNamespace(model_type=ModelType.LLM), + provider_model_bundle=SimpleNamespace(configuration=SimpleNamespace()), + ) + + with ( + pytest.deprecated_call(match="deduct_llm_quota\\(tenant_id=.*deprecated"), + patch("core.app.llm.quota.deduct_llm_quota_for_model") as mock_deduct, + ): + deduct_llm_quota( + tenant_id="tenant-id", + model_instance=model_instance, + usage=usage, + ) + + mock_deduct.assert_called_once_with( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + +def test_deduct_llm_quota_wrapper_rejects_non_llm_model_instances() -> None: + usage = LLMUsage.empty_usage() + model_instance = SimpleNamespace( + provider="openai", + model_name="gpt-4o", + model_type_instance=SimpleNamespace(model_type=ModelType.TEXT_EMBEDDING), + provider_model_bundle=SimpleNamespace(configuration=SimpleNamespace()), + ) + + with ( + pytest.deprecated_call(match="deduct_llm_quota\\(tenant_id=.*deprecated"), + pytest.raises(ValueError, match="only support LLM model instances"), + ): + deduct_llm_quota( + tenant_id="tenant-id", + model_instance=model_instance, + usage=usage, + ) diff --git a/api/tests/unit_tests/core/app/workflow/test_node_factory.py b/api/tests/unit_tests/core/app/workflow/test_node_factory.py index 
30a068f4c5..addce649d5 100644 --- a/api/tests/unit_tests/core/app/workflow/test_node_factory.py +++ b/api/tests/unit_tests/core/app/workflow/test_node_factory.py @@ -8,9 +8,9 @@ from graphon.enums import BuiltinNodeTypes class DummyNode: - def __init__(self, *, node_id, config, graph_init_params, graph_runtime_state, **kwargs): + def __init__(self, *, node_id, data, graph_init_params, graph_runtime_state, **kwargs): self.id = node_id - self.config = config + self.data = data self.graph_init_params = graph_init_params self.graph_runtime_state = graph_runtime_state self.kwargs = kwargs @@ -46,7 +46,7 @@ class TestDifyNodeFactory: lambda **_kwargs: node_class, ) - def _factory(self, monkeypatch): + def _factory(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MAX_STRING_LENGTH", 10) monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MAX_NUMBER", 10) monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MIN_NUMBER", -10) @@ -72,20 +72,20 @@ class TestDifyNodeFactory: graph_runtime_state=SimpleNamespace(), ) - def test_create_node_unknown_type(self, monkeypatch): + def test_create_node_unknown_type(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": "unknown"}}) - def test_create_node_missing_mapping(self, monkeypatch): + def test_create_node_missing_mapping(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) monkeypatch.setattr("core.workflow.node_factory.get_node_type_classes_mapping", lambda: {}) with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": BuiltinNodeTypes.START}}) - def test_create_node_missing_latest_class(self, monkeypatch): + def test_create_node_missing_latest_class(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) monkeypatch.setattr( 
"core.workflow.node_factory.get_node_type_classes_mapping", @@ -96,7 +96,7 @@ class TestDifyNodeFactory: with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": BuiltinNodeTypes.START}}) - def test_create_node_selects_versioned_class(self, monkeypatch): + def test_create_node_selects_versioned_class(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) selected_versions: list[tuple[str, str]] = [] @@ -115,7 +115,7 @@ class TestDifyNodeFactory: assert node.id == "node-1" assert selected_versions == [("snapshot", "called")] - def test_create_node_code_branch(self, monkeypatch): + def test_create_node_code_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyCodeNode) @@ -124,7 +124,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyCodeNode) assert node.id == "node-1" - def test_create_node_template_transform_branch(self, monkeypatch): + def test_create_node_template_transform_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyTemplateTransformNode) @@ -133,7 +133,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyTemplateTransformNode) assert "jinja2_template_renderer" in node.kwargs - def test_create_node_http_request_branch(self, monkeypatch): + def test_create_node_http_request_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyHttpRequestNode) @@ -142,7 +142,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyHttpRequestNode) assert "http_request_config" in node.kwargs - def test_create_node_knowledge_retrieval_branch(self, monkeypatch): + def test_create_node_knowledge_retrieval_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyKnowledgeRetrievalNode) @@ -151,7 +151,7 @@ 
class TestDifyNodeFactory: assert isinstance(node, DummyKnowledgeRetrievalNode) assert node.kwargs == {} - def test_create_node_document_extractor_branch(self, monkeypatch): + def test_create_node_document_extractor_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyDocumentExtractorNode) diff --git a/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py b/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py index 82552470a9..04ce524904 100644 --- a/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py +++ b/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py @@ -2,12 +2,14 @@ from __future__ import annotations from types import SimpleNamespace +import pytest + from core.app.workflow.layers.observability import ObservabilityLayer from graphon.enums import BuiltinNodeTypes class TestObservabilityLayerExtras: - def test_init_tracer_enabled_sets_tracer(self, monkeypatch): + def test_init_tracer_enabled_sets_tracer(self, monkeypatch: pytest.MonkeyPatch): tracer = object() monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", True) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -18,7 +20,7 @@ class TestObservabilityLayerExtras: assert layer._is_disabled is False assert layer._tracer is tracer - def test_init_tracer_disables_when_get_tracer_fails(self, monkeypatch, caplog): + def test_init_tracer_disables_when_get_tracer_fails(self, monkeypatch: pytest.MonkeyPatch, caplog): monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", True) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -33,7 +35,7 @@ class TestObservabilityLayerExtras: assert layer._tracer is None assert "Failed to get OpenTelemetry tracer" in caplog.text - def 
test_init_tracer_disables_when_otel_disabled(self, monkeypatch): + def test_init_tracer_disables_when_otel_disabled(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", False) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -143,7 +145,7 @@ class TestObservabilityLayerExtras: assert layer._node_contexts == {} - def test_on_node_run_end_calls_span_end(self, monkeypatch): + def test_on_node_run_end_calls_span_end(self, monkeypatch: pytest.MonkeyPatch): layer = ObservabilityLayer() layer._is_disabled = False ended: list[str] = [] @@ -164,7 +166,7 @@ class TestObservabilityLayerExtras: assert ended == ["ended"] assert "exec" not in layer._node_contexts - def test_on_node_run_end_logs_detach_failure(self, monkeypatch, caplog): + def test_on_node_run_end_logs_detach_failure(self, monkeypatch: pytest.MonkeyPatch, caplog): layer = ObservabilityLayer() layer._is_disabled = False @@ -186,7 +188,7 @@ class TestObservabilityLayerExtras: assert "Failed to detach OpenTelemetry token" in caplog.text assert "exec" not in layer._node_contexts - def test_on_node_run_start_and_end_creates_span(self, monkeypatch): + def test_on_node_run_start_and_end_creates_span(self, monkeypatch: pytest.MonkeyPatch): layer = ObservabilityLayer() layer._is_disabled = False diff --git a/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py b/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py index cacb4dd4fa..9cefa97bef 100644 --- a/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py +++ b/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py @@ -7,6 +7,7 @@ import pytest from core.app.entities.app_invoke_entities import WorkflowAppGenerateEntity from core.app.workflow.layers.persistence import PersistenceWorkflowInfo, WorkflowPersistenceLayer +from core.ops.ops_trace_manager import TraceTask, TraceTaskName from 
core.workflow.system_variables import SystemVariableKey, build_system_variables from graphon.entities import WorkflowNodeExecution from graphon.entities.pause_reason import SchedulingPause @@ -60,7 +61,10 @@ def _make_layer( workflow_execution_id="run-id", conversation_id="conv-id", ) - runtime_state = GraphRuntimeState(variable_pool=VariablePool(system_variables=system_variables), start_at=0.0) + runtime_state = GraphRuntimeState( + variable_pool=VariablePool.from_bootstrap(system_variables=system_variables), + start_at=0.0, + ) read_only_state = ReadOnlyGraphRuntimeStateWrapper(runtime_state) application_generate_entity = WorkflowAppGenerateEntity.model_construct( @@ -120,7 +124,7 @@ class TestWorkflowPersistenceLayer: with pytest.raises(ValueError, match="workflow_execution_id must be provided"): layer._get_execution_id() - def test_prepare_workflow_inputs_excludes_conversation_id(self, monkeypatch): + def test_prepare_workflow_inputs_excludes_conversation_id(self, monkeypatch: pytest.MonkeyPatch): layer, _, _, _ = _make_layer() monkeypatch.setattr( @@ -214,6 +218,59 @@ class TestWorkflowPersistenceLayer: assert exec_repo.saved[-1].status == WorkflowExecutionStatus.FAILED assert trace_tasks + def test_handle_graph_run_succeeded_enqueues_parent_trace_context(self, monkeypatch): + trace_tasks: list[TraceTask] = [] + trace_manager = SimpleNamespace(user_id="user", add_trace_task=lambda task: trace_tasks.append(task)) + layer, _, _, _ = _make_layer( + extras={ + "external_trace_id": "trace", + "parent_trace_context": { + "parent_workflow_run_id": "outer-workflow-run-1", + "parent_node_execution_id": "outer-node-execution-1", + }, + }, + trace_manager=trace_manager, + ) + layer._handle_graph_run_started() + + captured: dict[str, object] = {} + + def fake_workflow_trace( + self: TraceTask, + *, + workflow_run_id: str | None, + conversation_id: str | None, + user_id: str | None, + total_tokens_override: int | None = None, + ): + captured["trace_type"] = self.trace_type 
+ captured["external_trace_id"] = self.kwargs.get("external_trace_id") + captured["parent_trace_context"] = self.kwargs.get("parent_trace_context") + captured["workflow_run_id"] = workflow_run_id + return {"ok": True} + + monkeypatch.setattr(TraceTask, "workflow_trace", fake_workflow_trace) + + layer._handle_graph_run_succeeded(GraphRunSucceededEvent(outputs={"ok": True})) + + assert trace_tasks + trace_task = trace_tasks[0] + assert trace_task.trace_type == TraceTaskName.WORKFLOW_TRACE + assert trace_task.kwargs["external_trace_id"] == "trace" + assert trace_task.kwargs["parent_trace_context"] == { + "parent_workflow_run_id": "outer-workflow-run-1", + "parent_node_execution_id": "outer-node-execution-1", + } + + trace_task.execute() + + assert captured["trace_type"] == TraceTaskName.WORKFLOW_TRACE + assert captured["external_trace_id"] == "trace" + assert captured["parent_trace_context"] == { + "parent_workflow_run_id": "outer-workflow-run-1", + "parent_node_execution_id": "outer-node-execution-1", + } + def test_handle_graph_run_aborted_sets_status(self): layer, exec_repo, _, _ = _make_layer() layer._handle_graph_run_started() diff --git a/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py b/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py index 7b433ab57b..1125ce6dbc 100644 --- a/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py +++ b/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py @@ -3,6 +3,7 @@ import queue from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.base.tts.app_generator_tts_publisher import ( AppGeneratorTTSPublisher, @@ -17,7 +18,7 @@ from core.base.tts.app_generator_tts_publisher import ( @pytest.fixture -def mock_model_instance(mocker): +def mock_model_instance(mocker: MockerFixture): model = mocker.MagicMock() model.invoke_tts.return_value = [b"audio1", b"audio2"] model.get_tts_voices.return_value = [{"value": "voice1"}, 
{"value": "voice2"}] @@ -33,7 +34,7 @@ def mock_model_manager(mocker, mock_model_instance): @pytest.fixture(autouse=True) -def patch_threads(mocker): +def patch_threads(mocker: MockerFixture): """Prevent real threads from starting during tests""" mocker.patch("threading.Thread.start", return_value=None) @@ -114,7 +115,7 @@ class TestProcessFuture: finish = audio_queue.get() assert finish.status == "finish" - def test_process_future_exception(self, mocker): + def test_process_future_exception(self, mocker: MockerFixture): future_queue = queue.Queue() audio_queue = queue.Queue() @@ -222,7 +223,7 @@ class TestAppGeneratorTTSPublisher: publisher.executor.submit.assert_not_called() - def test_runtime_sentence_threshold_triggers_submit(self, mock_model_manager, mocker): + def test_runtime_sentence_threshold_triggers_submit(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() @@ -297,7 +298,7 @@ class TestAppGeneratorTTSPublisher: publisher.executor.submit.assert_not_called() - def test_runtime_handles_agent_message_event_list_content(self, mock_model_manager, mocker): + def test_runtime_handles_agent_message_event_list_content(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() @@ -332,7 +333,7 @@ class TestAppGeneratorTTSPublisher: assert publisher.msg_text == "Hello " - def test_runtime_handles_agent_message_event_empty_content(self, mock_model_manager, mocker): + def test_runtime_handles_agent_message_event_empty_content(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() @@ -358,7 +359,7 @@ class TestAppGeneratorTTSPublisher: assert publisher.msg_text == "" - def test_runtime_resets_msg_text_when_text_tmp_not_str(self, mock_model_manager, mocker): + def test_runtime_resets_msg_text_when_text_tmp_not_str(self, 
mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() diff --git a/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py index 4c1aa33540..f9b3b1864e 100644 --- a/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py +++ b/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py @@ -1,8 +1,10 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.callback_handler.agent_tool_callback_handler as module +from core.callback_handler.agent_tool_callback_handler import DifyAgentCallbackHandler # ----------------------------- # Fixtures @@ -10,17 +12,17 @@ import core.callback_handler.agent_tool_callback_handler as module @pytest.fixture -def enable_debug(mocker): +def enable_debug(mocker: MockerFixture): mocker.patch.object(module.dify_config, "DEBUG", True) @pytest.fixture -def disable_debug(mocker): +def disable_debug(mocker: MockerFixture): mocker.patch.object(module.dify_config, "DEBUG", False) @pytest.fixture -def mock_print(mocker): +def mock_print(mocker: MockerFixture): return mocker.patch("builtins.print") @@ -71,7 +73,7 @@ class TestPrintText: module.print_text("hello") mock_print.assert_called_once_with("hello", end="", file=None) - def test_print_text_with_color(self, mocker, mock_print): + def test_print_text_with_color(self, mocker: MockerFixture, mock_print): mock_get_color = mocker.patch( "core.callback_handler.agent_tool_callback_handler.get_colored_text", return_value="colored_text", @@ -82,7 +84,7 @@ class TestPrintText: mock_get_color.assert_called_once_with("hello", "green") mock_print.assert_called_once_with("colored_text", end="", file=None) - def test_print_text_with_file_flush(self, mocker): + def test_print_text_with_file_flush(self, mocker: MockerFixture): mock_file = 
MagicMock() mock_print = mocker.patch("builtins.print") @@ -107,21 +109,25 @@ class TestDifyAgentCallbackHandler: assert handler.color == "green" assert handler.current_loop == 1 - def test_on_tool_start_debug_enabled(self, handler, enable_debug, mocker): + def test_on_tool_start_debug_enabled(self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_start("tool1", {"a": 1}) mock_print_text.assert_called() - def test_on_tool_start_debug_disabled(self, handler, disable_debug, mocker): + def test_on_tool_start_debug_disabled( + self, handler: DifyAgentCallbackHandler, disable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_start("tool1", {"a": 1}) mock_print_text.assert_not_called() - def test_on_tool_end_debug_enabled_and_trace(self, handler, enable_debug, mocker): + def test_on_tool_end_debug_enabled_and_trace( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") mock_trace_manager = MagicMock() @@ -137,7 +143,9 @@ class TestDifyAgentCallbackHandler: assert mock_print_text.call_count >= 1 mock_trace_manager.add_trace_task.assert_called_once() - def test_on_tool_end_without_trace_manager(self, handler, enable_debug, mocker): + def test_on_tool_end_without_trace_manager( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_end( @@ -148,14 +156,16 @@ class TestDifyAgentCallbackHandler: assert mock_print_text.call_count >= 1 - def test_on_tool_error_debug_enabled(self, handler, enable_debug, mocker): + def test_on_tool_error_debug_enabled(self, handler: DifyAgentCallbackHandler, 
enable_debug, mocker: MockerFixture): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_error(Exception("error")) mock_print_text.assert_called_once() - def test_on_tool_error_debug_disabled(self, handler, disable_debug, mocker): + def test_on_tool_error_debug_disabled( + self, handler: DifyAgentCallbackHandler, disable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_error(Exception("error")) @@ -163,14 +173,16 @@ class TestDifyAgentCallbackHandler: mock_print_text.assert_not_called() @pytest.mark.parametrize("thought", ["thinking", ""]) - def test_on_agent_start(self, handler, enable_debug, mocker, thought): + def test_on_agent_start(self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture, thought): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_agent_start(thought) mock_print_text.assert_called() - def test_on_agent_finish_increments_loop(self, handler, enable_debug, mocker): + def test_on_agent_finish_increments_loop( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") current_loop = handler.current_loop @@ -179,19 +191,21 @@ class TestDifyAgentCallbackHandler: assert handler.current_loop == current_loop + 1 mock_print_text.assert_called() - def test_on_datasource_start_debug_enabled(self, handler, enable_debug, mocker): + def test_on_datasource_start_debug_enabled( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_datasource_start("ds1", {"x": 1}) mock_print_text.assert_called_once() - def test_ignore_agent_property(self, disable_debug, handler): + def 
test_ignore_agent_property(self, disable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_agent is True - def test_ignore_chat_model_property(self, disable_debug, handler): + def test_ignore_chat_model_property(self, disable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_chat_model is True - def test_ignore_properties_when_debug_enabled(self, enable_debug, handler): + def test_ignore_properties_when_debug_enabled(self, enable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_agent is False assert handler.ignore_chat_model is False diff --git a/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py index 8e5670e9be..f23669c3c7 100644 --- a/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py +++ b/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom from core.callback_handler.index_tool_callback_handler import ( @@ -7,12 +8,12 @@ from core.callback_handler.index_tool_callback_handler import ( @pytest.fixture -def mock_queue_manager(mocker): +def mock_queue_manager(mocker: MockerFixture): return mocker.Mock() @pytest.fixture -def handler(mock_queue_manager, mocker): +def handler(mock_queue_manager, mocker: MockerFixture): mocker.patch( "core.callback_handler.index_tool_callback_handler.db", ) @@ -34,7 +35,7 @@ class TestOnQuery: (InvokeFrom.WEB_APP, "end_user"), ], ) - def test_on_query_success_roles(self, mocker, mock_queue_manager, invoke_from, expected_role): + def test_on_query_success_roles(self, mocker: MockerFixture, mock_queue_manager, invoke_from, expected_role): # Arrange mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") @@ -57,7 +58,7 @@ class TestOnQuery: assert dataset_query.created_by_role == 
expected_role mock_db.session.commit.assert_called_once() - def test_on_query_none_values(self, mocker, mock_queue_manager): + def test_on_query_none_values(self, mocker: MockerFixture, mock_queue_manager): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") handler = DatasetIndexToolCallbackHandler( @@ -75,7 +76,7 @@ class TestOnQuery: class TestOnToolEnd: - def test_on_tool_end_no_metadata(self, handler, mocker): + def test_on_tool_end_no_metadata(self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") document = mocker.Mock() @@ -85,7 +86,9 @@ class TestOnToolEnd: mock_db.session.commit.assert_not_called() - def test_on_tool_end_dataset_document_not_found(self, handler, mocker): + def test_on_tool_end_dataset_document_not_found( + self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture + ): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_db.session.scalar.return_value = None @@ -96,7 +99,9 @@ class TestOnToolEnd: mock_db.session.scalar.assert_called_once() - def test_on_tool_end_parent_child_index_with_child(self, handler, mocker): + def test_on_tool_end_parent_child_index_with_child( + self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture + ): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_dataset_doc = mocker.Mock() @@ -119,7 +124,7 @@ class TestOnToolEnd: mock_db.session.execute.assert_called_once() mock_db.session.commit.assert_called_once() - def test_on_tool_end_non_parent_child_index(self, handler, mocker): + def test_on_tool_end_non_parent_child_index(self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_dataset_doc = mocker.Mock() @@ -139,12 +144,12 @@ class TestOnToolEnd: mock_db.session.execute.assert_called_once() 
mock_db.session.commit.assert_called_once() - def test_on_tool_end_empty_documents(self, handler): + def test_on_tool_end_empty_documents(self, handler: DatasetIndexToolCallbackHandler): handler.on_tool_end([]) class TestReturnRetrieverResourceInfo: - def test_publish_called(self, handler, mock_queue_manager, mocker): + def test_publish_called(self, handler: DatasetIndexToolCallbackHandler, mock_queue_manager, mocker: MockerFixture): mock_event = mocker.patch("core.callback_handler.index_tool_callback_handler.QueueRetrieverResourcesEvent") resources = [mocker.Mock()] diff --git a/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py index 131fb006ed..5b53c5965c 100644 --- a/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py +++ b/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, call import pytest +from pytest_mock import MockerFixture from core.callback_handler.workflow_tool_callback_handler import ( DifyWorkflowCallbackHandler, @@ -26,13 +27,13 @@ def handler(): @pytest.fixture -def mock_print_text(mocker): +def mock_print_text(mocker: MockerFixture): """Mock print_text to avoid real stdout printing.""" return mocker.patch("core.callback_handler.workflow_tool_callback_handler.print_text") class TestDifyWorkflowCallbackHandler: - def test_on_tool_execution_single_output_success(self, handler, mock_print_text): + def test_on_tool_execution_single_output_success(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "test_tool" tool_inputs = {"a": 1} @@ -62,7 +63,7 @@ class TestDifyWorkflowCallbackHandler: ] ) - def test_on_tool_execution_multiple_outputs(self, handler, mock_print_text): + def test_on_tool_execution_multiple_outputs(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name 
= "multi_tool" outputs = [ @@ -83,7 +84,7 @@ class TestDifyWorkflowCallbackHandler: assert results == outputs assert mock_print_text.call_count == 4 * len(outputs) - def test_on_tool_execution_empty_iterable(self, handler, mock_print_text): + def test_on_tool_execution_empty_iterable(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "empty_tool" @@ -108,7 +109,9 @@ class TestDifyWorkflowCallbackHandler: ("not_iterable", AttributeError), ], ) - def test_on_tool_execution_invalid_outputs_type(self, handler, invalid_outputs, expected_exception): + def test_on_tool_execution_invalid_outputs_type( + self, handler: DifyWorkflowCallbackHandler, invalid_outputs, expected_exception + ): # Arrange tool_name = "invalid_tool" @@ -122,7 +125,7 @@ class TestDifyWorkflowCallbackHandler: ) ) - def test_on_tool_execution_long_json_truncation(self, handler, mock_print_text): + def test_on_tool_execution_long_json_truncation(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "long_json_tool" long_json = "x" * 1500 @@ -144,7 +147,7 @@ class TestDifyWorkflowCallbackHandler: color="blue", ) - def test_on_tool_execution_model_dump_json_exception(self, handler, mock_print_text): + def test_on_tool_execution_model_dump_json_exception(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "exception_tool" bad_message = MagicMock() @@ -163,7 +166,9 @@ class TestDifyWorkflowCallbackHandler: # Ensure first two prints happened before failure assert mock_print_text.call_count >= 2 - def test_on_tool_execution_none_message_id_and_trace_manager(self, handler, mock_print_text): + def test_on_tool_execution_none_message_id_and_trace_manager( + self, handler: DifyWorkflowCallbackHandler, mock_print_text + ): # Arrange tool_name = "optional_params_tool" message = DummyToolInvokeMessage('{"data": "ok"}') diff --git a/api/tests/unit_tests/core/datasource/test_datasource_manager.py 
b/api/tests/unit_tests/core/datasource/test_datasource_manager.py index deeac49bbc..8842d678c7 100644 --- a/api/tests/unit_tests/core/datasource/test_datasource_manager.py +++ b/api/tests/unit_tests/core/datasource/test_datasource_manager.py @@ -2,6 +2,7 @@ import types from collections.abc import Generator import pytest +from pytest_mock import MockerFixture from contexts.wrapper import RecyclableContextVar from core.datasource.datasource_manager import DatasourceManager @@ -37,7 +38,7 @@ def _invalidate_recyclable_contextvars() -> None: RecyclableContextVar.increment_thread_recycles() -def test_get_icon_url_calls_runtime(mocker): +def test_get_icon_url_calls_runtime(mocker: MockerFixture): fake_runtime = mocker.Mock() fake_runtime.get_icon_url.return_value = "https://icon" mocker.patch.object(DatasourceManager, "get_datasource_runtime", return_value=fake_runtime) @@ -52,7 +53,7 @@ def test_get_icon_url_calls_runtime(mocker): DatasourceManager.get_datasource_runtime.assert_called_once() -def test_get_datasource_runtime_delegates_to_provider_controller(mocker): +def test_get_datasource_runtime_delegates_to_provider_controller(mocker: MockerFixture): provider_controller = mocker.Mock() provider_controller.get_datasource.return_value = object() mocker.patch.object(DatasourceManager, "get_datasource_plugin_provider", return_value=provider_controller) @@ -114,7 +115,7 @@ def test_get_datasource_plugin_provider_creates_controller_and_caches(mocker, da assert ctrl_cls.call_count == 1 -def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mocker): +def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mocker: MockerFixture): _invalidate_recyclable_contextvars() mocker.patch( "core.datasource.datasource_manager.PluginDatasourceManager.fetch_datasource_provider", @@ -129,7 +130,7 @@ def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mock ) -def 
test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker): +def test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker: MockerFixture): _invalidate_recyclable_contextvars() provider_entity = types.SimpleNamespace(declaration=object(), plugin_id="plugin", plugin_unique_identifier="uniq") mocker.patch( @@ -145,7 +146,7 @@ def test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker): ) -def test_get_datasource_plugin_provider_raises_when_controller_none(mocker): +def test_get_datasource_plugin_provider_raises_when_controller_none(mocker: MockerFixture): _invalidate_recyclable_contextvars() provider_entity = types.SimpleNamespace(declaration=object(), plugin_id="plugin", plugin_unique_identifier="uniq") mocker.patch( @@ -165,7 +166,7 @@ def test_get_datasource_plugin_provider_raises_when_controller_none(mocker): ) -def test_stream_online_results_yields_messages_online_document(mocker): +def test_stream_online_results_yields_messages_online_document(mocker: MockerFixture): # stub runtime to yield a text message def _doc_messages(**_): yield from _gen_messages_text_only("hello") @@ -195,7 +196,7 @@ def test_stream_online_results_yields_messages_online_document(mocker): assert msgs[0].message.text == "hello" -def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_document(mocker): +def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_document(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -229,7 +230,7 @@ def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_do assert final_value == {} -def test_stream_online_results_raises_when_missing_params(mocker): +def test_stream_online_results_raises_when_missing_params(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -279,7 +280,7 @@ def 
test_stream_online_results_raises_when_missing_params(mocker): ) -def test_stream_online_results_yields_messages_and_returns_empty_dict_online_drive(mocker): +def test_stream_online_results_yields_messages_and_returns_empty_dict_online_drive(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -313,7 +314,7 @@ def test_stream_online_results_yields_messages_and_returns_empty_dict_online_dri assert final_value == {} -def test_stream_online_results_raises_for_unsupported_stream_type(mocker): +def test_stream_online_results_raises_for_unsupported_stream_type(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "get_datasource_runtime", return_value=mocker.Mock()) mocker.patch( "core.datasource.datasource_manager.DatasourceProviderService.get_datasource_credentials", @@ -337,7 +338,7 @@ def test_stream_online_results_raises_for_unsupported_stream_type(mocker): ) -def test_stream_node_events_emits_events_online_document(mocker): +def test_stream_node_events_emits_events_online_document(mocker: MockerFixture): # make manager's low-level stream produce TEXT only mocker.patch.object( DatasourceManager, @@ -370,7 +371,7 @@ def test_stream_node_events_emits_events_online_document(mocker): assert events[-1].node_run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED -def test_stream_node_events_builds_file_and_variables_from_messages(mocker): +def test_stream_node_events_builds_file_and_variables_from_messages(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -478,7 +479,7 @@ def test_stream_node_events_builds_file_and_variables_from_messages(mocker): assert events[-1].node_run_result.outputs["x"] == 1 -def test_stream_node_events_raises_when_toolfile_missing(mocker): +def test_stream_node_events_raises_when_toolfile_missing(mocker: MockerFixture): 
mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -526,7 +527,7 @@ def test_stream_node_events_raises_when_toolfile_missing(mocker): ) -def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(mocker): +def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) file_in = File( @@ -580,7 +581,7 @@ def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(moc assert completed.node_run_result.outputs["datasource_type"] == DatasourceProviderType.ONLINE_DRIVE -def test_stream_node_events_skips_file_build_for_non_online_types(mocker): +def test_stream_node_events_skips_file_build_for_non_online_types(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -620,7 +621,7 @@ def test_stream_node_events_skips_file_build_for_non_online_types(mocker): assert events[-1].node_run_result.outputs["file"] is None -def test_get_upload_file_by_id_builds_file(mocker): +def test_get_upload_file_by_id_builds_file(mocker: MockerFixture): # fake UploadFile row fake_row = types.SimpleNamespace( id="fid", @@ -654,7 +655,7 @@ def test_get_upload_file_by_id_builds_file(mocker): assert f.storage_key == "k" -def test_get_upload_file_by_id_raises_when_missing(mocker): +def test_get_upload_file_by_id_raises_when_missing(mocker: MockerFixture): class _S: def __enter__(self): return self diff --git a/api/tests/unit_tests/core/entities/test_entities_provider_configuration.py b/api/tests/unit_tests/core/entities/test_entities_provider_configuration.py index a28143026f..1b714d6830 100644 --- a/api/tests/unit_tests/core/entities/test_entities_provider_configuration.py +++ 
b/api/tests/unit_tests/core/entities/test_entities_provider_configuration.py @@ -354,7 +354,8 @@ def test_validate_provider_credentials_handles_hidden_secret_value() -> None: with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.decrypt_token", return_value="restored-key"): with patch( @@ -379,7 +380,10 @@ def test_validate_provider_credentials_without_credential_id() -> None: mock_factory = Mock() mock_factory.provider_credentials_validate.return_value = {"region": "us"} - with patch("core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory): + with patch( + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), + ): validated = configuration.validate_provider_credentials(credentials={"region": "us"}) assert validated == {"region": "us"} @@ -426,23 +430,37 @@ def test_switch_preferred_provider_type_creates_record_when_missing() -> None: def test_get_model_type_instance_and_schema_delegate_to_factory() -> None: configuration = _build_provider_configuration() - mock_factory = Mock() mock_model_type_instance = Mock() mock_schema = _build_ai_model("gpt-4o") - mock_factory.get_model_type_instance.return_value = mock_model_type_instance + mock_factory = Mock() + mock_factory.get_provider_schema.return_value = configuration.provider mock_factory.get_model_schema.return_value = mock_schema + mock_assembly = Mock() + mock_assembly.model_runtime = Mock() + mock_assembly.model_provider_factory = mock_factory - with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", - 
return_value=mock_factory, - ) as mock_factory_builder: + with ( + patch( + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=mock_assembly, + ) as mock_assembly_builder, + patch( + "core.entities.provider_configuration.create_model_type_instance", + return_value=mock_model_type_instance, + ) as mock_model_builder, + ): model_type_instance = configuration.get_model_type_instance(ModelType.LLM) model_schema = configuration.get_model_schema(ModelType.LLM, "gpt-4o", {"api_key": "x"}) assert model_type_instance is mock_model_type_instance assert model_schema is mock_schema - assert mock_factory_builder.call_count == 2 - mock_factory.get_model_type_instance.assert_called_once_with(provider="openai", model_type=ModelType.LLM) + assert mock_assembly_builder.call_count == 2 + mock_factory.get_provider_schema.assert_called_once_with(provider="openai") + mock_model_builder.assert_called_once_with( + runtime=mock_assembly.model_runtime, + provider_schema=configuration.provider, + model_type=ModelType.LLM, + ) mock_factory.get_model_schema.assert_called_once_with( provider="openai", model_type=ModelType.LLM, @@ -456,17 +474,21 @@ def test_get_model_type_instance_and_schema_reuse_bound_runtime_factory() -> Non bound_runtime = Mock() configuration.bind_model_runtime(bound_runtime) - mock_factory = Mock() mock_model_type_instance = Mock() mock_schema = _build_ai_model("gpt-4o") - mock_factory.get_model_type_instance.return_value = mock_model_type_instance + mock_factory = Mock() + mock_factory.get_provider_schema.return_value = configuration.provider mock_factory.get_model_schema.return_value = mock_schema with ( patch( "core.entities.provider_configuration.ModelProviderFactory", return_value=mock_factory ) as mock_factory_cls, - patch("core.entities.provider_configuration.create_plugin_model_provider_factory") as mock_factory_builder, + patch("core.entities.provider_configuration.create_plugin_model_assembly") as mock_assembly_builder, + patch( 
+ "core.entities.provider_configuration.create_model_type_instance", + return_value=mock_model_type_instance, + ) as mock_model_builder, ): model_type_instance = configuration.get_model_type_instance(ModelType.LLM) model_schema = configuration.get_model_schema(ModelType.LLM, "gpt-4o", {"api_key": "x"}) @@ -474,8 +496,14 @@ def test_get_model_type_instance_and_schema_reuse_bound_runtime_factory() -> Non assert model_type_instance is mock_model_type_instance assert model_schema is mock_schema assert mock_factory_cls.call_count == 2 - mock_factory_cls.assert_called_with(model_runtime=bound_runtime) - mock_factory_builder.assert_not_called() + mock_factory_cls.assert_called_with(runtime=bound_runtime) + mock_assembly_builder.assert_not_called() + mock_factory.get_provider_schema.assert_called_once_with(provider="openai") + mock_model_builder.assert_called_once_with( + runtime=bound_runtime, + provider_schema=configuration.provider, + model_type=ModelType.LLM, + ) def test_get_provider_model_returns_none_when_model_not_found() -> None: @@ -504,7 +532,10 @@ def test_get_provider_models_system_deduplicates_sorts_and_filters_active() -> N mock_factory = Mock() mock_factory.get_provider_schema.return_value = provider_schema - with patch("core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory): + with patch( + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), + ): all_models = configuration.get_provider_models(model_type=ModelType.LLM, only_active=False) active_models = configuration.get_provider_models(model_type=ModelType.LLM, only_active=True) @@ -722,7 +753,8 @@ def test_validate_provider_credentials_handles_invalid_original_json() -> None: with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + 
"core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.encrypt_token", return_value="enc-key"): validated = configuration.validate_provider_credentials( @@ -1069,7 +1101,8 @@ def test_validate_custom_model_credentials_supports_hidden_reuse_and_sessionless with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.decrypt_token", return_value="raw"): with patch("core.entities.provider_configuration.encrypter.encrypt_token", return_value="enc-new"): @@ -1083,7 +1116,10 @@ def test_validate_custom_model_credentials_supports_hidden_reuse_and_sessionless mock_factory2 = Mock() mock_factory2.model_credentials_validate.return_value = {"region": "us"} - with patch("core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory2): + with patch( + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory2), + ): validated = configuration.validate_custom_model_credentials( model_type=ModelType.LLM, model="gpt-4o", @@ -1575,7 +1611,8 @@ def test_validate_provider_credentials_uses_empty_original_when_record_missing() with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with 
patch("core.entities.provider_configuration.encrypter.encrypt_token", return_value="enc-new"): validated = configuration.validate_provider_credentials( @@ -1701,7 +1738,8 @@ def test_validate_custom_model_credentials_handles_invalid_original_json() -> No with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.encrypt_token", return_value="enc-new"): validated = configuration.validate_custom_model_credentials( diff --git a/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py b/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py index 399b531205..9c1cbe82a0 100644 --- a/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py +++ b/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py @@ -1,11 +1,12 @@ import httpx import pytest +from pytest_mock import MockerFixture from core.extension.api_based_extension_requestor import APIBasedExtensionRequestor from models.api_based_extension import APIBasedExtensionPoint -def test_request_success(mocker): +def test_request_success(mocker: MockerFixture): # Mock httpx.Client and its context manager mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value @@ -28,7 +29,7 @@ def test_request_success(mocker): ) -def test_request_with_ssrf_proxy(mocker): +def test_request_with_ssrf_proxy(mocker: MockerFixture): # Mock dify_config mocker.patch("configs.dify_config.SSRF_PROXY_HTTP_URL", "http://proxy:8080") mocker.patch("configs.dify_config.SSRF_PROXY_HTTPS_URL", "https://proxy:8081") @@ -59,7 +60,7 @@ def test_request_with_ssrf_proxy(mocker): assert mock_transport.call_count == 2 -def 
test_request_with_only_one_proxy_config(mocker): +def test_request_with_only_one_proxy_config(mocker: MockerFixture): # Mock dify_config with only one proxy mocker.patch("configs.dify_config.SSRF_PROXY_HTTP_URL", "http://proxy:8080") mocker.patch("configs.dify_config.SSRF_PROXY_HTTPS_URL", None) @@ -84,7 +85,7 @@ def test_request_with_only_one_proxy_config(mocker): assert kwargs.get("mounts") is None -def test_request_timeout(mocker): +def test_request_timeout(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -95,7 +96,7 @@ def test_request_timeout(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_connection_error(mocker): +def test_request_connection_error(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -106,7 +107,7 @@ def test_request_connection_error(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_error_status_code(mocker): +def test_request_error_status_code(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -121,7 +122,7 @@ def test_request_error_status_code(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_error_status_code_long_content(mocker): +def test_request_error_status_code_long_content(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) diff --git a/api/tests/unit_tests/core/external_data_tool/test_external_data_fetch.py b/api/tests/unit_tests/core/external_data_tool/test_external_data_fetch.py index 86b461cf04..c1c1291281 100644 --- 
a/api/tests/unit_tests/core/external_data_tool/test_external_data_fetch.py +++ b/api/tests/unit_tests/core/external_data_tool/test_external_data_fetch.py @@ -13,7 +13,7 @@ class TestExternalDataFetch: app = Flask(__name__) return app - def test_fetch_success(self, app): + def test_fetch_success(self, app: Flask): with app.app_context(): fetcher = ExternalDataFetch() @@ -79,7 +79,7 @@ class TestExternalDataFetch: assert result_inputs == inputs assert result_inputs is not inputs # Should be a copy - def test_fetch_with_none_variable(self, app): + def test_fetch_with_none_variable(self, app: Flask): with app.app_context(): fetcher = ExternalDataFetch() tool = ExternalDataVariableEntity(variable="var1", type="type1", config={}) @@ -95,7 +95,7 @@ class TestExternalDataFetch: assert "var1" not in result_inputs assert result_inputs == {"in": "val"} - def test_query_external_data_tool(self, app): + def test_query_external_data_tool(self, app: Flask): fetcher = ExternalDataFetch() tool = ExternalDataVariableEntity(variable="var1", type="type1", config={"k": "v"}) diff --git a/api/tests/unit_tests/core/helper/test_creators.py b/api/tests/unit_tests/core/helper/test_creators.py index df67d3f513..8750f6d907 100644 --- a/api/tests/unit_tests/core/helper/test_creators.py +++ b/api/tests/unit_tests/core/helper/test_creators.py @@ -8,7 +8,7 @@ from yarl import URL @pytest.fixture(autouse=True) -def _patch_creators_url(monkeypatch): +def _patch_creators_url(monkeypatch: pytest.MonkeyPatch): """Patch the module-level creators_platform_api_url for all tests.""" monkeypatch.setattr( "core.helper.creators.creators_platform_api_url", diff --git a/api/tests/unit_tests/core/helper/test_moderation.py b/api/tests/unit_tests/core/helper/test_moderation.py index a0dfa86d20..c33002329b 100644 --- a/api/tests/unit_tests/core/helper/test_moderation.py +++ b/api/tests/unit_tests/core/helper/test_moderation.py @@ -68,8 +68,8 @@ def test_check_moderation_returns_true_when_model_accepts_text(mocker: 
MockerFix mocker.patch("core.helper.moderation.secrets.choice", return_value="chunk") moderation_model = SimpleNamespace(invoke=lambda **invoke_kwargs: invoke_kwargs["text"] == "chunk") - factory = SimpleNamespace(get_model_type_instance=lambda **_factory_kwargs: moderation_model) - mocker.patch("core.helper.moderation.create_plugin_model_provider_factory", return_value=factory) + assembly = SimpleNamespace(create_model_type_instance=lambda **_factory_kwargs: moderation_model) + mocker.patch("core.helper.moderation.create_plugin_model_assembly", return_value=assembly) assert ( check_moderation( @@ -91,7 +91,7 @@ def test_check_moderation_returns_true_when_text_is_empty(mocker: MockerFixture) provider_map={openai_provider: hosting_openai}, ), ) - factory_mock = mocker.patch("core.helper.moderation.create_plugin_model_provider_factory") + factory_mock = mocker.patch("core.helper.moderation.create_plugin_model_assembly") choice_mock = mocker.patch("core.helper.moderation.secrets.choice") assert ( @@ -119,8 +119,8 @@ def test_check_moderation_returns_false_when_model_rejects_text(mocker: MockerFi mocker.patch("core.helper.moderation.secrets.choice", return_value="chunk") moderation_model = SimpleNamespace(invoke=lambda **_invoke_kwargs: False) - factory = SimpleNamespace(get_model_type_instance=lambda **_factory_kwargs: moderation_model) - mocker.patch("core.helper.moderation.create_plugin_model_provider_factory", return_value=factory) + assembly = SimpleNamespace(create_model_type_instance=lambda **_factory_kwargs: moderation_model) + mocker.patch("core.helper.moderation.create_plugin_model_assembly", return_value=assembly) assert ( check_moderation( @@ -147,8 +147,8 @@ def test_check_moderation_raises_bad_request_when_provider_call_fails(mocker: Mo failing_model = SimpleNamespace( invoke=lambda **_invoke_kwargs: (_ for _ in ()).throw(RuntimeError("boom")), ) - factory = SimpleNamespace(get_model_type_instance=lambda **_factory_kwargs: failing_model) - 
mocker.patch("core.helper.moderation.create_plugin_model_provider_factory", return_value=factory) + assembly = SimpleNamespace(create_model_type_instance=lambda **_factory_kwargs: failing_model) + mocker.patch("core.helper.moderation.create_plugin_model_assembly", return_value=assembly) with pytest.raises(InvokeBadRequestError, match="Rate limit exceeded, please try again later."): check_moderation( diff --git a/api/tests/unit_tests/core/helper/test_trace_id_helper.py b/api/tests/unit_tests/core/helper/test_trace_id_helper.py index 27bfe1af05..96e2d44730 100644 --- a/api/tests/unit_tests/core/helper/test_trace_id_helper.py +++ b/api/tests/unit_tests/core/helper/test_trace_id_helper.py @@ -1,6 +1,12 @@ import pytest -from core.helper.trace_id_helper import extract_external_trace_id_from_args, get_external_trace_id, is_valid_trace_id +from core.helper.trace_id_helper import ( + ParentTraceContext, + extract_external_trace_id_from_args, + extract_parent_trace_context_from_args, + get_external_trace_id, + is_valid_trace_id, +) class DummyRequest: @@ -84,3 +90,92 @@ class TestTraceIdHelper: def test_extract_external_trace_id_from_args(self, args, expected): """Test extraction of external_trace_id from args mapping""" assert extract_external_trace_id_from_args(args) == expected + + @pytest.mark.parametrize( + ("args", "expected"), + [ + ( + { + "parent_trace_context": { + "parent_workflow_run_id": "workflow-run-1", + "parent_node_execution_id": "node-execution-1", + } + }, + { + "parent_trace_context": ParentTraceContext( + parent_workflow_run_id="workflow-run-1", + parent_node_execution_id="node-execution-1", + ) + }, + ), + ( + { + "parent_trace_context": { + "parent_workflow_run_id": "workflow-run-1", + } + }, + {}, + ), + ( + { + "parent_trace_context": { + "parent_node_execution_id": "node-execution-1", + } + }, + {}, + ), + ( + { + "parent_trace_context": { + "parent_workflow_run_id": 123, + "parent_node_execution_id": "node-execution-1", + } + }, + {}, + ), + ( + 
{ + "parent_trace_context": { + "parent_workflow_run_id": "workflow-run-1", + "parent_node_execution_id": None, + } + }, + {}, + ), + ({}, {}), + ], + ) + def test_extract_parent_trace_context_from_args(self, args, expected): + """Test extraction of parent_trace_context from args mapping""" + assert extract_parent_trace_context_from_args(args) == expected + + def test_extract_parent_trace_context_returns_typed_context(self): + """Parent trace context is parsed into a Pydantic value object.""" + result = extract_parent_trace_context_from_args( + { + "parent_trace_context": { + "parent_workflow_run_id": "workflow-run-1", + "parent_node_execution_id": "node-execution-1", + } + } + ) + + assert result == { + "parent_trace_context": ParentTraceContext( + parent_workflow_run_id="workflow-run-1", + parent_node_execution_id="node-execution-1", + ) + } + + def test_extract_parent_trace_context_rejects_incomplete_typed_context(self): + """Typed parent trace context follows the same completeness rule as raw mappings.""" + result = extract_parent_trace_context_from_args( + { + "parent_trace_context": ParentTraceContext( + parent_workflow_run_id="workflow-run-1", + parent_node_execution_id=None, + ) + } + ) + + assert result == {} diff --git a/api/tests/unit_tests/core/memory/test_token_buffer_memory.py b/api/tests/unit_tests/core/memory/test_token_buffer_memory.py index f459250b8e..72c24bda96 100644 --- a/api/tests/unit_tests/core/memory/test_token_buffer_memory.py +++ b/api/tests/unit_tests/core/memory/test_token_buffer_memory.py @@ -198,6 +198,48 @@ class TestBuildPromptMessageWithFiles: assert isinstance(result.content[-1], TextPromptMessageContent) assert result.content[-1].data == "user text" + def test_replay_does_not_pass_config_to_file_factory(self): + """Replay contract: history files were validated on upload, so this + path must not forward a FileUploadConfig. 
The factory's signature + no longer accepts ``config``; this test guards against a future + regression that re-introduces it.""" + conv = _make_conversation(AppMode.CHAT) + mem = TokenBufferMemory(conversation=conv, model_instance=_make_model_instance()) + + mock_file_extra_config = MagicMock() + mock_file_extra_config.image_config = None + + real_image_content = ImagePromptMessageContent( + url="http://example.com/img.png", format="png", mime_type="image/png" + ) + mock_app_record = MagicMock() + mock_app_record.tenant_id = "tenant-1" + + with ( + patch( + "core.memory.token_buffer_memory.FileUploadConfigManager.convert", + return_value=mock_file_extra_config, + ), + patch( + "core.memory.token_buffer_memory.file_factory.build_from_message_file", + return_value=MagicMock(), + ) as mock_build, + patch( + "core.memory.token_buffer_memory.file_manager.to_prompt_message_content", + return_value=real_image_content, + ), + ): + mem._build_prompt_message_with_files( + message_files=[MagicMock()], + text_content="user text", + message=_make_message(), + app_record=mock_app_record, + is_user_message=True, + ) + + mock_build.assert_called_once() + assert "config" not in mock_build.call_args.kwargs + @pytest.mark.parametrize("mode", [AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.COMPLETION]) def test_chat_mode_with_files_assistant_message(self, mode): """When files are present, returns AssistantPromptMessage with list content.""" diff --git a/api/tests/unit_tests/core/model_runtime/test_model_provider_factory.py b/api/tests/unit_tests/core/model_runtime/test_model_provider_factory.py index c4fd970562..2b51dc8182 100644 --- a/api/tests/unit_tests/core/model_runtime/test_model_provider_factory.py +++ b/api/tests/unit_tests/core/model_runtime/test_model_provider_factory.py @@ -2,6 +2,7 @@ from unittest.mock import Mock import pytest +from core.plugin.impl.model_runtime_factory import create_model_type_instance from graphon.model_runtime.entities.common_entities import I18nObject 
from graphon.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType from graphon.model_runtime.entities.provider_entities import ( @@ -73,7 +74,7 @@ def test_model_provider_factory_resolves_runtime_provider_name() -> None: supported_model_types=[ModelType.LLM], configurate_methods=[ConfigurateMethod.PREDEFINED_MODEL], ) - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime([provider])) + factory = ModelProviderFactory(runtime=_FakeModelRuntime([provider])) provider_schema = factory.get_model_provider("openai") @@ -98,7 +99,7 @@ def test_model_provider_factory_resolves_canonical_short_name_independent_of_pro configurate_methods=[ConfigurateMethod.PREDEFINED_MODEL], ), ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) provider_schema = factory.get_model_provider("openai") @@ -107,8 +108,8 @@ def test_model_provider_factory_resolves_canonical_short_name_independent_of_pro def test_model_provider_factory_requires_runtime() -> None: - with pytest.raises(ValueError, match="model_runtime is required"): - ModelProviderFactory(model_runtime=None) # type: ignore[arg-type] + with pytest.raises(ValueError, match="runtime is required"): + ModelProviderFactory(runtime=None) # type: ignore[arg-type] def test_model_provider_factory_get_providers_returns_runtime_providers() -> None: @@ -119,7 +120,7 @@ def test_model_provider_factory_get_providers_returns_runtime_providers() -> Non supported_model_types=[ModelType.LLM], ) ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) result = factory.get_providers() @@ -133,7 +134,7 @@ def test_model_provider_factory_get_provider_schema_delegates_to_provider_lookup provider_name="openai", supported_model_types=[ModelType.LLM], ) - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime([provider])) + 
factory = ModelProviderFactory(runtime=_FakeModelRuntime([provider])) result = factory.get_provider_schema("openai") @@ -142,7 +143,7 @@ def test_model_provider_factory_get_provider_schema_delegates_to_provider_lookup def test_model_provider_factory_raises_for_unknown_provider() -> None: factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( + runtime=_FakeModelRuntime( [ _build_provider( provider="langgenius/openai/openai", @@ -172,7 +173,7 @@ def test_model_provider_factory_get_models_filters_provider_and_model_type() -> models=[_build_model("rerank-v3", ModelType.RERANK)], ), ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) results = factory.get_models(provider="openai", model_type=ModelType.LLM) @@ -196,7 +197,7 @@ def test_model_provider_factory_get_models_skips_providers_without_requested_mod models=[_build_model("eleven_multilingual_v2", ModelType.TTS)], ), ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) results = factory.get_models(model_type=ModelType.TTS) @@ -214,7 +215,7 @@ def test_model_provider_factory_get_models_without_model_type_keeps_all_provider models=[_build_model("gpt-4o-mini", ModelType.LLM), _build_model("tts-1", ModelType.TTS)], ) ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) results = factory.get_models(provider="openai") @@ -242,7 +243,7 @@ def test_model_provider_factory_validates_provider_credentials() -> None: ) ] ) - factory = ModelProviderFactory(model_runtime=runtime) + factory = ModelProviderFactory(runtime=runtime) filtered = factory.provider_credentials_validate( provider="openai", @@ -258,7 +259,7 @@ def test_model_provider_factory_validates_provider_credentials() -> None: def 
test_model_provider_factory_provider_credentials_validate_requires_schema() -> None: factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( + runtime=_FakeModelRuntime( [ _build_provider( provider="langgenius/openai/openai", @@ -294,7 +295,7 @@ def test_model_provider_factory_validates_model_credentials() -> None: ) ] ) - factory = ModelProviderFactory(model_runtime=runtime) + factory = ModelProviderFactory(runtime=runtime) filtered = factory.model_credentials_validate( provider="openai", @@ -314,7 +315,7 @@ def test_model_provider_factory_validates_model_credentials() -> None: def test_model_provider_factory_model_credentials_validate_requires_schema() -> None: factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( + runtime=_FakeModelRuntime( [ _build_provider( provider="langgenius/openai/openai", @@ -346,7 +347,7 @@ def test_model_provider_factory_get_model_schema_and_icon_use_canonical_provider ) runtime.get_model_schema.return_value = "schema" runtime.get_provider_icon.return_value = (b"icon", "image/png") - factory = ModelProviderFactory(model_runtime=runtime) + factory = ModelProviderFactory(runtime=runtime) assert ( factory.get_model_schema( @@ -382,39 +383,43 @@ def test_model_provider_factory_get_model_schema_and_icon_use_canonical_provider (ModelType.TTS, TTSModel), ], ) -def test_model_provider_factory_builds_model_type_instances( +def test_create_model_type_instance_builds_model_wrappers( model_type: ModelType, expected_type: type[object], ) -> None: - factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( - [ - _build_provider( - provider="langgenius/openai/openai", - provider_name="openai", - supported_model_types=[model_type], - ) - ] - ) + runtime = _FakeModelRuntime( + [ + _build_provider( + provider="langgenius/openai/openai", + provider_name="openai", + supported_model_types=[model_type], + ) + ] ) - instance = factory.get_model_type_instance("openai", model_type) + instance = create_model_type_instance( + 
runtime=runtime, + provider_schema=runtime.fetch_model_providers()[0], + model_type=model_type, + ) assert isinstance(instance, expected_type) -def test_model_provider_factory_rejects_unsupported_model_type() -> None: - factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( - [ - _build_provider( - provider="langgenius/openai/openai", - provider_name="openai", - supported_model_types=[ModelType.LLM], - ) - ] - ) +def test_create_model_type_instance_rejects_unsupported_model_type() -> None: + runtime = _FakeModelRuntime( + [ + _build_provider( + provider="langgenius/openai/openai", + provider_name="openai", + supported_model_types=[ModelType.LLM], + ) + ] ) with pytest.raises(ValueError, match="Unsupported model type: unsupported"): - factory.get_model_type_instance("openai", "unsupported") # type: ignore[arg-type] + create_model_type_instance( + runtime=runtime, + provider_schema=runtime.fetch_model_providers()[0], + model_type="unsupported", # type: ignore[arg-type] + ) diff --git a/api/tests/unit_tests/core/ops/test_base_trace_instance.py b/api/tests/unit_tests/core/ops/test_base_trace_instance.py index ac65d13454..15a2af17ca 100644 --- a/api/tests/unit_tests/core/ops/test_base_trace_instance.py +++ b/api/tests/unit_tests/core/ops/test_base_trace_instance.py @@ -18,7 +18,7 @@ class ConcreteTraceInstance(BaseTraceInstance): @pytest.fixture -def mock_db_session(monkeypatch): +def mock_db_session(monkeypatch: pytest.MonkeyPatch): mock_session = MagicMock(spec=Session) mock_session.__enter__.return_value = mock_session mock_session.__exit__.return_value = None diff --git a/api/tests/unit_tests/core/ops/test_ops_trace_manager.py b/api/tests/unit_tests/core/ops/test_ops_trace_manager.py index e47df0121e..33a3293682 100644 --- a/api/tests/unit_tests/core/ops/test_ops_trace_manager.py +++ b/api/tests/unit_tests/core/ops/test_ops_trace_manager.py @@ -203,7 +203,7 @@ class DummySessionContext: @pytest.fixture(autouse=True) -def patch_provider_map(monkeypatch): 
+def patch_provider_map(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.provider_config_map", FakeProviderMap({"dummy": FAKE_PROVIDER_ENTRY}) ) @@ -212,7 +212,7 @@ def patch_provider_map(monkeypatch): @pytest.fixture(autouse=True) -def patch_timer_and_current_app(monkeypatch): +def patch_timer_and_current_app(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.ops.ops_trace_manager.threading.Timer", DummyTimer) monkeypatch.setattr("core.ops.ops_trace_manager.trace_manager_queue", queue.Queue()) monkeypatch.setattr("core.ops.ops_trace_manager.trace_manager_timer", None) @@ -227,12 +227,12 @@ def patch_timer_and_current_app(monkeypatch): @pytest.fixture(autouse=True) -def patch_sqlalchemy_session(monkeypatch): +def patch_sqlalchemy_session(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.ops.ops_trace_manager.Session", DummySessionContext) @pytest.fixture -def encryption_mocks(monkeypatch): +def encryption_mocks(monkeypatch: pytest.MonkeyPatch): encrypt_mock = MagicMock(side_effect=lambda tenant, value: f"enc-{value}") batch_decrypt_mock = MagicMock(side_effect=lambda tenant, values: [f"dec-{value}" for value in values]) obfuscate_mock = MagicMock(side_effect=lambda value: f"ob-{value}") @@ -243,7 +243,7 @@ def encryption_mocks(monkeypatch): @pytest.fixture -def mock_db(monkeypatch): +def mock_db(monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.scalars.return_value.all.return_value = ["chat"] db_mock = MagicMock() @@ -254,7 +254,7 @@ def mock_db(monkeypatch): @pytest.fixture -def workflow_repo_fixture(monkeypatch): +def workflow_repo_fixture(monkeypatch: pytest.MonkeyPatch): repo = MagicMock() repo.get_workflow_run_by_id_without_tenant.return_value = make_workflow_run() monkeypatch.setattr(TraceTask, "_get_workflow_run_repo", classmethod(lambda cls: repo)) @@ -340,13 +340,13 @@ def test_get_ops_trace_instance_handles_none_app(mock_db): assert OpsTraceManager.get_ops_trace_instance("app-id") is None 
-def test_get_ops_trace_instance_returns_none_when_disabled(mock_db, monkeypatch): +def test_get_ops_trace_instance_returns_none_when_disabled(mock_db, monkeypatch: pytest.MonkeyPatch): app = SimpleNamespace(id="app-id", tracing=json.dumps({"enabled": False})) mock_db.get.return_value = app assert OpsTraceManager.get_ops_trace_instance("app-id") is None -def test_get_ops_trace_instance_invalid_provider(mock_db, monkeypatch): +def test_get_ops_trace_instance_invalid_provider(mock_db, monkeypatch: pytest.MonkeyPatch): app = SimpleNamespace(id="app-id", tracing=json.dumps({"enabled": True, "tracing_provider": "missing"})) mock_db.get.return_value = app monkeypatch.setattr("core.ops.ops_trace_manager.provider_config_map", FakeProviderMap({})) @@ -388,7 +388,7 @@ def test_get_app_config_through_message_id_app_model_config(mock_db): assert result.id == "cfg" -def test_update_app_tracing_config_invalid_provider(mock_db, monkeypatch): +def test_update_app_tracing_config_invalid_provider(mock_db, monkeypatch: pytest.MonkeyPatch): mock_db.get.return_value = None with pytest.raises(ValueError, match="Invalid tracing provider"): OpsTraceManager.update_app_tracing_config("app", True, "bad") @@ -407,21 +407,21 @@ def test_update_app_tracing_config_success(mock_db): def test_get_app_tracing_config_errors_when_missing(mock_db): mock_db.get.return_value = None with pytest.raises(ValueError, match="App not found"): - OpsTraceManager.get_app_tracing_config("app") + OpsTraceManager.get_app_tracing_config("app", mock_db) def test_get_app_tracing_config_returns_defaults(mock_db): mock_db.get.return_value = SimpleNamespace(tracing=None) - assert OpsTraceManager.get_app_tracing_config("app-id") == {"enabled": False, "tracing_provider": None} + assert OpsTraceManager.get_app_tracing_config("app-id", mock_db) == {"enabled": False, "tracing_provider": None} def test_get_app_tracing_config_returns_payload(mock_db): payload = {"enabled": True, "tracing_provider": "dummy"} 
mock_db.get.return_value = SimpleNamespace(tracing=json.dumps(payload)) - assert OpsTraceManager.get_app_tracing_config("app-id") == payload + assert OpsTraceManager.get_app_tracing_config("app-id", mock_db) == payload -def test_check_and_project_helpers(monkeypatch): +def test_check_and_project_helpers(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.provider_config_map", FakeProviderMap( @@ -449,7 +449,7 @@ def test_check_and_project_helpers(monkeypatch): assert OpsTraceManager.get_trace_config_project_url({}, "dummy") == "url" -def test_trace_task_conversation_and_extract(monkeypatch): +def test_trace_task_conversation_and_extract(monkeypatch: pytest.MonkeyPatch): task = TraceTask(trace_type=TraceTaskName.CONVERSATION_TRACE, message_id="msg") assert task.conversation_trace(foo="bar") == {"foo": "bar"} assert task._extract_streaming_metrics(make_message_data(message_metadata="not json")) == {} @@ -525,7 +525,7 @@ def test_extract_streaming_metrics_invalid_json(): assert task._extract_streaming_metrics(fake_message) == {} -def test_trace_queue_manager_add_and_collect(monkeypatch): +def test_trace_queue_manager_add_and_collect(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) @@ -536,7 +536,7 @@ def test_trace_queue_manager_add_and_collect(monkeypatch): assert tasks == [task] -def test_trace_queue_manager_run_invokes_send(monkeypatch): +def test_trace_queue_manager_run_invokes_send(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) @@ -556,7 +556,7 @@ def test_trace_queue_manager_run_invokes_send(monkeypatch): assert called["tasks"] == [task] -def test_trace_queue_manager_send_to_celery(monkeypatch): +def test_trace_queue_manager_send_to_celery(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( 
"core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) diff --git a/api/tests/unit_tests/core/ops/test_trace_queue_manager.py b/api/tests/unit_tests/core/ops/test_trace_queue_manager.py index a4903054e0..13cf01651e 100644 --- a/api/tests/unit_tests/core/ops/test_trace_queue_manager.py +++ b/api/tests/unit_tests/core/ops/test_trace_queue_manager.py @@ -19,7 +19,7 @@ import pytest @pytest.fixture -def trace_queue_manager_and_task(monkeypatch): +def trace_queue_manager_and_task(monkeypatch: pytest.MonkeyPatch): """Fixture to provide TraceQueueManager and TraceTask with delayed imports.""" module_name = "core.ops.ops_trace_manager" if module_name not in sys.modules: diff --git a/api/tests/unit_tests/core/plugin/impl/test_agent_client.py b/api/tests/unit_tests/core/plugin/impl/test_agent_client.py index 1537ffacf5..d8843f0eeb 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_agent_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_agent_client.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.entities.request import PluginInvokeContext from core.plugin.impl.agent import PluginAgentClient @@ -15,7 +17,7 @@ def _agent_provider(name: str = "agent") -> SimpleNamespace: class TestPluginAgentClient: - def test_fetch_agent_strategy_providers(self, mocker): + def test_fetch_agent_strategy_providers(self, mocker: MockerFixture): client = PluginAgentClient() provider = _agent_provider("remote") @@ -43,7 +45,7 @@ class TestPluginAgentClient: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.strategies[0].identity.provider == "org/plugin/remote" - def test_fetch_agent_strategy_provider(self, mocker): + def test_fetch_agent_strategy_provider(self, mocker: MockerFixture): client = PluginAgentClient() provider = _agent_provider("provider") @@ -63,7 +65,7 @@ class TestPluginAgentClient: assert 
result.declaration.identity.name == "org/plugin/provider" assert result.declaration.strategies[0].identity.provider == "org/plugin/provider" - def test_invoke_merges_chunks_and_passes_context(self, mocker): + def test_invoke_merges_chunks_and_passes_context(self, mocker: MockerFixture): client = PluginAgentClient() stream_mock = mocker.patch.object( client, "_request_with_plugin_daemon_response_stream", return_value=iter(["raw"]) diff --git a/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py b/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py index 5f564062d5..c2cce5d691 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py @@ -1,12 +1,13 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.plugin.impl.asset import PluginAssetManager class TestPluginAssetManager: - def test_fetch_asset_success(self, mocker): + def test_fetch_asset_success(self, mocker: MockerFixture): manager = PluginAssetManager() response = MagicMock(status_code=200, content=b"asset-bytes") request_mock = mocker.patch.object(manager, "_request", return_value=response) @@ -16,14 +17,14 @@ class TestPluginAssetManager: assert result == b"asset-bytes" request_mock.assert_called_once_with(method="GET", path="plugin/tenant-1/asset/asset-1") - def test_fetch_asset_not_found_raises(self, mocker): + def test_fetch_asset_not_found_raises(self, mocker: MockerFixture): manager = PluginAssetManager() mocker.patch.object(manager, "_request", return_value=MagicMock(status_code=404, content=b"")) with pytest.raises(ValueError, match="can not found asset asset-1"): manager.fetch_asset("tenant-1", "asset-1") - def test_extract_asset_success(self, mocker): + def test_extract_asset_success(self, mocker: MockerFixture): manager = PluginAssetManager() response = MagicMock(status_code=200, content=b"file-content") request_mock = mocker.patch.object(manager, 
"_request", return_value=response) @@ -37,7 +38,7 @@ class TestPluginAssetManager: params={"plugin_unique_identifier": "org/plugin:1", "file_path": "README.md"}, ) - def test_extract_asset_not_found_raises(self, mocker): + def test_extract_asset_not_found_raises(self, mocker: MockerFixture): manager = PluginAssetManager() mocker.patch.object(manager, "_request", return_value=MagicMock(status_code=404, content=b"")) diff --git a/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py b/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py index 23894bd417..b154f056ca 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py @@ -1,6 +1,7 @@ import json import pytest +from pytest_mock import MockerFixture from core.plugin.endpoint.exc import EndpointSetupFailedError from core.plugin.entities.plugin_daemon import PluginDaemonInnerError @@ -39,7 +40,7 @@ class _StreamContext: class TestBasePluginClientImpl: - def test_inject_trace_headers(self, mocker): + def test_inject_trace_headers(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch("core.plugin.impl.base.dify_config.ENABLE_OTEL", True) trace_header = "00-abc-xyz-01" @@ -54,7 +55,7 @@ class TestBasePluginClientImpl: client._inject_trace_headers(headers_with_existing) assert headers_with_existing["TraceParent"] == "exists" - def test_stream_request_handles_data_lines_and_dict_payload(self, mocker): + def test_stream_request_handles_data_lines_and_dict_payload(self, mocker: MockerFixture): client = BasePluginClient() stream_mock = mocker.patch( "httpx.Client.stream", @@ -66,14 +67,14 @@ class TestBasePluginClientImpl: assert result == ["hello", "world"] assert stream_mock.call_args.kwargs["data"] == {"k": "v"} - def test_request_with_plugin_daemon_response_handles_request_exception(self, mocker): + def test_request_with_plugin_daemon_response_handles_request_exception(self, mocker: MockerFixture): 
client = BasePluginClient() mocker.patch.object(client, "_request", side_effect=RuntimeError("boom")) with pytest.raises(ValueError, match="Failed to request plugin daemon"): client._request_with_plugin_daemon_response("GET", "plugin/tenant/path", bool) - def test_request_with_plugin_daemon_response_applies_transformer(self, mocker): + def test_request_with_plugin_daemon_response_applies_transformer(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_request", return_value=_ResponseStub({"code": 0, "message": "", "data": True})) @@ -88,14 +89,14 @@ class TestBasePluginClientImpl: assert result is True assert transformed == {"code": 0, "message": "", "data": True} - def test_request_with_plugin_daemon_response_stream_malformed_json_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_malformed_json_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"error":"bad-line"}'])) with pytest.raises(ValueError, match="bad-line"): list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) - def test_request_with_plugin_daemon_response_stream_plugin_daemon_inner_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_plugin_daemon_inner_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object( client, "_stream_request", return_value=iter(['{"code":-500,"message":"not-json","data":null}']) @@ -105,14 +106,14 @@ class TestBasePluginClientImpl: list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) assert exc_info.value.message == "not-json" - def test_request_with_plugin_daemon_response_stream_plugin_daemon_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_plugin_daemon_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", 
return_value=iter(['{"code":-1,"message":"err","data":null}'])) with pytest.raises(ValueError, match="plugin daemon: err, code: -1"): list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) - def test_request_with_plugin_daemon_response_stream_empty_data_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_empty_data_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"code":0,"message":"","data":null}'])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py b/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py index 4c5987d759..94723dcfe2 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.datasource.entities.datasource_entities import ( GetOnlineDocumentPageContentRequest, OnlineDriveBrowseFilesRequest, @@ -19,7 +21,7 @@ def _datasource_provider(name: str = "provider") -> SimpleNamespace: class TestPluginDatasourceManager: - def test_fetch_datasource_providers(self, mocker): + def test_fetch_datasource_providers(self, mocker: MockerFixture): manager = PluginDatasourceManager() provider = _datasource_provider("remote") repack = mocker.patch("core.plugin.impl.datasource.ToolTransformService.repack_provider") @@ -52,7 +54,7 @@ class TestPluginDatasourceManager: assert result[1].declaration.datasources[0].identity.provider == "org/plugin/remote" repack.assert_called_once_with(tenant_id="tenant-1", provider=provider) - def test_fetch_installed_datasource_providers(self, mocker): + def test_fetch_installed_datasource_providers(self, mocker: MockerFixture): manager = PluginDatasourceManager() provider = _datasource_provider("remote") repack = 
mocker.patch("core.plugin.impl.datasource.ToolTransformService.repack_provider") @@ -83,7 +85,7 @@ class TestPluginDatasourceManager: assert result[0].declaration.datasources[0].identity.provider == "org/plugin/remote" repack.assert_called_once_with(tenant_id="tenant-1", provider=provider) - def test_fetch_datasource_provider_local_and_remote(self, mocker): + def test_fetch_datasource_provider_local_and_remote(self, mocker: MockerFixture): manager = PluginDatasourceManager() local = manager.fetch_datasource_provider("tenant-1", "langgenius/file/file") @@ -113,7 +115,7 @@ class TestPluginDatasourceManager: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.datasources[0].identity.provider == "org/plugin/provider" - def test_get_website_crawl_streaming(self, mocker): + def test_get_website_crawl_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["crawl"]) @@ -132,7 +134,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_get_online_document_pages_streaming(self, mocker): + def test_get_online_document_pages_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["pages"]) @@ -151,7 +153,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_get_online_document_page_content_streaming(self, mocker): + def test_get_online_document_page_content_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["content"]) @@ -170,7 +172,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def 
test_online_drive_browse_files_streaming(self, mocker): + def test_online_drive_browse_files_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["browse"]) @@ -189,7 +191,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_online_drive_download_file_streaming(self, mocker): + def test_online_drive_download_file_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["download"]) @@ -208,14 +210,14 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_validate_provider_credentials_returns_true_when_stream_yields_result(self, mocker): + def test_validate_provider_credentials_returns_true_when_stream_yields_result(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter([SimpleNamespace(result=True)]) assert manager.validate_provider_credentials("tenant-1", "user-1", "provider", "org/plugin", {"k": "v"}) is True - def test_validate_provider_credentials_returns_false_when_stream_empty(self, mocker): + def test_validate_provider_credentials_returns_false_when_stream_empty(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter([]) diff --git a/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py b/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py index c80785aee0..05959207b1 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py @@ -1,10 +1,12 @@ from 
types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.impl.debugging import PluginDebuggingClient class TestPluginDebuggingClient: - def test_get_debugging_key(self, mocker): + def test_get_debugging_key(self, mocker: MockerFixture): client = PluginDebuggingClient() request_mock = mocker.patch.object( client, diff --git a/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py b/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py index 4cf657a050..7a24cc01d1 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py @@ -1,11 +1,12 @@ import pytest +from pytest_mock import MockerFixture from core.plugin.impl.endpoint import PluginEndpointClient from core.plugin.impl.exc import PluginDaemonInternalServerError class TestPluginEndpointClientImpl: - def test_create_endpoint(self, mocker): + def test_create_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -18,7 +19,7 @@ class TestPluginEndpointClientImpl: assert args[:3] == ("POST", "plugin/tenant-1/endpoint/setup", bool) assert kwargs["data"]["plugin_unique_identifier"] == "org/plugin:1" - def test_list_endpoints(self, mocker): + def test_list_endpoints(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["endpoint"]) @@ -28,7 +29,7 @@ class TestPluginEndpointClientImpl: assert request_mock.call_args.args[1] == "plugin/tenant-1/endpoint/list" assert request_mock.call_args.kwargs["params"] == {"page": 2, "page_size": 20} - def test_list_endpoints_for_single_plugin(self, mocker): + def test_list_endpoints_for_single_plugin(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, 
"_request_with_plugin_daemon_response", return_value=["endpoint"]) @@ -38,7 +39,7 @@ class TestPluginEndpointClientImpl: assert request_mock.call_args.args[1] == "plugin/tenant-1/endpoint/list/plugin" assert request_mock.call_args.kwargs["params"] == {"plugin_id": "org/plugin", "page": 1, "page_size": 10} - def test_update_endpoint(self, mocker): + def test_update_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -47,7 +48,7 @@ class TestPluginEndpointClientImpl: assert result is True assert request_mock.call_args.args[:3] == ("POST", "plugin/tenant-1/endpoint/update", bool) - def test_enable_and_disable_endpoint(self, mocker): + def test_enable_and_disable_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -58,7 +59,7 @@ class TestPluginEndpointClientImpl: assert calls[0].args[1] == "plugin/tenant-1/endpoint/enable" assert calls[1].args[1] == "plugin/tenant-1/endpoint/disable" - def test_delete_endpoint_idempotent_and_re_raise(self, mocker): + def test_delete_endpoint_idempotent_and_re_raise(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response") diff --git a/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py b/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py index 8c6f1c6b7f..d99a8c114f 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py @@ -1,11 +1,13 @@ import json +from pytest_mock import MockerFixture + from core.plugin.impl import exc as exc_module from core.plugin.impl.exc import PluginDaemonError, PluginInvokeError class TestPluginImplExceptions: - def test_plugin_daemon_error_str_contains_request_id(self, mocker): + def 
test_plugin_daemon_error_str_contains_request_id(self, mocker: MockerFixture): mocker.patch("core.plugin.impl.exc.get_request_id", return_value="req-123") error = PluginDaemonError("bad") @@ -21,7 +23,7 @@ class TestPluginImplExceptions: assert "RateLimit" in friendly assert "too many" in friendly - def test_plugin_invoke_error_invalid_json_and_fallback(self, mocker): + def test_plugin_invoke_error_invalid_json_and_fallback(self, mocker: MockerFixture): err = PluginInvokeError("plain text") assert err._get_error_object() == {} @@ -32,7 +34,7 @@ class TestPluginImplExceptions: err2 = PluginInvokeError("plain text") assert err2.get_error_message() == "plain text" - def test_plugin_invoke_error_get_error_object_handles_adapter_exception(self, mocker): + def test_plugin_invoke_error_get_error_object_handles_adapter_exception(self, mocker: MockerFixture): adapter = mocker.patch.object(exc_module, "TypeAdapter") adapter.return_value.validate_json.side_effect = RuntimeError("invalid") diff --git a/api/tests/unit_tests/core/plugin/impl/test_model_client.py b/api/tests/unit_tests/core/plugin/impl/test_model_client.py index bcbebbb38b..6dc572310c 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_model_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_model_client.py @@ -4,13 +4,14 @@ import io from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from core.plugin.entities.plugin_daemon import PluginDaemonInnerError from core.plugin.impl.model import PluginModelClient class TestPluginModelClient: - def test_fetch_model_providers(self, mocker): + def test_fetch_model_providers(self, mocker: MockerFixture): client = PluginModelClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["provider-a"]) @@ -23,7 +24,7 @@ class TestPluginModelClient: ) assert request_mock.call_args.kwargs["params"] == {"page": 1, "page_size": 256} - def test_get_model_schema(self, mocker): + def 
test_get_model_schema(self, mocker: MockerFixture): client = PluginModelClient() schema = SimpleNamespace(name="schema") stream_mock = mocker.patch.object( @@ -45,7 +46,7 @@ class TestPluginModelClient: assert result is schema assert stream_mock.call_args.args[:2] == ("POST", "plugin/tenant-1/dispatch/model/schema") - def test_get_model_schema_empty_stream_returns_none(self, mocker): + def test_get_model_schema_empty_stream_returns_none(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -53,7 +54,7 @@ class TestPluginModelClient: assert result is None - def test_validate_provider_credentials(self, mocker): + def test_validate_provider_credentials(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -77,7 +78,7 @@ class TestPluginModelClient: "plugin/tenant-1/dispatch/model/validate_provider_credentials", ) - def test_validate_provider_credentials_without_dict_update(self, mocker): + def test_validate_provider_credentials_without_dict_update(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -91,13 +92,13 @@ class TestPluginModelClient: assert result is False assert credentials == {"api_key": "same"} - def test_validate_provider_credentials_empty_returns_false(self, mocker): + def test_validate_provider_credentials_empty_returns_false(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) assert client.validate_provider_credentials("tenant-1", "user-1", "org/plugin:1", "provider-a", {}) is False - def test_validate_model_credentials(self, mocker): + def test_validate_model_credentials(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -123,7 +124,7 @@ class TestPluginModelClient: 
"plugin/tenant-1/dispatch/model/validate_model_credentials", ) - def test_validate_model_credentials_empty_returns_false(self, mocker): + def test_validate_model_credentials_empty_returns_false(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -132,7 +133,7 @@ class TestPluginModelClient: is False ) - def test_invoke_llm(self, mocker): + def test_invoke_llm(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, "_request_with_plugin_daemon_response_stream", return_value=iter(["chunk-1"]) @@ -160,7 +161,7 @@ class TestPluginModelClient: assert call_kwargs["data"]["data"]["stream"] is False assert call_kwargs["data"]["data"]["model_parameters"] == {"temperature": 0.1} - def test_invoke_llm_wraps_plugin_daemon_inner_error(self, mocker): + def test_invoke_llm_wraps_plugin_daemon_inner_error(self, mocker: MockerFixture): client = PluginModelClient() def _boom(): @@ -182,7 +183,7 @@ class TestPluginModelClient: ) ) - def test_get_llm_num_tokens(self, mocker): + def test_get_llm_num_tokens(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -204,7 +205,7 @@ class TestPluginModelClient: assert result == 42 - def test_get_llm_num_tokens_empty_returns_zero(self, mocker): + def test_get_llm_num_tokens_empty_returns_zero(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -213,7 +214,7 @@ class TestPluginModelClient: == 0 ) - def test_invoke_text_embedding(self, mocker): + def test_invoke_text_embedding(self, mocker: MockerFixture): client = PluginModelClient() embedding_result = SimpleNamespace(data=[[0.1, 0.2]]) mocker.patch.object( @@ -233,7 +234,7 @@ class TestPluginModelClient: assert result is embedding_result - def test_invoke_text_embedding_empty_raises(self, mocker): + 
def test_invoke_text_embedding_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -242,7 +243,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "embedding-a", {}, ["hello"], "x" ) - def test_invoke_multimodal_embedding(self, mocker): + def test_invoke_multimodal_embedding(self, mocker: MockerFixture): client = PluginModelClient() embedding_result = SimpleNamespace(data=[[0.3, 0.4]]) mocker.patch.object( @@ -262,7 +263,7 @@ class TestPluginModelClient: assert result is embedding_result - def test_invoke_multimodal_embedding_empty_raises(self, mocker): + def test_invoke_multimodal_embedding_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -271,7 +272,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "embedding-a", {}, [{"type": "image"}], "x" ) - def test_get_text_embedding_num_tokens(self, mocker): + def test_get_text_embedding_num_tokens(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -287,7 +288,7 @@ class TestPluginModelClient: 3, ] - def test_get_text_embedding_num_tokens_empty_returns_list(self, mocker): + def test_get_text_embedding_num_tokens_empty_returns_list(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -298,7 +299,7 @@ class TestPluginModelClient: == [] ) - def test_invoke_rerank(self, mocker): + def test_invoke_rerank(self, mocker: MockerFixture): client = PluginModelClient() rerank_result = SimpleNamespace(scores=[0.9]) mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([rerank_result])) @@ -318,14 +319,14 @@ class TestPluginModelClient: assert 
result is rerank_result - def test_invoke_rerank_empty_raises(self, mocker): + def test_invoke_rerank_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) with pytest.raises(ValueError, match="Failed to invoke rerank"): client.invoke_rerank("tenant-1", "user-1", "org/plugin:1", "provider-a", "rerank-a", {}, "q", ["doc-1"]) - def test_invoke_multimodal_rerank(self, mocker): + def test_invoke_multimodal_rerank(self, mocker: MockerFixture): client = PluginModelClient() rerank_result = SimpleNamespace(scores=[0.8]) mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([rerank_result])) @@ -345,7 +346,7 @@ class TestPluginModelClient: assert result is rerank_result - def test_invoke_multimodal_rerank_empty_raises(self, mocker): + def test_invoke_multimodal_rerank_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -361,7 +362,7 @@ class TestPluginModelClient: [{"type": "image"}], ) - def test_invoke_tts(self, mocker): + def test_invoke_tts(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -384,7 +385,7 @@ class TestPluginModelClient: assert result == [b"hello", b"!"] - def test_invoke_tts_wraps_plugin_daemon_inner_error(self, mocker): + def test_invoke_tts_wraps_plugin_daemon_inner_error(self, mocker: MockerFixture): client = PluginModelClient() def _boom(): @@ -396,7 +397,7 @@ class TestPluginModelClient: with pytest.raises(ValueError, match="tts error-400"): list(client.invoke_tts("tenant-1", "user-1", "org/plugin:1", "provider-a", "tts-a", {}, "hello", "alloy")) - def test_get_tts_model_voices(self, mocker): + def test_get_tts_model_voices(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -425,13 +426,13 @@ class 
TestPluginModelClient: assert result == [{"name": "Alloy", "value": "alloy"}, {"name": "Echo", "value": "echo"}] - def test_get_tts_model_voices_empty_returns_list(self, mocker): + def test_get_tts_model_voices_empty_returns_list(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) assert client.get_tts_model_voices("tenant-1", "user-1", "org/plugin:1", "provider-a", "tts-a", {}) == [] - def test_invoke_speech_to_text(self, mocker): + def test_invoke_speech_to_text(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -452,7 +453,7 @@ class TestPluginModelClient: assert result == "transcribed text" assert stream_mock.call_args.kwargs["data"]["data"]["file"] == "616263" - def test_invoke_speech_to_text_empty_raises(self, mocker): + def test_invoke_speech_to_text_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -461,7 +462,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "stt-a", {}, io.BytesIO(b"abc") ) - def test_invoke_moderation(self, mocker): + def test_invoke_moderation(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -482,7 +483,7 @@ class TestPluginModelClient: assert result is True assert stream_mock.call_args.kwargs["path"] == "plugin/tenant-1/dispatch/moderation/invoke" - def test_invoke_moderation_empty_raises(self, mocker): + def test_invoke_moderation_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_model_runtime_factory.py b/api/tests/unit_tests/core/plugin/impl/test_model_runtime_factory.py index 
7491e79f30..52da674f06 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_model_runtime_factory.py +++ b/api/tests/unit_tests/core/plugin/impl/test_model_runtime_factory.py @@ -31,6 +31,6 @@ def test_plugin_model_assembly_reuses_single_runtime_across_views(): assert assembly.model_manager is model_manager mock_runtime_factory.assert_called_once_with(tenant_id="tenant-1", user_id="user-1") - mock_provider_factory_cls.assert_called_once_with(model_runtime=runtime) + mock_provider_factory_cls.assert_called_once_with(runtime=runtime) mock_provider_manager_cls.assert_called_once_with(model_runtime=runtime) mock_model_manager_cls.assert_called_once_with(provider_manager=provider_manager) diff --git a/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py b/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py index 6fb4c99432..f6c9b1c669 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py +++ b/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py @@ -2,6 +2,7 @@ from io import BytesIO from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from werkzeug import Request from core.plugin.impl.oauth import OAuthHandler @@ -25,7 +26,7 @@ def _build_request(body: bytes = b"payload") -> Request: class TestOAuthHandler: - def test_get_authorization_url(self, mocker): + def test_get_authorization_url(self, mocker: MockerFixture): handler = OAuthHandler() stream_mock = mocker.patch.object( handler, @@ -45,7 +46,7 @@ class TestOAuthHandler: assert response.authorization_url == "https://auth.example.com" assert stream_mock.call_count == 1 - def test_get_authorization_url_no_response_raises(self, mocker): + def test_get_authorization_url_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -59,7 +60,7 @@ class TestOAuthHandler: system_credentials={}, ) - def test_get_credentials(self, 
mocker): + def test_get_credentials(self, mocker: MockerFixture): handler = OAuthHandler() captured_data = {} @@ -85,7 +86,7 @@ class TestOAuthHandler: assert "raw_http_request" in captured_data["data"] assert stream_mock.call_count == 1 - def test_get_credentials_no_response_raises(self, mocker): + def test_get_credentials_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -100,7 +101,7 @@ class TestOAuthHandler: request=_build_request(), ) - def test_refresh_credentials(self, mocker): + def test_refresh_credentials(self, mocker: MockerFixture): handler = OAuthHandler() stream_mock = mocker.patch.object( handler, @@ -121,7 +122,7 @@ class TestOAuthHandler: assert response.credentials == {"token": "new"} assert stream_mock.call_count == 1 - def test_refresh_credentials_no_response_raises(self, mocker): + def test_refresh_credentials_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py b/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py index 80cf46f9bb..3ae3cc18e4 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.entities.plugin_daemon import CredentialType from core.plugin.impl.tool import PluginToolManager @@ -15,7 +17,7 @@ def _tool_provider(name: str = "provider") -> SimpleNamespace: class TestPluginToolManager: - def test_fetch_tool_providers(self, mocker): + def test_fetch_tool_providers(self, mocker: MockerFixture): manager = PluginToolManager() provider = _tool_provider("remote") mocker.patch("core.plugin.impl.tool.resolve_dify_schema_refs", 
return_value={"resolved": True}) @@ -44,7 +46,7 @@ class TestPluginToolManager: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.tools[0].identity.provider == "org/plugin/remote" - def test_fetch_tool_provider(self, mocker): + def test_fetch_tool_provider(self, mocker: MockerFixture): manager = PluginToolManager() provider = _tool_provider("provider") mocker.patch("core.plugin.impl.tool.resolve_dify_schema_refs", return_value={"resolved": True}) @@ -68,7 +70,7 @@ class TestPluginToolManager: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.tools[0].identity.provider == "org/plugin/provider" - def test_invoke_merges_chunks(self, mocker): + def test_invoke_merges_chunks(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object( manager, "_request_with_plugin_daemon_response_stream", return_value=iter(["chunk"]) @@ -92,7 +94,7 @@ class TestPluginToolManager: assert merge_mock.call_count == 1 assert stream_mock.call_args.kwargs["headers"]["X-Plugin-ID"] == "org/plugin" - def test_validate_credentials_paths(self, mocker): + def test_validate_credentials_paths(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") @@ -108,7 +110,7 @@ class TestPluginToolManager: stream_mock.return_value = iter([]) assert manager.validate_datasource_credentials("tenant-1", "user-1", "org/plugin/provider", {"k": "v"}) is False - def test_get_runtime_parameters_paths(self, mocker): + def test_get_runtime_parameters_paths(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") diff --git a/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py b/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py index 76da51c2c8..811bb7e50d 100644 --- 
a/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py @@ -2,6 +2,7 @@ from io import BytesIO from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from werkzeug import Request from core.plugin.entities.plugin_daemon import CredentialType @@ -62,7 +63,7 @@ def _subscription_call_kwargs(method_name: str) -> dict: class TestPluginTriggerClient: - def test_fetch_trigger_providers(self, mocker): + def test_fetch_trigger_providers(self, mocker: MockerFixture): client = PluginTriggerClient() provider = _trigger_provider("remote") @@ -89,7 +90,7 @@ class TestPluginTriggerClient: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.events[0].identity.provider == "org/plugin/remote" - def test_fetch_trigger_provider(self, mocker): + def test_fetch_trigger_provider(self, mocker: MockerFixture): client = PluginTriggerClient() provider = _trigger_provider("provider") @@ -108,7 +109,7 @@ class TestPluginTriggerClient: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.events[0].identity.provider == "org/plugin/provider" - def test_invoke_trigger_event(self, mocker): + def test_invoke_trigger_event(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = mocker.patch.object( client, @@ -132,7 +133,7 @@ class TestPluginTriggerClient: assert result.variables == {"ok": True} assert stream_mock.call_count == 1 - def test_invoke_trigger_event_no_response_raises(self, mocker): + def test_invoke_trigger_event_no_response_raises(self, mocker: MockerFixture): client = PluginTriggerClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -150,7 +151,7 @@ class TestPluginTriggerClient: payload={"payload": 1}, ) - def test_validate_provider_credentials(self, mocker): + def test_validate_provider_credentials(self, mocker: 
MockerFixture): client = PluginTriggerClient() stream_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response_stream") @@ -163,7 +164,7 @@ class TestPluginTriggerClient: ): client.validate_provider_credentials("tenant-1", "user-1", "org/plugin/provider", {"k": "v"}) - def test_dispatch_event(self, mocker): + def test_dispatch_event(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = mocker.patch.object( client, @@ -195,7 +196,7 @@ class TestPluginTriggerClient: ) @pytest.mark.parametrize("method_name", ["subscribe", "unsubscribe", "refresh"]) - def test_subscription_operations_success(self, mocker, method_name): + def test_subscription_operations_success(self, mocker: MockerFixture, method_name): client = PluginTriggerClient() stream_mock = mocker.patch.object( client, @@ -217,7 +218,7 @@ class TestPluginTriggerClient: ("refresh", "No response received from plugin daemon for refresh"), ], ) - def test_subscription_operations_no_response(self, mocker, method_name, expected): + def test_subscription_operations_no_response(self, mocker: MockerFixture, method_name, expected): client = PluginTriggerClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) method = getattr(client, method_name) diff --git a/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py b/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py index 3feb4159ad..2ed7c70ed9 100644 --- a/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py +++ b/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py @@ -4,6 +4,7 @@ from unittest.mock import MagicMock import pytest from pydantic import BaseModel +from pytest_mock import MockerFixture from core.app.layers.pause_state_persist_layer import PauseStateLayerConfig from core.plugin.backwards_invocation.app import PluginAppBackwardsInvocation @@ -41,7 +42,7 @@ class TestBaseBackwardsInvocation: class 
TestPluginAppBackwardsInvocation: - def test_fetch_app_info_workflow_path(self, mocker): + def test_fetch_app_info_workflow_path(self, mocker: MockerFixture): workflow = MagicMock() workflow.features_dict = {"feature": "v"} workflow.user_input_form.return_value = [{"name": "foo"}] @@ -57,7 +58,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"data": {"mapped": True}} mapper.assert_called_once_with(features_dict={"feature": "v"}, user_input_form=[{"name": "foo"}]) - def test_fetch_app_info_model_config_path(self, mocker): + def test_fetch_app_info_model_config_path(self, mocker: MockerFixture): model_config = MagicMock() model_config.to_dict.return_value = {"user_input_form": [{"name": "bar"}], "k": "v"} app = MagicMock(mode=AppMode.COMPLETION, app_model_config=model_config) @@ -81,7 +82,7 @@ class TestPluginAppBackwardsInvocation: (AppMode.COMPLETION, "invoke_completion_app"), ], ) - def test_invoke_app_routes_by_mode(self, mocker, mode, route_method): + def test_invoke_app_routes_by_mode(self, mocker: MockerFixture, mode, route_method): app = MagicMock(mode=mode) user = MagicMock() mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=app) @@ -102,7 +103,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"routed": True} assert route.call_count == 1 - def test_invoke_app_uses_end_user_when_user_id_missing(self, mocker): + def test_invoke_app_uses_end_user_when_user_id_missing(self, mocker: MockerFixture): app = MagicMock(mode=AppMode.WORKFLOW) end_user = MagicMock() mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=app) @@ -127,7 +128,7 @@ class TestPluginAppBackwardsInvocation: get_or_create.assert_called_once_with(app) assert route.call_args.args[1] is end_user - def test_invoke_app_missing_query_for_chat_raises(self, mocker): + def test_invoke_app_missing_query_for_chat_raises(self, mocker: MockerFixture): mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", 
return_value=MagicMock(mode=AppMode.CHAT)) mocker.patch.object(PluginAppBackwardsInvocation, "_get_user", return_value=MagicMock()) @@ -143,7 +144,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_app_unexpected_mode_raises(self, mocker): + def test_invoke_app_unexpected_mode_raises(self, mocker: MockerFixture): mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=MagicMock(mode="other")) mocker.patch.object(PluginAppBackwardsInvocation, "_get_user", return_value=MagicMock()) @@ -166,7 +167,7 @@ class TestPluginAppBackwardsInvocation: (AppMode.CHAT, "core.plugin.backwards_invocation.app.ChatAppGenerator.generate"), ], ) - def test_invoke_chat_app_agent_and_chat(self, mocker, mode, generator_path): + def test_invoke_chat_app_agent_and_chat(self, mocker: MockerFixture, mode, generator_path): app = MagicMock(mode=mode, workflow=None) spy = mocker.patch(generator_path, return_value={"result": "ok"}) @@ -183,7 +184,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"result": "ok"} assert spy.call_count == 1 - def test_invoke_chat_app_advanced_chat_injects_pause_state_config(self, mocker): + def test_invoke_chat_app_advanced_chat_injects_pause_state_config(self, mocker: MockerFixture): workflow = MagicMock() workflow.created_by = "owner-id" @@ -242,7 +243,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_workflow_app_injects_pause_state_config(self, mocker): + def test_invoke_workflow_app_injects_pause_state_config(self, mocker: MockerFixture): workflow = MagicMock() workflow.created_by = "owner-id" @@ -284,7 +285,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_completion_app(self, mocker): + def test_invoke_completion_app(self, mocker: MockerFixture): spy = mocker.patch( "core.plugin.backwards_invocation.app.CompletionAppGenerator.generate", return_value={"ok": 1} ) @@ -295,7 +296,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"ok": 1} assert 
spy.call_count == 1 - def test_get_user_returns_end_user(self, mocker): + def test_get_user_returns_end_user(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [MagicMock(id="end-user")] session_ctx = MagicMock() @@ -307,7 +308,7 @@ class TestPluginAppBackwardsInvocation: user = PluginAppBackwardsInvocation._get_user("uid") assert user.id == "end-user" - def test_get_user_falls_back_to_account_user(self, mocker): + def test_get_user_falls_back_to_account_user(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [None, MagicMock(id="account-user")] session_ctx = MagicMock() @@ -319,7 +320,7 @@ class TestPluginAppBackwardsInvocation: user = PluginAppBackwardsInvocation._get_user("uid") assert user.id == "account-user" - def test_get_user_raises_when_user_not_found(self, mocker): + def test_get_user_raises_when_user_not_found(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [None, None] session_ctx = MagicMock() @@ -331,21 +332,21 @@ class TestPluginAppBackwardsInvocation: with pytest.raises(ValueError, match="user not found"): PluginAppBackwardsInvocation._get_user("uid") - def test_get_app_returns_app(self, mocker): + def test_get_app_returns_app(self, mocker: MockerFixture): app_obj = MagicMock(id="app") db = SimpleNamespace(session=MagicMock(scalar=MagicMock(return_value=app_obj))) mocker.patch("core.plugin.backwards_invocation.app.db", db) assert PluginAppBackwardsInvocation._get_app("app", "tenant") is app_obj - def test_get_app_raises_when_missing(self, mocker): + def test_get_app_raises_when_missing(self, mocker: MockerFixture): db = SimpleNamespace(session=MagicMock(scalar=MagicMock(return_value=None))) mocker.patch("core.plugin.backwards_invocation.app.db", db) with pytest.raises(ValueError, match="app not found"): PluginAppBackwardsInvocation._get_app("app", "tenant") - def test_get_app_raises_when_query_fails(self, mocker): + def 
test_get_app_raises_when_query_fails(self, mocker: MockerFixture): db = SimpleNamespace(session=MagicMock(scalar=MagicMock(side_effect=RuntimeError("db down")))) mocker.patch("core.plugin.backwards_invocation.app.db", db) diff --git a/api/tests/unit_tests/core/plugin/test_model_runtime_adapter.py b/api/tests/unit_tests/core/plugin/test_model_runtime_adapter.py index 88bf555594..b1ecaa4ead 100644 --- a/api/tests/unit_tests/core/plugin/test_model_runtime_adapter.py +++ b/api/tests/unit_tests/core/plugin/test_model_runtime_adapter.py @@ -3,7 +3,7 @@ import datetime import uuid from types import SimpleNamespace -from unittest.mock import Mock, sentinel +from unittest.mock import Mock, patch, sentinel import pytest @@ -13,6 +13,8 @@ from core.plugin.impl.model import PluginModelClient from core.plugin.impl.model_runtime import TENANT_SCOPE_SCHEMA_CACHE_USER_ID, PluginModelRuntime from core.plugin.impl.model_runtime_factory import create_plugin_model_runtime from graphon.model_runtime.entities.common_entities import I18nObject +from graphon.model_runtime.entities.llm_entities import LLMResultChunk, LLMResultChunkDelta, LLMUsage +from graphon.model_runtime.entities.message_entities import AssistantPromptMessage from graphon.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType from graphon.model_runtime.entities.provider_entities import ConfigurateMethod, ProviderEntity @@ -146,7 +148,31 @@ class TestPluginModelRuntime: def test_invoke_llm_resolves_plugin_fields(self) -> None: client = Mock(spec=PluginModelClient) - client.invoke_llm.return_value = sentinel.result + usage = LLMUsage.empty_usage() + client.invoke_llm.return_value = iter( + [ + LLMResultChunk( + model="gpt-4o-mini", + prompt_messages=[], + system_fingerprint="fp-plugin", + delta=LLMResultChunkDelta( + index=0, + message=AssistantPromptMessage(content="plugin "), + ), + ), + LLMResultChunk( + model="gpt-4o-mini", + prompt_messages=[], + system_fingerprint="fp-plugin", + 
delta=LLMResultChunkDelta( + index=1, + message=AssistantPromptMessage(content="response"), + usage=usage, + finish_reason="stop", + ), + ), + ] + ) runtime = PluginModelRuntime(tenant_id="tenant", user_id="user", client=client) result = runtime.invoke_llm( @@ -160,7 +186,11 @@ class TestPluginModelRuntime: stream=False, ) - assert result is sentinel.result + assert result.model == "gpt-4o-mini" + assert result.prompt_messages == [] + assert result.message.content == "plugin response" + assert result.usage == usage + assert result.system_fingerprint == "fp-plugin" client.invoke_llm.assert_called_once_with( tenant_id="tenant", user_id="user", @@ -175,6 +205,38 @@ class TestPluginModelRuntime: stream=False, ) + def test_invoke_llm_returns_plugin_stream_directly(self) -> None: + client = Mock(spec=PluginModelClient) + stream_result = iter([]) + client.invoke_llm.return_value = stream_result + runtime = PluginModelRuntime(tenant_id="tenant", user_id="user", client=client) + + result = runtime.invoke_llm( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + model_parameters={"temperature": 0.3}, + prompt_messages=[], + tools=None, + stop=("END",), + stream=True, + ) + + assert result is stream_result + client.invoke_llm.assert_called_once_with( + tenant_id="tenant", + user_id="user", + plugin_id="langgenius/openai", + provider="openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + model_parameters={"temperature": 0.3}, + prompt_messages=[], + tools=None, + stop=["END"], + stream=True, + ) + def test_invoke_llm_rejects_per_call_user_override(self) -> None: client = Mock(spec=PluginModelClient) client.invoke_llm.return_value = sentinel.result @@ -267,6 +329,129 @@ def test_get_model_schema_uses_cached_schema_without_hitting_client(monkeypatch: client.get_model_schema.assert_not_called() +def test_structured_output_adapter_invokes_bound_runtime_streaming() -> None: + runtime = Mock() + 
runtime.invoke_llm.return_value = sentinel.stream_result + adapter = model_runtime_module._PluginStructuredOutputModelInstance( + runtime=runtime, + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + ) + tool = Mock() + + result = adapter.invoke_llm( + prompt_messages=[], + model_parameters=None, + tools=[tool], + stop=["END"], + stream=True, + callbacks=sentinel.callbacks, + ) + + assert result is sentinel.stream_result + runtime.invoke_llm.assert_called_once_with( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + model_parameters={}, + prompt_messages=[], + tools=[tool], + stop=["END"], + stream=True, + ) + + +def test_structured_output_adapter_invokes_bound_runtime_non_streaming() -> None: + runtime = Mock() + runtime.invoke_llm.return_value = sentinel.result + adapter = model_runtime_module._PluginStructuredOutputModelInstance( + runtime=runtime, + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + ) + + result = adapter.invoke_llm( + prompt_messages=[], + model_parameters={"temperature": 0}, + tools=None, + stop=None, + stream=False, + ) + + assert result is sentinel.result + runtime.invoke_llm.assert_called_once_with( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + model_parameters={"temperature": 0}, + prompt_messages=[], + tools=None, + stop=None, + stream=False, + ) + + +def test_invoke_llm_with_structured_output_delegates_with_bound_adapter() -> None: + client = Mock(spec=PluginModelClient) + runtime = PluginModelRuntime(tenant_id="tenant", user_id="user", client=client) + schema = _build_model_schema() + runtime.get_model_schema = Mock(return_value=schema) # type: ignore[method-assign] + + with patch.object( + model_runtime_module, + "invoke_llm_with_structured_output_helper", + return_value=sentinel.structured_result, + ) as mock_helper: + result = 
runtime.invoke_llm_with_structured_output( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + json_schema={"type": "object"}, + model_parameters={"temperature": 0}, + prompt_messages=[], + stop=("END",), + stream=False, + ) + + assert result is sentinel.structured_result + runtime.get_model_schema.assert_called_once_with( + provider="langgenius/openai/openai", + model_type=ModelType.LLM, + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + ) + helper_kwargs = mock_helper.call_args.kwargs + assert helper_kwargs["provider"] == "langgenius/openai/openai" + assert helper_kwargs["model_schema"] == schema + assert helper_kwargs["json_schema"] == {"type": "object"} + assert helper_kwargs["model_parameters"] == {"temperature": 0} + assert helper_kwargs["prompt_messages"] == [] + assert helper_kwargs["tools"] is None + assert helper_kwargs["stop"] == ["END"] + assert helper_kwargs["stream"] is False + assert isinstance(helper_kwargs["model_instance"], model_runtime_module._PluginStructuredOutputModelInstance) + + +def test_invoke_llm_with_structured_output_raises_when_model_schema_is_missing() -> None: + client = Mock(spec=PluginModelClient) + runtime = PluginModelRuntime(tenant_id="tenant", user_id="user", client=client) + runtime.get_model_schema = Mock(return_value=None) # type: ignore[method-assign] + + with pytest.raises(ValueError, match="Model schema not found for gpt-4o-mini"): + runtime.invoke_llm_with_structured_output( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + json_schema={"type": "object"}, + model_parameters={}, + prompt_messages=[], + stop=None, + stream=False, + ) + + def test_get_model_schema_deletes_invalid_cache_and_refetches(monkeypatch: pytest.MonkeyPatch) -> None: client = Mock(spec=PluginModelClient) schema = _build_model_schema() diff --git a/api/tests/unit_tests/core/plugin/test_plugin_entities.py 
b/api/tests/unit_tests/core/plugin/test_plugin_entities.py index f1c4c7e700..deac0ba1da 100644 --- a/api/tests/unit_tests/core/plugin/test_plugin_entities.py +++ b/api/tests/unit_tests/core/plugin/test_plugin_entities.py @@ -5,6 +5,7 @@ from enum import StrEnum import pytest from flask import Response from pydantic import ValidationError +from pytest_mock import MockerFixture from core.plugin.entities.endpoint import EndpointEntityWithInstance from core.plugin.entities.marketplace import MarketplacePluginDeclaration, MarketplacePluginSnapshot @@ -34,7 +35,7 @@ from graphon.model_runtime.entities.message_entities import ( class TestEndpointEntity: - def test_endpoint_entity_with_instance_renders_url(self, mocker): + def test_endpoint_entity_with_instance_renders_url(self, mocker: MockerFixture): mocker.patch("core.plugin.entities.endpoint.dify_config.ENDPOINT_URL_TEMPLATE", "https://dify.test/{hook_id}") now = datetime.datetime.now(datetime.UTC) diff --git a/api/tests/unit_tests/core/plugin/utils/test_http_parser.py b/api/tests/unit_tests/core/plugin/utils/test_http_parser.py index 71144695bc..e0419d3266 100644 --- a/api/tests/unit_tests/core/plugin/utils/test_http_parser.py +++ b/api/tests/unit_tests/core/plugin/utils/test_http_parser.py @@ -323,6 +323,50 @@ class TestDeserializeResponse: with pytest.raises(ValueError, match="Invalid status line"): deserialize_response(raw_data) + def test_deserialize_response_preserves_duplicate_set_cookie_headers(self): + # Regression test for https://github.com/langgenius/dify/issues/35722 + # Multiple Set-Cookie headers must be preserved per RFC 9110, not collapsed + # into a single value by dict-style assignment. 
+ raw_data = ( + b"HTTP/1.1 200 OK\r\n" + b"Content-Type: text/plain\r\n" + b"Set-Cookie: session=abc; Path=/; HttpOnly\r\n" + b"Set-Cookie: tracking=xyz; Path=/; Secure\r\n" + b"\r\n" + b"ok" + ) + + response = deserialize_response(raw_data) + + cookies = response.headers.getlist("Set-Cookie") + assert cookies == [ + "session=abc; Path=/; HttpOnly", + "tracking=xyz; Path=/; Secure", + ] + # Single-valued headers should still be readable normally. + assert response.headers.get("Content-Type") == "text/plain" + + def test_deserialize_response_preserves_duplicate_generic_headers(self): + # Any header name (not just Set-Cookie) may legitimately repeat; verify the + # parser preserves all values rather than overwriting earlier ones. + raw_data = b"HTTP/1.1 200 OK\r\nX-Custom: first\r\nX-Custom: second\r\n\r\n" + + response = deserialize_response(raw_data) + + assert response.headers.getlist("X-Custom") == ["first", "second"] + + def test_deserialize_response_does_not_inject_default_content_type(self): + # Flask's Response constructor adds a default Content-Type header. When the + # raw response has no Content-Type, the parsed response should not silently + # gain one from the framework default. 
+ raw_data = b"HTTP/1.1 204 No Content\r\nX-Trace-Id: abc\r\n\r\n" + + response = deserialize_response(raw_data) + + header_names = [name for name, _ in response.headers.items()] + assert "Content-Type" not in header_names + assert response.headers.get("X-Trace-Id") == "abc" + def test_roundtrip_response(self): # Test that serialize -> deserialize produces equivalent response original_response = Response( diff --git a/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py index 1b114b369a..1f46634b89 100644 --- a/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py +++ b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py @@ -1,5 +1,7 @@ from uuid import uuid4 +from pytest_mock import MockerFixture + from constants import UUID_NIL from core.prompt.utils.extract_thread_messages import extract_thread_messages from core.prompt.utils.get_thread_messages_length import get_thread_messages_length @@ -103,7 +105,7 @@ def test_extract_thread_messages_breaks_when_parent_is_none(): assert result[0].id == id2 -def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker): +def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker: MockerFixture): id1, id2 = str(uuid4()), str(uuid4()) messages = [ MockMessage(id2, id1, answer=""), # newest generated message should be excluded @@ -119,7 +121,7 @@ def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker): mock_scalars.assert_called_once() -def test_get_thread_messages_length_keeps_non_empty_latest_answer(mocker): +def test_get_thread_messages_length_keeps_non_empty_latest_answer(mocker: MockerFixture): id1, id2 = str(uuid4()), str(uuid4()) messages = [ MockMessage(id2, id1, answer="latest-answer"), diff --git a/api/tests/unit_tests/core/prompt/test_prompt_transform.py b/api/tests/unit_tests/core/prompt/test_prompt_transform.py index 5308c8e7b3..3d71e73496 100644 --- 
a/api/tests/unit_tests/core/prompt/test_prompt_transform.py +++ b/api/tests/unit_tests/core/prompt/test_prompt_transform.py @@ -209,7 +209,7 @@ class TestPromptTransform: assert result == ["only"] memory.get_history_prompt_messages.assert_called_with(max_token_limit=10, message_limit=None) - def test_append_chat_histories_extends_prompt_messages(self, monkeypatch): + def test_append_chat_histories_extends_prompt_messages(self, monkeypatch: pytest.MonkeyPatch): transform = PromptTransform() memory = MagicMock() memory_config = SimpleNamespace(window=SimpleNamespace(enabled=False, size=None)) diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py index 1e91c2dd88..e233bd2ef0 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py @@ -67,7 +67,7 @@ def _dataset(dataset_keyword_table=None, keyword_number=None): @pytest.fixture -def patched_runtime(monkeypatch): +def patched_runtime(monkeypatch: pytest.MonkeyPatch): session = MagicMock() db = SimpleNamespace(session=session) storage = MagicMock() @@ -151,7 +151,7 @@ def test_add_texts_without_keywords_list_always_uses_extractor(monkeypatch, patc assert set(keyword._update_segment_keywords.call_args.args[2]) == {"from-extractor"} -def test_text_exists_handles_missing_and_existing_keyword_table(monkeypatch): +def test_text_exists_handles_missing_and_existing_keyword_table(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) monkeypatch.setattr(keyword, "_get_dataset_keyword_table", MagicMock(return_value=None)) @@ -308,7 +308,7 @@ def test_add_and_delete_ids_from_keyword_table_helpers(): assert deleted["kw2"] == {"node-2"} -def test_retrieve_ids_by_query_ranks_by_keyword_frequency(monkeypatch): +def test_retrieve_ids_by_query_ranks_by_keyword_frequency(monkeypatch: pytest.MonkeyPatch): keyword 
= Jieba(_dataset(_dataset_keyword_table())) handler = MagicMock() handler.extract_keywords.return_value = ["kw-a", "kw-b"] @@ -350,7 +350,7 @@ def test_update_segment_keywords_updates_when_segment_exists(monkeypatch, patche patched_runtime.session.commit.assert_not_called() -def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch): +def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) monkeypatch.setattr(keyword, "_get_dataset_keyword_table", MagicMock(return_value={})) monkeypatch.setattr(keyword, "_update_segment_keywords", MagicMock()) @@ -365,7 +365,7 @@ def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch): keyword._save_dataset_keyword_table.assert_called_once() -def test_multi_create_segment_keywords_uses_provided_and_extracted_keywords(monkeypatch): +def test_multi_create_segment_keywords_uses_provided_and_extracted_keywords(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table(), keyword_number=2)) handler = MagicMock() handler.extract_keywords.return_value = {"auto"} diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py index a4586c141b..c8ee75bf43 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py @@ -2,6 +2,8 @@ import sys import types from types import SimpleNamespace +import pytest + from core.rag.datasource.keyword.jieba.jieba_keyword_table_handler import JiebaKeywordTableHandler from core.rag.datasource.keyword.jieba.stopwords import STOPWORDS @@ -38,7 +40,7 @@ def _install_fake_jieba_modules( monkeypatch.delitem(sys.modules, "jieba.analyse.tfidf", raising=False) -def 
test_init_uses_existing_default_tfidf(monkeypatch): +def test_init_uses_existing_default_tfidf(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") default_tfidf = _DummyTFIDF() analyse_module.default_tfidf = default_tfidf @@ -51,7 +53,7 @@ def test_init_uses_existing_default_tfidf(monkeypatch): assert handler._tfidf.stop_words == STOPWORDS -def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch): +def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None @@ -67,7 +69,7 @@ def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch): assert analyse_module.default_tfidf is handler._tfidf -def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch): +def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None @@ -85,7 +87,7 @@ def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch): assert analyse_module.default_tfidf is handler._tfidf -def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch): +def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None _install_fake_jieba_modules(monkeypatch, analyse_module) @@ -96,7 +98,7 @@ def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch): assert fallback_keywords == ["two"] -def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch): +def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") _install_fake_jieba_modules(monkeypatch, analyse_module, jieba_attrs={"lcut": lambda _: ["x", "x", "y"]}) @@ -105,7 +107,7 @@ def 
test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch): assert tfidf.extract_tags("ignored", topK=1) == ["x"] -def test_build_fallback_tfidf_uses_cut_when_lcut_is_missing(monkeypatch): +def test_build_fallback_tfidf_uses_cut_when_lcut_is_missing(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") _install_fake_jieba_modules( monkeypatch, diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py b/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py index 0d969a3270..e1765b17cb 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py @@ -10,7 +10,7 @@ from core.rag.datasource.keyword.keyword_type import KeyWordType from core.rag.models.document import Document -def test_get_keyword_factory_returns_jieba_factory(monkeypatch): +def test_get_keyword_factory_returns_jieba_factory(monkeypatch: pytest.MonkeyPatch): fake_module = types.ModuleType("core.rag.datasource.keyword.jieba.jieba") class FakeJieba: @@ -27,7 +27,7 @@ def test_get_keyword_factory_raises_for_unsupported_type(): Keyword.get_keyword_factory("unsupported") -def test_keyword_initialization_uses_configured_factory(monkeypatch): +def test_keyword_initialization_uses_configured_factory(monkeypatch: pytest.MonkeyPatch): dataset = SimpleNamespace(id="dataset-1") fake_processor = MagicMock() diff --git a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py index b0ecad4d0c..f72351ffa2 100644 --- a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py +++ b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py @@ -182,7 +182,7 @@ class TestRetrievalServiceInternals: app.app_context.return_value.__exit__.return_value = False return app - def test_retrieve_with_attachment_ids_only(self, monkeypatch, 
internal_dataset): + def test_retrieve_with_attachment_ids_only(self, monkeypatch: pytest.MonkeyPatch, internal_dataset): with ( patch("core.rag.datasource.retrieval_service.RetrievalService._get_dataset", return_value=internal_dataset), patch("core.rag.datasource.retrieval_service.RetrievalService._retrieve") as mock_retrieve, @@ -699,7 +699,9 @@ class TestRetrievalServiceInternals: assert RetrievalService.format_retrieval_documents(documents) == [] - def test_format_retrieval_documents_with_parent_child_summary_and_attachments(self, monkeypatch): + def test_format_retrieval_documents_with_parent_child_summary_and_attachments( + self, monkeypatch: pytest.MonkeyPatch + ): dataset_doc_parent = SimpleNamespace( id="doc-parent", doc_form=IndexStructureType.PARENT_CHILD_INDEX, @@ -877,7 +879,7 @@ class TestRetrievalServiceInternals: assert result_by_segment_id["segment-parent-summary"].summary == "summary for parent" assert result_by_segment_id["segment-parent-summary"].child_chunks == [] - def test_format_retrieval_documents_rolls_back_and_raises_when_db_fails(self, monkeypatch): + def test_format_retrieval_documents_rolls_back_and_raises_when_db_fails(self, monkeypatch: pytest.MonkeyPatch): rollback = Mock() monkeypatch.setattr(retrieval_service_module.db.session, "rollback", rollback) monkeypatch.setattr(retrieval_service_module.db.session, "scalars", Mock(side_effect=RuntimeError("db error"))) @@ -936,7 +938,7 @@ class TestRetrievalServiceInternals: future_ok.cancel.assert_called() def test_retrieve_internal_raises_value_error_when_exceptions_exist( - self, monkeypatch, internal_dataset, internal_flask_app + self, monkeypatch: pytest.MonkeyPatch, internal_dataset, internal_flask_app ): executor = _ImmediateExecutor() monkeypatch.setattr(retrieval_service_module, "ThreadPoolExecutor", lambda *args, **kwargs: executor) @@ -958,7 +960,9 @@ class TestRetrievalServiceInternals: query="query", ) - def test_retrieve_internal_hybrid_weighted_attachment_flow(self, 
monkeypatch, internal_dataset, internal_flask_app): + def test_retrieve_internal_hybrid_weighted_attachment_flow( + self, monkeypatch: pytest.MonkeyPatch, internal_dataset, internal_flask_app + ): executor = _ImmediateExecutor() monkeypatch.setattr(retrieval_service_module, "ThreadPoolExecutor", lambda *args, **kwargs: executor) monkeypatch.setattr( @@ -1034,7 +1038,7 @@ class TestRetrievalServiceInternals: assert any(doc.metadata["doc_id"] == "processed-doc" for doc in all_documents) processor_instance.invoke.assert_called_once() - @patch("core.rag.datasource.retrieval_service.sign_upload_file", return_value="signed://file") + @patch("core.rag.datasource.retrieval_service.sign_upload_file_preview_url", return_value="signed://file") def test_get_segment_attachment_info_success(self, mock_sign): upload_file = SimpleNamespace( id="upload-1", @@ -1114,7 +1118,7 @@ class TestRetrievalServiceInternals: assert result == [] - @patch("core.rag.datasource.retrieval_service.sign_upload_file", return_value="signed://file") + @patch("core.rag.datasource.retrieval_service.sign_upload_file_preview_url", return_value="signed://file") def test_get_segment_attachment_infos_success(self, mock_sign): upload_file_1 = SimpleNamespace( id="upload-1", diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py index f84ce2771f..067159398d 100644 --- a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py +++ b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py @@ -102,7 +102,9 @@ def test_gen_index_struct_dict(vector_factory_module): ("HOLOGRES", "dify_vdb_hologres.hologres_vector", "HologresVectorFactory"), ], ) -def test_get_vector_factory_supported(vector_factory_module, monkeypatch, vector_type, module_path, class_name): +def test_get_vector_factory_supported( + vector_factory_module, monkeypatch: pytest.MonkeyPatch, vector_type, module_path, class_name +): 
expected_cls = _register_fake_factory_module(monkeypatch, module_path, class_name) result_cls = vector_factory_module.Vector.get_vector_factory(getattr(vector_factory_module.VectorType, vector_type)) @@ -119,7 +121,7 @@ class _PluginChromaFactory: """Stub used only for entry-point override test.""" -def test_get_vector_factory_entry_point_overrides_builtin(vector_factory_module, monkeypatch): +def test_get_vector_factory_entry_point_overrides_builtin(vector_factory_module, monkeypatch: pytest.MonkeyPatch): from importlib.metadata import EntryPoint from core.rag.datasource.vdb import vector_backend_registry as reg @@ -171,7 +173,7 @@ def test_vector_init_uses_default_and_custom_attributes(vector_factory_module): assert default_vector._vector_processor == "processor" -def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_module, monkeypatch): +def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_module, monkeypatch: pytest.MonkeyPatch): """``Vector(dataset)`` must not transitively call ``ModelManager`` during construction. The real embedding model should only be materialized on the first ``embed_*`` call (i.e. 
create / search paths) so cleanup paths @@ -214,7 +216,7 @@ def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_m inner_model.embed_documents.assert_called_once_with(["world"]) -def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeypatch): +def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeypatch: pytest.MonkeyPatch): calls = {"vector_type": None, "init_args": None} class _Factory: @@ -242,7 +244,7 @@ def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeyp assert calls["init_args"] == (vector._dataset, ["doc_id"], "embeddings") -def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch): +def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch: pytest.MonkeyPatch): class _Expr: def __eq__(self, _other): return "expr" @@ -279,7 +281,7 @@ def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch) assert calls["vector_type"] == vector_factory_module.VectorType.TIDB_ON_QDRANT -def test_init_vector_raises_when_vector_store_missing(vector_factory_module, monkeypatch): +def test_init_vector_raises_when_vector_store_missing(vector_factory_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(vector_factory_module.dify_config, "VECTOR_STORE", None) monkeypatch.setattr(vector_factory_module.dify_config, "VECTOR_STORE_WHITELIST_ENABLE", False) @@ -316,7 +318,34 @@ def test_create_batches_texts_and_skips_empty_input(vector_factory_module): vector._vector_processor.create.assert_not_called() -def test_create_multimodal_filters_missing_uploads(vector_factory_module, monkeypatch): +def test_create_skips_empty_text_documents_before_embedding(vector_factory_module): + vector = vector_factory_module.Vector.__new__(vector_factory_module.Vector) + vector._embeddings = MagicMock() + vector._embeddings.embed_documents.return_value = [[0.1], [0.2]] + vector._vector_processor = MagicMock() + + docs = [ + 
Document(page_content="foo", metadata={"doc_id": "id-1"}), + Document(page_content="", metadata={"doc_id": "id-empty"}), + Document(page_content=" \n", metadata={"doc_id": "id-blank"}), + Document(page_content="bar", metadata={"doc_id": "id-2"}), + ] + + vector.create(texts=docs, request_id="r-1") + + vector._embeddings.embed_documents.assert_called_once_with(["foo", "bar"]) + vector._vector_processor.create.assert_called_once_with( + texts=[docs[0], docs[3]], embeddings=[[0.1], [0.2]], request_id="r-1" + ) + + vector._embeddings.embed_documents.reset_mock() + vector._vector_processor.create.reset_mock() + vector.create(texts=[docs[1], docs[2]]) + vector._embeddings.embed_documents.assert_not_called() + vector._vector_processor.create.assert_not_called() + + +def test_create_multimodal_filters_missing_uploads(vector_factory_module, monkeypatch: pytest.MonkeyPatch): class _Field: def in_(self, value): return value @@ -396,6 +425,48 @@ def test_add_texts_with_optional_duplicate_check(vector_factory_module): vector._vector_processor.create.assert_called_once() +def test_add_texts_skips_empty_text_documents(vector_factory_module): + vector = vector_factory_module.Vector.__new__(vector_factory_module.Vector) + vector._embeddings = MagicMock() + vector._embeddings.embed_documents.return_value = [[0.1]] + vector._vector_processor = MagicMock() + + docs = [ + Document(page_content="keep", metadata={"doc_id": "id-1"}), + Document(page_content="", metadata={"doc_id": "id-empty"}), + ] + + vector.add_texts(docs, source="api") + + vector._embeddings.embed_documents.assert_called_once_with(["keep"]) + vector._vector_processor.create.assert_called_once_with(texts=[docs[0]], embeddings=[[0.1]], source="api") + + vector._embeddings.embed_documents.reset_mock() + vector._vector_processor.create.reset_mock() + vector.add_texts([docs[1]]) + vector._embeddings.embed_documents.assert_not_called() + vector._vector_processor.create.assert_not_called() + + +def 
test_add_texts_filters_empty_documents_before_duplicate_check(vector_factory_module): + vector = vector_factory_module.Vector.__new__(vector_factory_module.Vector) + vector._embeddings = MagicMock() + vector._embeddings.embed_documents.return_value = [[0.1]] + vector._vector_processor = MagicMock() + vector._filter_duplicate_texts = MagicMock(return_value=[]) + + docs = [ + Document(page_content="keep", metadata={"doc_id": "id-1"}), + Document(page_content=" ", metadata={"doc_id": "id-empty"}), + ] + + vector.add_texts(docs, duplicate_check=True) + + vector._filter_duplicate_texts.assert_called_once_with([docs[0]]) + vector._embeddings.embed_documents.assert_not_called() + vector._vector_processor.create.assert_not_called() + + def test_vector_delegation_methods(vector_factory_module): vector = vector_factory_module.Vector.__new__(vector_factory_module.Vector) vector._embeddings = MagicMock() @@ -415,7 +486,7 @@ def test_vector_delegation_methods(vector_factory_module): vector._vector_processor.delete_by_metadata_field.assert_called_once_with("doc_id", "doc-1") -def test_search_by_file_handles_missing_and_existing_upload(vector_factory_module, monkeypatch): +def test_search_by_file_handles_missing_and_existing_upload(vector_factory_module, monkeypatch: pytest.MonkeyPatch): vector = vector_factory_module.Vector.__new__(vector_factory_module.Vector) vector._embeddings = MagicMock() vector._vector_processor = MagicMock() @@ -438,7 +509,7 @@ def test_search_by_file_handles_missing_and_existing_upload(vector_factory_modul assert payload["file_id"] == "file-2" -def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, monkeypatch): +def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, monkeypatch: pytest.MonkeyPatch): delete_mock = MagicMock() redis_delete = MagicMock() monkeypatch.setattr(vector_factory_module.redis_client, "delete", redis_delete) @@ -457,7 +528,7 @@ def 
test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, redis_delete.assert_not_called() -def test_get_embeddings_builds_cache_embedding(vector_factory_module, monkeypatch): +def test_get_embeddings_builds_cache_embedding(vector_factory_module, monkeypatch: pytest.MonkeyPatch): model_manager = MagicMock() model_manager.get_model_instance.return_value = "model-instance" diff --git a/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py index e6a06f163e..2e1c5715c2 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py @@ -39,7 +39,7 @@ class TestCSVExtractor: with pytest.raises(ValueError, match="Source column 'missing_col' not found"): extractor.extract() - def test_extract_wraps_unicode_error_when_autodetect_disabled(self, monkeypatch): + def test_extract_wraps_unicode_error_when_autodetect_disabled(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=False) def raise_decode(*args, **kwargs): @@ -50,7 +50,7 @@ class TestCSVExtractor: with pytest.raises(RuntimeError, match="Error loading dummy.csv"): extractor.extract() - def test_extract_autodetect_encoding_success(self, monkeypatch): + def test_extract_autodetect_encoding_success(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=True) attempted_encodings: list[str | None] = [] @@ -75,7 +75,7 @@ class TestCSVExtractor: assert docs[0].page_content == "id: source-1;body: hello" assert attempted_encodings == [None, "bad", "utf-8"] - def test_extract_autodetect_encoding_all_attempts_fail_returns_empty(self, monkeypatch): + def test_extract_autodetect_encoding_all_attempts_fail_returns_empty(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=True) def always_raise(*args, **kwargs): @@ -86,7 +86,7 @@ 
class TestCSVExtractor: assert extractor.extract() == [] - def test_read_from_file_re_raises_csv_error(self, monkeypatch): + def test_read_from_file_re_raises_csv_error(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv") monkeypatch.setattr(pd, "read_csv", lambda *args, **kwargs: (_ for _ in ()).throw(csv.Error("bad csv"))) diff --git a/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py index d2bcc1e2c4..2b42adc716 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py @@ -45,7 +45,7 @@ class _FakeWorkbook: class TestExcelExtractor: - def test_extract_xlsx_with_hyperlinks_and_sheet_skip(self, monkeypatch): + def test_extract_xlsx_with_hyperlinks_and_sheet_skip(self, monkeypatch: pytest.MonkeyPatch): sheet_with_data = _FakeSheet( header_rows=[("Name", "Link")], data_rows=[ @@ -68,7 +68,7 @@ class TestExcelExtractor: assert docs[1].page_content == '"Name":"";"Link":"123"' assert all(doc.metadata["source"] == "/tmp/sample.xlsx" for doc in docs) - def test_extract_xls_path(self, monkeypatch): + def test_extract_xls_path(self, monkeypatch: pytest.MonkeyPatch): class FakeExcelFile: sheet_names = ["Sheet1"] diff --git a/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py b/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py index 5beed88971..b4b08f57ec 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py @@ -56,7 +56,7 @@ def _patch_all_extractors(monkeypatch) -> _ExtractorFactory: class TestExtractProcessorLoaders: - def test_load_from_upload_file_return_docs_and_text(self, monkeypatch): + def test_load_from_upload_file_return_docs_and_text(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(processor_module, "ExtractSetting", lambda **kwargs: 
SimpleNamespace(**kwargs)) monkeypatch.setattr( @@ -93,7 +93,9 @@ class TestExtractProcessorLoaders: ), ], ) - def test_load_from_url_builds_temp_file_with_correct_suffix(self, monkeypatch, url, headers, expected_suffix): + def test_load_from_url_builds_temp_file_with_correct_suffix( + self, monkeypatch: pytest.MonkeyPatch, url, headers, expected_suffix + ): response = SimpleNamespace(headers=headers, content=b"body") monkeypatch.setattr(processor_module.ssrf_proxy, "get", lambda *args, **kwargs: response) monkeypatch.setattr(processor_module, "ExtractSetting", lambda **kwargs: SimpleNamespace(**kwargs)) @@ -119,11 +121,13 @@ class TestExtractProcessorLoaders: class TestExtractProcessorFileRouting: @pytest.fixture(autouse=True) - def _set_unstructured_config(self, monkeypatch): + def _set_unstructured_config(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(processor_module.dify_config, "UNSTRUCTURED_API_URL", "https://unstructured") monkeypatch.setattr(processor_module.dify_config, "UNSTRUCTURED_API_KEY", "key") - def _run_extract_for_extension(self, monkeypatch, extension: str, etl_type: str, is_automatic: bool = False): + def _run_extract_for_extension( + self, monkeypatch: pytest.MonkeyPatch, extension: str, etl_type: str, is_automatic: bool = False + ): factory = _patch_all_extractors(monkeypatch) monkeypatch.setattr(processor_module.dify_config, "ETL_TYPE", etl_type) @@ -167,7 +171,7 @@ class TestExtractProcessorFileRouting: ], ) def test_extract_routes_file_extensions_for_unstructured_mode( - self, monkeypatch, extension, expected_extractor, is_automatic + self, monkeypatch: pytest.MonkeyPatch, extension, expected_extractor, is_automatic ): extractor_name, args, kwargs = self._run_extract_for_extension( monkeypatch, extension, etl_type="Unstructured", is_automatic=is_automatic @@ -189,7 +193,9 @@ class TestExtractProcessorFileRouting: (".txt", "TextExtractor"), ], ) - def test_extract_routes_file_extensions_for_default_mode(self, monkeypatch, 
extension, expected_extractor): + def test_extract_routes_file_extensions_for_default_mode( + self, monkeypatch: pytest.MonkeyPatch, extension, expected_extractor + ): extractor_name, _, _ = self._run_extract_for_extension(monkeypatch, extension, etl_type="SelfHosted") assert extractor_name == expected_extractor @@ -202,7 +208,7 @@ class TestExtractProcessorFileRouting: class TestExtractProcessorDatasourceRouting: - def test_extract_routes_notion_datasource(self, monkeypatch): + def test_extract_routes_notion_datasource(self, monkeypatch: pytest.MonkeyPatch): factory = _patch_all_extractors(monkeypatch) notion_info = SimpleNamespace( @@ -228,7 +234,9 @@ class TestExtractProcessorDatasourceRouting: ("jinareader", "JinaReaderWebExtractor"), ], ) - def test_extract_routes_website_datasource_providers(self, monkeypatch, provider: str, expected: str): + def test_extract_routes_website_datasource_providers( + self, monkeypatch: pytest.MonkeyPatch, provider: str, expected: str + ): factory = _patch_all_extractors(monkeypatch) website_info = SimpleNamespace( diff --git a/api/tests/unit_tests/core/rag/extractor/test_helpers.py b/api/tests/unit_tests/core/rag/extractor/test_helpers.py index 74387f749d..1c6f97ec53 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_helpers.py +++ b/api/tests/unit_tests/core/rag/extractor/test_helpers.py @@ -21,7 +21,7 @@ class TestHelpers: # Assert the language field for full coverage assert encodings[0].language is not None - def test_detect_file_encodings_timeout(self, monkeypatch): + def test_detect_file_encodings_timeout(self, monkeypatch: pytest.MonkeyPatch): class FakeFuture: def result(self, timeout=None): raise helpers.concurrent.futures.TimeoutError() @@ -41,7 +41,7 @@ class TestHelpers: with pytest.raises(TimeoutError, match="Timeout reached while detecting encoding"): detect_file_encodings("file.txt", timeout=1) - def test_detect_file_encodings_raises_when_encoding_not_detected(self, monkeypatch): + def 
test_detect_file_encodings_raises_when_encoding_not_detected(self, monkeypatch: pytest.MonkeyPatch): class FakeResult: encoding = None coherence = 0.0 diff --git a/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py index 7e78c86c7d..8ede44ec04 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py @@ -74,7 +74,7 @@ after assert "[link]" not in tups[1][1] assert "img.png" not in tups[1][1] - def test_parse_tups_autodetects_encoding_after_decode_error(self, monkeypatch): + def test_parse_tups_autodetects_encoding_after_decode_error(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path", autodetect_encoding=True) calls: list[str | None] = [] @@ -99,7 +99,7 @@ after assert len(tups) == 2 assert calls == [None, "bad-encoding", "utf-8"] - def test_parse_tups_decode_error_with_autodetect_disabled_raises(self, monkeypatch): + def test_parse_tups_decode_error_with_autodetect_disabled_raises(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path", autodetect_encoding=False) def raise_decode(self, encoding=None): @@ -110,7 +110,7 @@ after with pytest.raises(RuntimeError, match="Error loading dummy_path"): extractor.parse_tups("dummy_path") - def test_parse_tups_other_exceptions_are_wrapped(self, monkeypatch): + def test_parse_tups_other_exceptions_are_wrapped(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path") def raise_other(self, encoding=None): @@ -121,7 +121,7 @@ after with pytest.raises(RuntimeError, match="Error loading dummy_path"): extractor.parse_tups("dummy_path") - def test_extract_builds_documents_for_header_and_non_header(self, monkeypatch): + def test_extract_builds_documents_for_header_and_non_header(self, monkeypatch: pytest.MonkeyPatch): extractor = 
MarkdownExtractor(file_path="dummy_path") monkeypatch.setattr(extractor, "parse_tups", lambda _: [(None, "plain"), ("Header", "value")]) diff --git a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py index 808e41867e..49f7b592dc 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py @@ -28,7 +28,7 @@ class TestNotionExtractorInitAndPublicMethods: assert extractor._notion_access_token == "token" - def test_init_falls_back_to_env_token_when_credential_lookup_fails(self, monkeypatch): + def test_init_falls_back_to_env_token_when_credential_lookup_fails(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( notion_extractor.NotionExtractor, "_get_access_token", @@ -46,7 +46,7 @@ class TestNotionExtractorInitAndPublicMethods: assert extractor._notion_access_token == "env-token" - def test_init_raises_if_no_credential_and_no_env_token(self, monkeypatch): + def test_init_raises_if_no_credential_and_no_env_token(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( notion_extractor.NotionExtractor, "_get_access_token", @@ -63,7 +63,7 @@ class TestNotionExtractorInitAndPublicMethods: credential_id="cred", ) - def test_extract_updates_last_edited_and_loads_documents(self, monkeypatch): + def test_extract_updates_last_edited_and_loads_documents(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -83,7 +83,7 @@ class TestNotionExtractorInitAndPublicMethods: load_mock.assert_called_once_with("obj", "page") assert len(docs) == 1 - def test_load_data_as_documents_page_database_and_invalid(self, monkeypatch): + def test_load_data_as_documents_page_database_and_invalid(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -394,7 
+394,7 @@ class TestNotionMetadataAndCredentialMethods: assert extractor.update_last_edited_time(None) is None - def test_update_last_edited_time_updates_document_and_commits(self, monkeypatch): + def test_update_last_edited_time_updates_document_and_commits(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -479,7 +479,7 @@ class TestNotionMetadataAndCredentialMethods: with pytest.raises(AssertionError, match="Notion access token is required"): extractor.get_notion_last_edited_time() - def test_get_access_token_success_and_errors(self, monkeypatch): + def test_get_access_token_success_and_errors(self, monkeypatch: pytest.MonkeyPatch): with pytest.raises(Exception, match="No credential id found"): notion_extractor.NotionExtractor._get_access_token("tenant", None) diff --git a/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py index 47222a23a2..f2caf02d5e 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py @@ -7,7 +7,7 @@ import core.rag.extractor.pdf_extractor as pe @pytest.fixture -def mock_dependencies(monkeypatch): +def mock_dependencies(monkeypatch: pytest.MonkeyPatch): # Mock storage saves = [] @@ -61,7 +61,9 @@ def mock_dependencies(monkeypatch): (b"\x89PNG\r\n\x1a\n some png", "image/png", "png", "test_file_id_png"), ], ) -def test_extract_images_formats(mock_dependencies, monkeypatch, image_bytes, expected_mime, expected_ext, file_id): +def test_extract_images_formats( + mock_dependencies, monkeypatch: pytest.MonkeyPatch, image_bytes, expected_mime, expected_ext, file_id +): saves = mock_dependencies.saves db_stub = mock_dependencies.db @@ -122,7 +124,7 @@ def test_extract_images_get_objects_scenarios(mock_dependencies, get_objects_sid assert result == "" -def 
test_extract_calls_extract_images(mock_dependencies, monkeypatch): +def test_extract_calls_extract_images(mock_dependencies, monkeypatch: pytest.MonkeyPatch): # Mock pypdfium2 mock_pdf_doc = MagicMock() mock_page = MagicMock() diff --git a/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py index fb3c6e52c6..71046d73af 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py @@ -19,7 +19,7 @@ class TestTextExtractor: assert docs[0].page_content == "hello world" assert docs[0].metadata == {"source": str(file_path)} - def test_extract_autodetect_success_after_decode_error(self, monkeypatch): + def test_extract_autodetect_success_after_decode_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=True) calls = [] @@ -44,7 +44,7 @@ class TestTextExtractor: assert docs[0].page_content == "decoded text" assert calls == [None, "bad", "utf-8"] - def test_extract_autodetect_all_fail_raises_runtime_error(self, monkeypatch): + def test_extract_autodetect_all_fail_raises_runtime_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=True) def always_decode_error(self, encoding=None): @@ -56,7 +56,7 @@ class TestTextExtractor: with pytest.raises(RuntimeError, match="all detected encodings failed"): extractor.extract() - def test_extract_decode_error_without_autodetect_raises_runtime_error(self, monkeypatch): + def test_extract_decode_error_without_autodetect_raises_runtime_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=False) def always_decode_error(self, encoding=None): @@ -67,7 +67,7 @@ class TestTextExtractor: with pytest.raises(RuntimeError, match="specified encoding failed"): extractor.extract() - def test_extract_wraps_non_decode_exceptions(self, 
monkeypatch): + def test_extract_wraps_non_decode_exceptions(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt") def raise_other(self, encoding=None): diff --git a/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py index b9f2449cfb..513d232d7f 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py @@ -61,7 +61,7 @@ def test_parse_row(): assert extractor._parse_row(row, {}, 3) == gt[idx] -def test_init_downloads_via_ssrf_proxy(monkeypatch): +def test_init_downloads_via_ssrf_proxy(monkeypatch: pytest.MonkeyPatch): doc = Document() doc.add_paragraph("hello") buf = io.BytesIO() @@ -97,7 +97,7 @@ def test_init_downloads_via_ssrf_proxy(monkeypatch): extractor.temp_file.close() -def test_extract_images_from_docx(monkeypatch): +def test_extract_images_from_docx(monkeypatch: pytest.MonkeyPatch): external_bytes = b"ext-bytes" internal_bytes = b"int-bytes" @@ -210,7 +210,7 @@ def test_extract_images_from_docx_uses_internal_files_url(): dify_config.INTERNAL_FILES_URL = original_internal_files_url -def test_extract_hyperlinks(monkeypatch): +def test_extract_hyperlinks(monkeypatch: pytest.MonkeyPatch): # Mock db and storage to avoid issues during image extraction (even if no images are present) monkeypatch.setattr(we, "storage", SimpleNamespace(save=lambda k, d: None)) db_stub = SimpleNamespace(session=SimpleNamespace(add=lambda o: None, commit=lambda: None)) @@ -255,7 +255,7 @@ def test_extract_hyperlinks(monkeypatch): os.remove(tmp_path) -def test_extract_legacy_hyperlinks(monkeypatch): +def test_extract_legacy_hyperlinks(monkeypatch: pytest.MonkeyPatch): # Mock db and storage monkeypatch.setattr(we, "storage", SimpleNamespace(save=lambda k, d: None)) db_stub = SimpleNamespace(session=SimpleNamespace(add=lambda o: None, commit=lambda: None)) @@ -317,7 +317,7 @@ def 
test_extract_legacy_hyperlinks(monkeypatch): os.remove(tmp_path) -def test_init_rejects_invalid_url_status(monkeypatch): +def test_init_rejects_invalid_url_status(monkeypatch: pytest.MonkeyPatch): class FakeResponse: status_code = 404 content = b"" @@ -392,7 +392,7 @@ def test_close_closes_awaitable_close_result(): extractor.temp_file.close.assert_called_once() -def test_extract_images_handles_invalid_external_cases(monkeypatch): +def test_extract_images_handles_invalid_external_cases(monkeypatch: pytest.MonkeyPatch): class FakeTargetRef: def __contains__(self, item): return item == "image" @@ -437,7 +437,7 @@ def test_extract_images_handles_invalid_external_cases(monkeypatch): db_stub.session.commit.assert_called_once() -def test_table_to_markdown_and_parse_helpers(monkeypatch): +def test_table_to_markdown_and_parse_helpers(monkeypatch: pytest.MonkeyPatch): extractor = object.__new__(WordExtractor) table = SimpleNamespace( @@ -500,7 +500,7 @@ def test_table_to_markdown_and_parse_helpers(monkeypatch): assert extractor._parse_cell(cell, image_map) == "EXT-IMGINT-IMGplain" -def test_parse_docx_covers_drawing_shapes_hyperlink_error_and_table_branch(monkeypatch): +def test_parse_docx_covers_drawing_shapes_hyperlink_error_and_table_branch(monkeypatch: pytest.MonkeyPatch): extractor = object.__new__(WordExtractor) ext_image_id = "ext-image" diff --git a/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py b/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py index 26ce333e11..19fb385a6d 100644 --- a/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py +++ b/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py @@ -45,7 +45,7 @@ def _install_chunk_by_title(monkeypatch: pytest.MonkeyPatch, chunks: list[Simple class TestUnstructuredMarkdownMsgXml: - def test_markdown_extractor_without_api(self, monkeypatch): + def 
test_markdown_extractor_without_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text=" chunk-1 "), SimpleNamespace(text=" chunk-2 ")]) _register_module( monkeypatch, "unstructured.partition.md", partition_md=lambda filename: [SimpleNamespace(text="x")] @@ -55,7 +55,7 @@ class TestUnstructuredMarkdownMsgXml: assert [doc.page_content for doc in docs] == ["chunk-1", "chunk-2"] - def test_markdown_extractor_with_api(self, monkeypatch): + def test_markdown_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text=" via-api ")]) calls = {} @@ -70,7 +70,7 @@ class TestUnstructuredMarkdownMsgXml: assert docs[0].page_content == "via-api" assert calls == {"filename": "/tmp/file.md", "api_url": "https://u", "api_key": "k"} - def test_msg_extractor_local(self, monkeypatch): + def test_msg_extractor_local(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="msg-doc")]) _register_module( monkeypatch, "unstructured.partition.msg", partition_msg=lambda filename: [SimpleNamespace(text="x")] @@ -78,7 +78,7 @@ class TestUnstructuredMarkdownMsgXml: assert UnstructuredMsgExtractor("/tmp/file.msg").extract()[0].page_content == "msg-doc" - def test_msg_extractor_with_api(self, monkeypatch): + def test_msg_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="msg-doc")]) calls = {} @@ -94,7 +94,7 @@ class TestUnstructuredMarkdownMsgXml: ) assert calls["filename"] == "/tmp/file.msg" - def test_xml_extractor_local_and_api(self, monkeypatch): + def test_xml_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="xml-doc")]) xml_calls = {} @@ -124,7 +124,7 @@ class TestUnstructuredMarkdownMsgXml: class TestUnstructuredEmailAndEpub: - def 
test_email_extractor_local_decodes_html_and_suppresses_decode_errors(self, monkeypatch): + def test_email_extractor_local_decodes_html_and_suppresses_decode_errors(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) captured = {} @@ -150,7 +150,7 @@ class TestUnstructuredEmailAndEpub: assert "Hello Email" in chunk_elements[0].text assert chunk_elements[1].text == bad_base64 - def test_email_extractor_with_api(self, monkeypatch): + def test_email_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="api-email")]) _register_module( monkeypatch, @@ -162,7 +162,7 @@ class TestUnstructuredEmailAndEpub: assert docs[0].page_content == "api-email" - def test_epub_extractor_local_and_api(self, monkeypatch): + def test_epub_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="epub-doc")]) calls = {"download": 0, "partition": 0} @@ -198,7 +198,7 @@ class TestUnstructuredPPTAndPPTX: with pytest.raises(NotImplementedError, match="Unstructured API Url is not configured"): UnstructuredPPTExtractor("/tmp/file.ppt").extract() - def test_ppt_extractor_groups_text_by_page(self, monkeypatch): + def test_ppt_extractor_groups_text_by_page(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) _register_module( monkeypatch, @@ -215,7 +215,7 @@ class TestUnstructuredPPTAndPPTX: assert [doc.page_content for doc in docs] == ["A\nB", "C"] - def test_pptx_extractor_local_and_api(self, monkeypatch): + def test_pptx_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) _register_module( monkeypatch, @@ -244,7 +244,7 @@ class TestUnstructuredPPTAndPPTX: class TestUnstructuredWord: - def _install_doc_modules(self, monkeypatch, version: str, filetype_value): + def _install_doc_modules(self, monkeypatch: pytest.MonkeyPatch, version: str, 
filetype_value): _register_unstructured_packages(monkeypatch) class FileType: @@ -276,13 +276,13 @@ class TestUnstructuredWord: ], ) - def test_word_extractor_rejects_doc_on_old_unstructured_version(self, monkeypatch): + def test_word_extractor_rejects_doc_on_old_unstructured_version(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.10", filetype_value="doc") with pytest.raises(ValueError, match="Partitioning .doc files is only supported"): UnstructuredWordExtractor("/tmp/file.doc", "https://u", "k").extract() - def test_word_extractor_doc_and_docx_paths(self, monkeypatch): + def test_word_extractor_doc_and_docx_paths(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.11", filetype_value="doc") docs = UnstructuredWordExtractor("/tmp/file.doc", "https://u", "k").extract() @@ -292,7 +292,7 @@ class TestUnstructuredWord: docs = UnstructuredWordExtractor("/tmp/file.docx", "https://u", "k").extract() assert [doc.page_content for doc in docs] == ["chunk-1", "chunk-2"] - def test_word_extractor_magic_import_error_fallback_to_extension(self, monkeypatch): + def test_word_extractor_magic_import_error_fallback_to_extension(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.10", filetype_value="not-used") monkeypatch.setitem(sys.modules, "magic", None) diff --git a/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py b/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py index d758be218a..95878fc688 100644 --- a/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py +++ b/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py @@ -59,7 +59,7 @@ class TestWaterCrawlExceptions: class TestBaseAPIClient: - def test_init_session_builds_expected_headers(self, monkeypatch): + def test_init_session_builds_expected_headers(self, monkeypatch: pytest.MonkeyPatch): captured = {} def 
fake_client(**kwargs): @@ -74,7 +74,7 @@ class TestBaseAPIClient: assert captured["headers"]["X-API-Key"] == "k" assert captured["headers"]["User-Agent"] == "WaterCrawl-Plugin" - def test_request_stream_and_non_stream_paths(self, monkeypatch): + def test_request_stream_and_non_stream_paths(self, monkeypatch: pytest.MonkeyPatch): class FakeSession: def __init__(self): self.request_calls = [] @@ -106,7 +106,7 @@ class TestBaseAPIClient: assert fake_session.build_calls assert fake_session.send_calls[0][1] is True - def test_http_method_helpers_delegate_to_request(self, monkeypatch): + def test_http_method_helpers_delegate_to_request(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(BaseAPIClient, "init_session", lambda self: MagicMock()) client = BaseAPIClient(api_key="k", base_url="https://watercrawl.dev") @@ -127,7 +127,7 @@ class TestBaseAPIClient: class TestWaterCrawlAPIClient: - def test_process_eventstream_and_download(self, monkeypatch): + def test_process_eventstream_and_download(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") response = MagicMock() @@ -174,7 +174,7 @@ class TestWaterCrawlAPIClient: client.process_response(_response(200, content_type="application/octet-stream", content=b"bin")) == b"bin" ) - def test_process_response_event_stream_returns_generator(self, monkeypatch): + def test_process_response_event_stream_returns_generator(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") generator = (item for item in [{"type": "result", "data": {}}]) monkeypatch.setattr(client, "process_eventstream", lambda response, download=False: generator) @@ -193,7 +193,7 @@ class TestWaterCrawlAPIClient: with pytest.raises(RuntimeError, match="http error"): client.process_response(response) - def test_endpoint_wrappers(self, monkeypatch): + def test_endpoint_wrappers(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, 
"process_response", lambda resp: "processed") @@ -208,7 +208,7 @@ class TestWaterCrawlAPIClient: assert client.download_crawl_request("id") == "processed" assert client.get_crawl_request_results("id") == "processed" - def test_monitor_crawl_request_generator_and_validation(self, monkeypatch): + def test_monitor_crawl_request_generator_and_validation(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "process_response", lambda _: (x for x in [{"type": "result", "data": 1}])) @@ -221,7 +221,7 @@ class TestWaterCrawlAPIClient: with pytest.raises(ValueError, match="Generator expected"): list(client.monitor_crawl_request("job-1")) - def test_scrape_url_sync_and_async(self, monkeypatch): + def test_scrape_url_sync_and_async(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "create_crawl_request", lambda **kwargs: {"uuid": "job-1"}) @@ -238,7 +238,7 @@ class TestWaterCrawlAPIClient: sync_result = client.scrape_url("https://example.com", sync=True) assert sync_result == {"url": "https://example.com"} - def test_download_result_fetches_json_and_closes(self, monkeypatch): + def test_download_result_fetches_json_and_closes(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") response = _response(200, {"markdown": "body"}) @@ -251,7 +251,7 @@ class TestWaterCrawlAPIClient: class TestWaterCrawlProvider: - def test_crawl_url_builds_options_and_min_wait_time(self, monkeypatch): + def test_crawl_url_builds_options_and_min_wait_time(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") captured_kwargs = {} @@ -290,7 +290,7 @@ class TestWaterCrawlProvider: assert captured_kwargs["page_options"]["only_main_content"] is False assert captured_kwargs["page_options"]["wait_time"] == 1000 - def test_get_crawl_status_active_and_completed(self, monkeypatch): + def test_get_crawl_status_active_and_completed(self, 
monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr( @@ -327,7 +327,7 @@ class TestWaterCrawlProvider: assert completed["status"] == "completed" assert completed["data"] == [{"url": "u"}] - def test_get_crawl_url_data_and_scrape(self, monkeypatch): + def test_get_crawl_url_data_and_scrape(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr(provider, "scrape_url", lambda url: {"source_url": url}) @@ -339,7 +339,7 @@ class TestWaterCrawlProvider: monkeypatch.setattr(provider, "_get_results", lambda job_id, query_params=None: iter([])) assert provider.get_crawl_url_data("job", "u1") is None - def test_structure_data_validation_and_get_results_pagination(self, monkeypatch): + def test_structure_data_validation_and_get_results_pagination(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") with pytest.raises(ValueError, match="Invalid result object"): @@ -380,7 +380,7 @@ class TestWaterCrawlProvider: assert len(results) == 1 assert results[0]["source_url"] == "https://a" - def test_scrape_url_uses_client_and_structure(self, monkeypatch): + def test_scrape_url_uses_client_and_structure(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr( provider.client, "scrape_url", lambda **kwargs: {"result": {"metadata": {}, "markdown": "m"}, "url": "u"} @@ -392,7 +392,7 @@ class TestWaterCrawlProvider: class TestWaterCrawlWebExtractor: - def test_extract_crawl_and_scrape_modes(self, monkeypatch): + def test_extract_crawl_and_scrape_modes(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.rag.extractor.watercrawl.extractor.WebsiteService.get_crawl_url_data", lambda job_id, provider, url, tenant_id: { @@ -418,7 +418,7 @@ class TestWaterCrawlWebExtractor: assert crawl_extractor.extract()[0].page_content == "crawl" assert scrape_extractor.extract()[0].page_content == "scrape" - def 
test_extract_crawl_returns_empty_when_service_returns_none(self, monkeypatch): + def test_extract_crawl_returns_empty_when_service_returns_none(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.rag.extractor.watercrawl.extractor.WebsiteService.get_crawl_url_data", lambda job_id, provider, url, tenant_id: None, diff --git a/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval.py b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval.py index fd607210f1..9334ad9b2f 100644 --- a/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval.py +++ b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval.py @@ -1106,11 +1106,11 @@ class TestRetrievalService: def test_deduplicate_documents_non_dify_provider(self): """ - Test deduplication with non-dify provider documents. + Test deduplication with non-dify provider documents that have no doc_id. Verifies: - - External provider documents use content-based deduplication - - Different providers are handled correctly + - External provider documents without doc_id use content-based deduplication + - Identical content from the same provider is collapsed to one result """ # Arrange doc1 = Document( @@ -1131,7 +1131,96 @@ class TestRetrievalService: # Assert # External documents without doc_id should use content-based dedup - assert len(result) >= 1 + assert len(result) == 1 + + def test_deduplicate_documents_non_dify_provider_with_doc_id_different_sources(self): + """ + Regression test for issue #35707. + + Two chunks from different source documents share identical text content but carry + different doc_ids. Before the fix, non-dify providers were forced into content-based + deduplication and the second chunk was silently dropped. After the fix, doc_id is used + as the dedup key for any provider that exposes it, so both chunks must be retained. + + Verifies: + - Non-dify provider documents with different doc_ids are NOT deduplicated even when + their page_content is identical. 
+ """ + # Arrange — same content, different doc_ids, non-dify provider (e.g. Weaviate / Qdrant) + doc_a = Document( + page_content="Shared identical content", + metadata={"doc_id": "doc-from-file-a", "score": 0.85}, + provider="weaviate", + ) + doc_b = Document( + page_content="Shared identical content", + metadata={"doc_id": "doc-from-file-b", "score": 0.82}, + provider="weaviate", + ) + + # Act + result = RetrievalService._deduplicate_documents([doc_a, doc_b]) + + # Assert — both documents must be kept; losing either silently drops a source citation + assert len(result) == 2 + doc_ids = {doc.metadata["doc_id"] for doc in result} + assert doc_ids == {"doc-from-file-a", "doc-from-file-b"} + + def test_deduplicate_documents_non_dify_provider_with_same_doc_id(self): + """ + Test that non-dify provider documents sharing the same doc_id are deduplicated by + doc_id key (not by content), and the higher-scored duplicate is retained. + + Verifies: + - doc_id-based deduplication now applies to any provider, not only "dify" + - The document with the highest score wins when doc_ids collide + """ + # Arrange + doc_low = Document( + page_content="Content A", + metadata={"doc_id": "chunk-1", "score": 0.5}, + provider="qdrant", + ) + doc_high = Document( + page_content="Content A", + metadata={"doc_id": "chunk-1", "score": 0.9}, + provider="qdrant", + ) + + # Act + result = RetrievalService._deduplicate_documents([doc_low, doc_high]) + + # Assert + assert len(result) == 1 + assert result[0].metadata["score"] == 0.9 + + def test_deduplicate_documents_dify_provider_without_doc_id_falls_back_to_content(self): + """ + Test that a dify provider document without doc_id still falls back to content-based + deduplication (no regression from original behaviour). 
+ + Verifies: + - Absence of doc_id triggers content-based dedup regardless of provider + - First occurrence is kept when content is identical + """ + # Arrange — dify docs with no doc_id, same content + doc1 = Document( + page_content="Same content", + metadata={"score": 0.8}, + provider="dify", + ) + doc2 = Document( + page_content="Same content", + metadata={"score": 0.9}, + provider="dify", + ) + + # Act + result = RetrievalService._deduplicate_documents([doc1, doc2]) + + # Assert — collapsed to one; first-seen wins (no score comparison in content branch) + assert len(result) == 1 + assert result[0].metadata["score"] == 0.8 # ==================== Metadata Filtering Tests ==================== @@ -4473,7 +4562,7 @@ class TestRetrieveCoverage: "core.rag.retrieval.dataset_retrieval.RetrievalService.format_retrieval_documents", return_value=[record], ), - patch("core.rag.retrieval.dataset_retrieval.sign_upload_file", return_value="https://signed"), + patch("core.rag.retrieval.dataset_retrieval.sign_upload_file_preview_url", return_value="https://signed"), patch("core.rag.retrieval.dataset_retrieval.db.session.execute") as mock_execute, ): bound_model_instance = Mock() diff --git a/api/tests/unit_tests/core/telemetry/test_facade.py b/api/tests/unit_tests/core/telemetry/test_facade.py index 36e8e1bbb1..95d653f55b 100644 --- a/api/tests/unit_tests/core/telemetry/test_facade.py +++ b/api/tests/unit_tests/core/telemetry/test_facade.py @@ -14,7 +14,7 @@ from core.telemetry.events import TelemetryContext, TelemetryEvent @pytest.fixture -def telemetry_test_setup(monkeypatch): +def telemetry_test_setup(monkeypatch: pytest.MonkeyPatch): module_name = "core.ops.ops_trace_manager" ops_stub = types.ModuleType(module_name) diff --git a/api/tests/unit_tests/core/test_provider_manager.py b/api/tests/unit_tests/core/test_provider_manager.py index a5a542c94f..e84fcba3d9 100644 --- a/api/tests/unit_tests/core/test_provider_manager.py +++ 
b/api/tests/unit_tests/core/test_provider_manager.py @@ -289,7 +289,7 @@ def test_get_default_model_uses_injected_runtime_for_existing_default_record(moc result = manager.get_default_model("tenant-id", ModelType.LLM) - mock_factory_cls.assert_called_once_with(model_runtime=manager._model_runtime) + mock_factory_cls.assert_called_once_with(runtime=manager._model_runtime) assert result is not None assert result.model == "gpt-4" assert result.provider.provider == "openai" @@ -316,7 +316,7 @@ def test_get_configurations_uses_injected_runtime_and_adds_provider_aliases(mock result = manager.get_configurations("tenant-id") expected_alias = str(ModelProviderID("openai")) - mock_factory_cls.assert_called_once_with(model_runtime=manager._model_runtime) + mock_factory_cls.assert_called_once_with(runtime=manager._model_runtime) assert result.tenant_id == "tenant-id" assert expected_alias in provider_records assert expected_alias in provider_model_records @@ -402,7 +402,7 @@ def test_get_configurations_reuses_cached_result_for_same_tenant(mocker: MockerF assert first is second mock_get_all_providers.assert_called_once_with("tenant-id") - mock_factory_cls.assert_called_once_with(model_runtime=manager._model_runtime) + mock_factory_cls.assert_called_once_with(runtime=manager._model_runtime) mock_provider_configuration.assert_called_once() provider_configuration.bind_model_runtime.assert_called_once_with(manager._model_runtime) @@ -570,8 +570,7 @@ def test_get_all_providers_normalizes_provider_names_with_model_provider_id() -> session.scalars.return_value = [openai_provider, gemini_provider] with ( - patch("core.provider_manager.db", SimpleNamespace(engine=object())), - patch("core.provider_manager.Session", return_value=_build_session_context(session)), + patch("core.provider_manager.session_factory.create_session", return_value=_build_session_context(session)), ): result = ProviderManager._get_all_providers("tenant-id") @@ -595,8 +594,7 @@ def 
test_provider_grouping_helpers_group_records_by_provider_name(method_name: s session.scalars.return_value = [openai_primary, openai_secondary, anthropic_record] with ( - patch("core.provider_manager.db", SimpleNamespace(engine=object())), - patch("core.provider_manager.Session", return_value=_build_session_context(session)), + patch("core.provider_manager.session_factory.create_session", return_value=_build_session_context(session)), ): result = getattr(ProviderManager, method_name)("tenant-id") @@ -611,8 +609,7 @@ def test_get_all_preferred_model_providers_returns_mapping_by_provider_name() -> session.scalars.return_value = [openai_preference, anthropic_preference] with ( - patch("core.provider_manager.db", SimpleNamespace(engine=object())), - patch("core.provider_manager.Session", return_value=_build_session_context(session)), + patch("core.provider_manager.session_factory.create_session", return_value=_build_session_context(session)), ): result = ProviderManager._get_all_preferred_model_providers("tenant-id") @@ -626,13 +623,13 @@ def test_get_all_provider_load_balancing_configs_returns_empty_when_cached_flag_ with ( patch("core.provider_manager.redis_client.get", return_value=b"False"), patch("core.provider_manager.FeatureService.get_features") as mock_get_features, - patch("core.provider_manager.Session") as mock_session_cls, + patch("core.provider_manager.session_factory.create_session") as mock_create_session, ): result = ProviderManager._get_all_provider_load_balancing_configs("tenant-id") assert result == {} mock_get_features.assert_not_called() - mock_session_cls.assert_not_called() + mock_create_session.assert_not_called() def test_get_all_provider_load_balancing_configs_populates_cache_and_groups_configs() -> None: @@ -642,14 +639,13 @@ def test_get_all_provider_load_balancing_configs_populates_cache_and_groups_conf session.scalars.return_value = [openai_config, anthropic_config] with ( - patch("core.provider_manager.db", 
SimpleNamespace(engine=object())), patch("core.provider_manager.redis_client.get", return_value=None), patch("core.provider_manager.redis_client.setex") as mock_setex, patch( "core.provider_manager.FeatureService.get_features", return_value=SimpleNamespace(model_load_balancing_enabled=True), ), - patch("core.provider_manager.Session", return_value=_build_session_context(session)), + patch("core.provider_manager.session_factory.create_session", return_value=_build_session_context(session)), ): result = ProviderManager._get_all_provider_load_balancing_configs("tenant-id") diff --git a/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py b/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py index ad6d5906ae..b21a5c3e24 100644 --- a/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py +++ b/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py @@ -78,7 +78,7 @@ def _tool_yaml() -> dict[str, Any]: } -def test_builtin_tool_provider_init_load_tools_and_basic_accessors(monkeypatch): +def test_builtin_tool_provider_init_load_tools_and_basic_accessors(monkeypatch: pytest.MonkeyPatch): yaml_payloads = [_provider_yaml(), _tool_yaml()] def _load_yaml(*args, **kwargs): diff --git a/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py b/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py index c7829fc0d7..3f6b1ec154 100644 --- a/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py +++ b/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py @@ -115,7 +115,7 @@ def test_weekday_tool(): list(weekday_tool.invoke(user_id="u", tool_parameters={"year": 2024, "day": 1})) -def test_simple_code_valid_execution(monkeypatch): +def test_simple_code_valid_execution(monkeypatch: pytest.MonkeyPatch): simple_code = _build_builtin_tool(SimpleCode) monkeypatch.setattr( @@ -138,7 +138,7 @@ def test_simple_code_invalid_language(): list(simple_code.invoke(user_id="u", tool_parameters={"language": "go", "code": "fmt.Println(1)"})) -def 
test_simple_code_execution_error(monkeypatch): +def test_simple_code_execution_error(monkeypatch: pytest.MonkeyPatch): simple_code = _build_builtin_tool(SimpleCode) monkeypatch.setattr( @@ -155,14 +155,14 @@ def test_webscraper_empty_url(): assert empty == "Please input url" -def test_webscraper_fetch(monkeypatch): +def test_webscraper_fetch(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr("core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", lambda *a, **k: "page") full = list(webscraper.invoke(user_id="u", tool_parameters={"url": "https://example.com"}))[0].message.text assert full == "page" -def test_webscraper_summary(monkeypatch): +def test_webscraper_summary(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr("core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", lambda *a, **k: "page") monkeypatch.setattr(webscraper, "summary", lambda user_id, content: "summary") @@ -175,7 +175,7 @@ def test_webscraper_summary(monkeypatch): assert summarized == "summary" -def test_webscraper_fetch_error(monkeypatch): +def test_webscraper_fetch_error(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr( "core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", @@ -192,7 +192,7 @@ def test_asr_invalid_file(): assert "not a valid audio file" in invalid_file -def test_asr_valid_file_invocation(monkeypatch): +def test_asr_valid_file_invocation(monkeypatch: pytest.MonkeyPatch): asr = _build_builtin_tool(ASRTool) model_instance = type("M", (), {"invoke_speech2text": lambda self, file: "transcript"})() model_manager = type("Mgr", (), {"get_model_instance": lambda *a, **k: model_instance})() @@ -209,7 +209,7 @@ def test_asr_valid_file_invocation(monkeypatch): assert captured_manager_kwargs == {"tenant_id": "tenant-1", "user_id": "u"} -def 
test_asr_available_models_and_runtime_parameters(monkeypatch): +def test_asr_available_models_and_runtime_parameters(monkeypatch: pytest.MonkeyPatch): asr = _build_builtin_tool(ASRTool) provider_model = type("PM", (), {"provider": "p", "models": [type("Model", (), {"model": "m"})()]})() monkeypatch.setattr( @@ -220,7 +220,7 @@ def test_asr_available_models_and_runtime_parameters(monkeypatch): assert asr.get_runtime_parameters()[0].name == "model" -def test_tts_invoke_returns_messages(monkeypatch): +def test_tts_invoke_returns_messages(monkeypatch: pytest.MonkeyPatch): tts = _build_builtin_tool(TTSTool) captured_manager_kwargs = {} voices_model_instance = type( @@ -280,7 +280,7 @@ def test_tts_tool_raises_when_voice_unavailable(monkeypatch, voices): list(tts.invoke(user_id="u", tool_parameters={"model": "p#m", "text": "hello"})) -def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch): +def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch: pytest.MonkeyPatch): tts = _build_builtin_tool(TTSTool) model_1 = SimpleNamespace( @@ -307,7 +307,7 @@ def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch): assert runtime_parameters[1].name == "voice#provider-a#model-a" -def test_provider_classes_and_builtin_sort(monkeypatch): +def test_provider_classes_and_builtin_sort(monkeypatch: pytest.MonkeyPatch): # Use object.__new__ to avoid YAML-loading __init__; only pass-through validation is exercised. # Ensure pass-through _validate_credentials methods are executed. 
AudioToolProvider._validate_credentials(object.__new__(AudioToolProvider), "u", {}) diff --git a/api/tests/unit_tests/core/tools/test_custom_tool.py b/api/tests/unit_tests/core/tools/test_custom_tool.py index f35546b025..f525baeaf2 100644 --- a/api/tests/unit_tests/core/tools/test_custom_tool.py +++ b/api/tests/unit_tests/core/tools/test_custom_tool.py @@ -47,7 +47,7 @@ def test_parsed_response_to_string(): assert ParsedResponse("ok", False).to_string() == "ok" -def test_api_tool_fork_runtime_and_validate_credentials(monkeypatch): +def test_api_tool_fork_runtime_and_validate_credentials(monkeypatch: pytest.MonkeyPatch): tool = _build_tool() forked = tool.fork_tool_runtime(ToolRuntime(tenant_id="tenant-2")) assert isinstance(forked, ApiTool) @@ -184,7 +184,7 @@ def test_get_parameter_value_and_type_conversion_helpers(): assert tool._convert_body_property_type({"anyOf": [{"type": "integer"}]}, "2") == 2 -def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch): +def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch: pytest.MonkeyPatch): openapi = { "parameters": [ {"name": "id", "in": "path", "required": True, "schema": {"type": "string"}}, @@ -236,7 +236,7 @@ def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch invalid_method_tool.do_http_request("https://api.example.com", "TRACE", headers={}, parameters={}) -def test_do_http_request_handles_file_upload_and_invoke_paths(monkeypatch): +def test_do_http_request_handles_file_upload_and_invoke_paths(monkeypatch: pytest.MonkeyPatch): openapi = { "parameters": [], "requestBody": { diff --git a/api/tests/unit_tests/core/tools/test_signature.py b/api/tests/unit_tests/core/tools/test_signature.py index 353988d7a6..a75fdee908 100644 --- a/api/tests/unit_tests/core/tools/test_signature.py +++ b/api/tests/unit_tests/core/tools/test_signature.py @@ -9,7 +9,7 @@ import pytest from core.tools.signature import ( get_signed_file_url_for_plugin, 
sign_tool_file, - sign_upload_file, + sign_upload_file_preview_url, verify_plugin_file_signature, verify_tool_file_signature, ) @@ -89,32 +89,32 @@ def test_verify_tool_file_signature_rejects_expired_signature(monkeypatch: pytes assert verify_tool_file_signature("tool-file-id", timestamp, nonce, sign) is False -def test_sign_upload_file_prefers_internal_url(monkeypatch: pytest.MonkeyPatch) -> None: +def test_sign_upload_file_preview_url_uses_files_url(monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr("core.tools.signature.time.time", lambda: 1700000000) monkeypatch.setattr("core.tools.signature.os.urandom", lambda _: b"\x03" * 16) monkeypatch.setattr("core.tools.signature.dify_config.SECRET_KEY", "unit-secret") monkeypatch.setattr("core.tools.signature.dify_config.FILES_URL", "https://files.example.com") monkeypatch.setattr("core.tools.signature.dify_config.INTERNAL_FILES_URL", "https://internal.example.com") - url = sign_upload_file("upload-id", ".png") + url = sign_upload_file_preview_url("upload-id", ".png") parsed = urlparse(url) query = parse_qs(parsed.query) - assert parsed.netloc == "internal.example.com" + assert parsed.netloc == "files.example.com" assert parsed.path == "/files/upload-id/image-preview" assert query["timestamp"][0] assert query["nonce"][0] assert query["sign"][0] -def test_sign_upload_file_uses_files_url_fallback(monkeypatch: pytest.MonkeyPatch) -> None: +def test_sign_upload_file_preview_url_ignores_internal_files_url(monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setattr("core.tools.signature.time.time", lambda: 1700000000) monkeypatch.setattr("core.tools.signature.os.urandom", lambda _: b"\x05" * 16) monkeypatch.setattr("core.tools.signature.dify_config.SECRET_KEY", "unit-secret") monkeypatch.setattr("core.tools.signature.dify_config.FILES_URL", "https://files.example.com") - monkeypatch.setattr("core.tools.signature.dify_config.INTERNAL_FILES_URL", "") + 
monkeypatch.setattr("core.tools.signature.dify_config.INTERNAL_FILES_URL", "https://internal.example.com") - url = sign_upload_file("upload-id", ".png") + url = sign_upload_file_preview_url("upload-id", ".png") parsed = urlparse(url) query = parse_qs(parsed.query) diff --git a/api/tests/unit_tests/core/tools/test_tool_manager.py b/api/tests/unit_tests/core/tools/test_tool_manager.py index 9ebaa0417b..7c7d6eec2d 100644 --- a/api/tests/unit_tests/core/tools/test_tool_manager.py +++ b/api/tests/unit_tests/core/tools/test_tool_manager.py @@ -648,7 +648,7 @@ def test_list_default_builtin_providers_for_postgres_and_mysql(): assert providers == provider_records -def test_list_providers_from_api_covers_builtin_api_workflow_and_mcp(monkeypatch): +def test_list_providers_from_api_covers_builtin_api_workflow_and_mcp(monkeypatch: pytest.MonkeyPatch): hardcoded_controller = SimpleNamespace(entity=SimpleNamespace(identity=SimpleNamespace(name="hardcoded"))) plugin_controller = object.__new__(PluginToolProviderController) plugin_controller.entity = SimpleNamespace(identity=SimpleNamespace(name="plugin-provider")) @@ -925,3 +925,78 @@ def test_convert_tool_parameters_type_constant_branch(): ) assert constant == {"text": "fixed"} + + +def test_convert_tool_parameters_type_model_selector_from_legacy_top_level_config(): + model_param = ToolParameter.get_simple_instance( + name="vision_llm_model", + llm_description="vision model", + typ=ToolParameter.ToolParameterType.MODEL_SELECTOR, + required=True, + ) + model_param.form = ToolParameter.ToolParameterForm.FORM + variable_pool = Mock() + + runtime_parameters = ToolManager._convert_tool_parameters_type( + parameters=[model_param], + variable_pool=variable_pool, + tool_configurations={ + "vision_llm_model": { + "type": "constant", + "value": "", + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-vl-plus", + "model_type": "llm", + "mode": "chat", + } + }, + typ="workflow", + ) + + assert runtime_parameters == { + 
"vision_llm_model": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-vl-plus", + "model_type": "llm", + "mode": "chat", + } + } + + +def test_convert_tool_parameters_type_model_selector_from_constant_value_config(): + model_param = ToolParameter.get_simple_instance( + name="tts_model", + llm_description="tts model", + typ=ToolParameter.ToolParameterType.MODEL_SELECTOR, + required=True, + ) + model_param.form = ToolParameter.ToolParameterForm.FORM + variable_pool = Mock() + + runtime_parameters = ToolManager._convert_tool_parameters_type( + parameters=[model_param], + variable_pool=variable_pool, + tool_configurations={ + "tts_model": { + "type": "constant", + "value": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-tts-flash", + "model_type": "tts", + "language": "Chinese", + "voice": "Cherry", + }, + } + }, + typ="workflow", + ) + + assert runtime_parameters == { + "tts_model": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-tts-flash", + "model_type": "tts", + "language": "Chinese", + "voice": "Cherry", + } + } diff --git a/api/tests/unit_tests/core/tools/utils/test_configuration.py b/api/tests/unit_tests/core/tools/utils/test_configuration.py index ae5638784c..9e179536de 100644 --- a/api/tests/unit_tests/core/tools/utils/test_configuration.py +++ b/api/tests/unit_tests/core/tools/utils/test_configuration.py @@ -4,6 +4,8 @@ from collections.abc import Generator from typing import Any from unittest.mock import patch +import pytest + from core.app.entities.app_invoke_entities import InvokeFrom from core.helper.tool_parameter_cache import ToolParameterCache from core.tools.__base.tool import Tool @@ -110,7 +112,7 @@ def test_encrypt_tool_parameters(): assert encrypted["plain"] == "x" -def test_decrypt_tool_parameters_cache_hit_and_miss(monkeypatch): +def test_decrypt_tool_parameters_cache_hit_and_miss(monkeypatch: pytest.MonkeyPatch): manager = _build_manager() with ( @@ -139,7 +141,7 @@ def 
test_delete_tool_parameters_cache(): mock_delete.assert_called_once() -def test_configuration_manager_decrypt_suppresses_errors(monkeypatch): +def test_configuration_manager_decrypt_suppresses_errors(monkeypatch: pytest.MonkeyPatch): manager = _build_manager() with ( patch.object(ToolParameterCache, "get", return_value=None), diff --git a/api/tests/unit_tests/core/tools/utils/test_message_transformer.py b/api/tests/unit_tests/core/tools/utils/test_message_transformer.py index 5f34135af4..354b395504 100644 --- a/api/tests/unit_tests/core/tools/utils/test_message_transformer.py +++ b/api/tests/unit_tests/core/tools/utils/test_message_transformer.py @@ -42,7 +42,7 @@ class _FakeToolFileManager: @pytest.fixture(autouse=True) -def _patch_tool_file_manager(monkeypatch): +def _patch_tool_file_manager(monkeypatch: pytest.MonkeyPatch): # Patch the manager used inside the transformer module monkeypatch.setattr(mt, "ToolFileManager", _FakeToolFileManager) # also ensure predictable URL generation (no need to patch; uses id and extension only) diff --git a/api/tests/unit_tests/core/tools/utils/test_parser.py b/api/tests/unit_tests/core/tools/utils/test_parser.py index 032b1377a4..99a90f3b67 100644 --- a/api/tests/unit_tests/core/tools/utils/test_parser.py +++ b/api/tests/unit_tests/core/tools/utils/test_parser.py @@ -17,7 +17,7 @@ def app(): return app -def test_parse_openapi_to_tool_bundle_operation_id(app): +def test_parse_openapi_to_tool_bundle_operation_id(app: Flask): openapi = { "openapi": "3.0.0", "info": {"title": "Simple API", "version": "1.0.0"}, @@ -63,7 +63,7 @@ def test_parse_openapi_to_tool_bundle_operation_id(app): assert tool_bundles[2].operation_id == "createResource" -def test_parse_openapi_to_tool_bundle_properties_all_of(app): +def test_parse_openapi_to_tool_bundle_properties_all_of(app: Flask): openapi = { "openapi": "3.0.0", "info": {"title": "Simple API", "version": "1.0.0"}, @@ -118,7 +118,7 @@ def 
test_parse_openapi_to_tool_bundle_properties_all_of(app): # assert set(tool_bundles[0].parameters[0].options) == {"option1", "option2", "option3"} -def test_parse_openapi_to_tool_bundle_default_value_type_casting(app): +def test_parse_openapi_to_tool_bundle_default_value_type_casting(app: Flask): """ Test that default values are properly cast to match parameter types. This addresses the issue where array default values like [] cause validation errors diff --git a/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py b/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py index 6bb86ebe78..081b189745 100644 --- a/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py +++ b/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py @@ -34,7 +34,7 @@ def test_system_encrypter_raises_error_for_invalid_ciphertext(): encrypter.decrypt_params("not-base64") -def test_system_helpers_use_global_cached_instance(monkeypatch): +def test_system_helpers_use_global_cached_instance(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(encryption, "_encrypter", None) monkeypatch.setattr("core.tools.utils.system_encryption.dify_config.SECRET_KEY", "global-secret") diff --git a/api/tests/unit_tests/core/tools/workflow_as_tool/test_tool.py b/api/tests/unit_tests/core/tools/workflow_as_tool/test_tool.py index 72a73dd936..6c563b0912 100644 --- a/api/tests/unit_tests/core/tools/workflow_as_tool/test_tool.py +++ b/api/tests/unit_tests/core/tools/workflow_as_tool/test_tool.py @@ -147,6 +147,142 @@ def test_workflow_tool_does_not_use_pause_state_config(monkeypatch: pytest.Monke assert call_kwargs["pause_state_config"] is None +def test_workflow_tool_passes_parent_trace_context_from_runtime(monkeypatch: pytest.MonkeyPatch): + """Ensure nested workflow runtime metadata is forwarded as parent trace context.""" + tool = _build_tool() + tool.set_parent_trace_context( + parent_workflow_run_id="outer-workflow-run-1", + 
parent_node_execution_id="outer-node-execution-1", + ) + + monkeypatch.setattr(tool, "_get_app", lambda *args, **kwargs: None) + monkeypatch.setattr(tool, "_get_workflow", lambda *args, **kwargs: None) + + mock_user = Mock() + monkeypatch.setattr(tool, "_resolve_user", lambda *args, **kwargs: mock_user) + + generate_mock = MagicMock(return_value={"data": {}}) + monkeypatch.setattr("core.app.apps.workflow.app_generator.WorkflowAppGenerator.generate", generate_mock) + monkeypatch.setattr("libs.login.current_user", lambda *args, **kwargs: None) + + list(tool.invoke("test_user", {})) + + call_kwargs = generate_mock.call_args.kwargs + assert call_kwargs["args"]["parent_trace_context"].model_dump() == { + "parent_workflow_run_id": "outer-workflow-run-1", + "parent_node_execution_id": "outer-node-execution-1", + } + + +def test_workflow_tool_keeps_user_inputs_named_like_trace_runtime_keys(monkeypatch: pytest.MonkeyPatch): + """Ensure private trace context does not overwrite same-named workflow inputs.""" + tool = _build_tool() + tool.entity.parameters = [ + ToolParameter.get_simple_instance( + name="outer_workflow_run_id", + llm_description="User workflow input", + typ=ToolParameter.ToolParameterType.STRING, + required=False, + ), + ToolParameter.get_simple_instance( + name="outer_node_execution_id", + llm_description="User node input", + typ=ToolParameter.ToolParameterType.STRING, + required=False, + ), + ] + tool.set_parent_trace_context( + parent_workflow_run_id="outer-workflow-run-1", + parent_node_execution_id="outer-node-execution-1", + ) + + monkeypatch.setattr(tool, "_get_app", lambda *args, **kwargs: None) + monkeypatch.setattr(tool, "_get_workflow", lambda *args, **kwargs: None) + + mock_user = Mock() + monkeypatch.setattr(tool, "_resolve_user", lambda *args, **kwargs: mock_user) + + generate_mock = MagicMock(return_value={"data": {}}) + monkeypatch.setattr("core.app.apps.workflow.app_generator.WorkflowAppGenerator.generate", generate_mock) + 
monkeypatch.setattr("libs.login.current_user", lambda *args, **kwargs: None) + + list( + tool.invoke( + "test_user", + { + "outer_workflow_run_id": "user-workflow-input", + "outer_node_execution_id": "user-node-input", + }, + ) + ) + + call_kwargs = generate_mock.call_args.kwargs + assert call_kwargs["args"]["inputs"]["outer_workflow_run_id"] == "user-workflow-input" + assert call_kwargs["args"]["inputs"]["outer_node_execution_id"] == "user-node-input" + assert call_kwargs["args"]["parent_trace_context"].model_dump() == { + "parent_workflow_run_id": "outer-workflow-run-1", + "parent_node_execution_id": "outer-node-execution-1", + } + + +def test_workflow_tool_can_clear_parent_trace_context(monkeypatch: pytest.MonkeyPatch): + """Ensure reused WorkflowTool instances do not keep stale parent trace context.""" + tool = _build_tool() + tool.set_parent_trace_context( + parent_workflow_run_id="outer-workflow-run-1", + parent_node_execution_id="outer-node-execution-1", + ) + tool.clear_parent_trace_context() + + monkeypatch.setattr(tool, "_get_app", lambda *args, **kwargs: None) + monkeypatch.setattr(tool, "_get_workflow", lambda *args, **kwargs: None) + + mock_user = Mock() + monkeypatch.setattr(tool, "_resolve_user", lambda *args, **kwargs: mock_user) + + generate_mock = MagicMock(return_value={"data": {}}) + monkeypatch.setattr("core.app.apps.workflow.app_generator.WorkflowAppGenerator.generate", generate_mock) + monkeypatch.setattr("libs.login.current_user", lambda *args, **kwargs: None) + + list(tool.invoke("test_user", {})) + + call_kwargs = generate_mock.call_args.kwargs + assert "parent_trace_context" not in call_kwargs["args"] + + +@pytest.mark.parametrize( + "runtime_parameters", + [ + {}, + {"outer_workflow_run_id": "outer-workflow-run-1"}, + {"outer_node_execution_id": "outer-node-execution-1"}, + {"outer_workflow_run_id": None, "outer_node_execution_id": None}, + ], +) +def test_workflow_tool_omits_parent_trace_context_when_runtime_is_incomplete( + 
monkeypatch: pytest.MonkeyPatch, + runtime_parameters: dict[str, Any], +): + """Ensure incomplete runtime metadata does not leak parent trace context into generator args.""" + tool = _build_tool() + tool.runtime.runtime_parameters = runtime_parameters + + monkeypatch.setattr(tool, "_get_app", lambda *args, **kwargs: None) + monkeypatch.setattr(tool, "_get_workflow", lambda *args, **kwargs: None) + + mock_user = Mock() + monkeypatch.setattr(tool, "_resolve_user", lambda *args, **kwargs: mock_user) + + generate_mock = MagicMock(return_value={"data": {}}) + monkeypatch.setattr("core.app.apps.workflow.app_generator.WorkflowAppGenerator.generate", generate_mock) + monkeypatch.setattr("libs.login.current_user", lambda *args, **kwargs: None) + + list(tool.invoke("test_user", {})) + + call_kwargs = generate_mock.call_args.kwargs + assert "parent_trace_context" not in call_kwargs["args"] + + def test_workflow_tool_should_generate_variable_messages_for_outputs(monkeypatch: pytest.MonkeyPatch): """Test that WorkflowTool should generate variable messages when there are outputs""" tool = _build_tool() diff --git a/api/tests/unit_tests/core/variables/test_segment_type.py b/api/tests/unit_tests/core/variables/test_segment_type.py index d4e862220a..009899a92d 100644 --- a/api/tests/unit_tests/core/variables/test_segment_type.py +++ b/api/tests/unit_tests/core/variables/test_segment_type.py @@ -233,8 +233,6 @@ class TestSegmentTypeAdditionalMethods: assert SegmentType.GROUP.is_valid([StringSegment(value="b")]) is True assert SegmentType.GROUP.is_valid(["not-segment"]) is False - def test_unreachable_assertion_branch(self, monkeypatch): - monkeypatch.setattr(SegmentType, "is_array_type", lambda self: False) - - with pytest.raises(AssertionError, match="unreachable"): - SegmentType.ARRAY_STRING.is_valid(["a"]) + def test_unreachable_assertion_branch(self): + with pytest.raises(AssertionError, match="Expected code to be unreachable"): + SegmentType.is_valid("not-a-segment-type", None) 
# type: ignore[arg-type] diff --git a/api/tests/unit_tests/core/workflow/graph_engine/layers/test_llm_quota.py b/api/tests/unit_tests/core/workflow/graph_engine/layers/test_llm_quota.py index 5d6667257f..12c7f8113c 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/layers/test_llm_quota.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/layers/test_llm_quota.py @@ -1,12 +1,11 @@ +import logging import threading from datetime import datetime from types import SimpleNamespace from unittest.mock import MagicMock, patch -from core.app.entities.app_invoke_entities import DifyRunContext, InvokeFrom, UserFrom from core.app.workflow.layers.llm_quota import LLMQuotaLayer from core.errors.error import QuotaExceededError -from core.model_manager import ModelInstance from graphon.enums import BuiltinNodeTypes, WorkflowNodeExecutionStatus from graphon.graph_engine.entities.commands import CommandType from graphon.graph_events import NodeRunSucceededEvent @@ -14,17 +13,7 @@ from graphon.model_runtime.entities.llm_entities import LLMUsage from graphon.node_events import NodeRunResult -def _build_dify_context() -> DifyRunContext: - return DifyRunContext( - tenant_id="tenant-id", - app_id="app-id", - user_id="user-id", - user_from=UserFrom.ACCOUNT, - invoke_from=InvokeFrom.DEBUGGER, - ) - - -def _build_succeeded_event() -> NodeRunSucceededEvent: +def _build_succeeded_event(*, provider: str = "openai", model_name: str = "gpt-4o") -> NodeRunSucceededEvent: return NodeRunSucceededEvent( id="execution-id", node_id="llm-node-id", @@ -32,113 +21,162 @@ def _build_succeeded_event() -> NodeRunSucceededEvent: start_at=datetime.now(), node_run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, - inputs={"question": "hello"}, + inputs={ + "question": "hello", + "model_provider": provider, + "model_name": model_name, + }, llm_usage=LLMUsage.empty_usage(), ), ) -def _build_wrapped_model_instance() -> tuple[SimpleNamespace, ModelInstance]: - raw_model_instance = 
ModelInstance.__new__(ModelInstance) - return SimpleNamespace(_model_instance=raw_model_instance), raw_model_instance +def _build_public_model_identity(*, provider: str = "openai", model_name: str = "gpt-4o") -> SimpleNamespace: + return SimpleNamespace(provider=provider, name=model_name) + + +def _build_node_data(*, model: SimpleNamespace | None = None) -> SimpleNamespace: + return SimpleNamespace( + error_strategy=None, + retry_config=SimpleNamespace(retry_enabled=False), + model=model, + ) + + +def _build_node(*, node_type: BuiltinNodeTypes = BuiltinNodeTypes.LLM) -> MagicMock: + node = MagicMock() + node.id = "node-id" + node.execution_id = "execution-id" + node.node_type = node_type + node.node_data = _build_node_data(model=_build_public_model_identity()) + node.model_instance = SimpleNamespace(provider="stale-provider", model_name="stale-model") + return node + + +class _RunnableQuotaNode: + id = "node-id" + execution_id = "execution-id" + node_type = BuiltinNodeTypes.LLM + title = "LLM node" + + def __init__(self, *, stop_event: threading.Event, node_data: SimpleNamespace | None = None) -> None: + self.node_data = node_data or _build_node_data(model=_build_public_model_identity()) + self.graph_runtime_state = SimpleNamespace(stop_event=stop_event) + self.original_run_called = False + + def _run(self) -> NodeRunResult: + self.original_run_called = True + return NodeRunResult(status=WorkflowNodeExecutionStatus.SUCCEEDED) def test_deduct_quota_called_for_successful_llm_node() -> None: - layer = LLMQuotaLayer() - node = MagicMock() - node.id = "llm-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.LLM - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node.model_instance, raw_model_instance = _build_wrapped_model_instance() - + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = _build_node(node_type=BuiltinNodeTypes.LLM) result_event = _build_succeeded_event() - with 
patch("core.app.workflow.layers.llm_quota.deduct_llm_quota", autospec=True) as mock_deduct: + + with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True) as mock_deduct: layer.on_node_run_end(node=node, error=None, result_event=result_event) mock_deduct.assert_called_once_with( tenant_id="tenant-id", - model_instance=raw_model_instance, + provider="openai", + model="gpt-4o", usage=result_event.node_run_result.llm_usage, ) def test_deduct_quota_called_for_question_classifier_node() -> None: - layer = LLMQuotaLayer() - node = MagicMock() - node.id = "question-classifier-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.QUESTION_CLASSIFIER - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node.model_instance, raw_model_instance = _build_wrapped_model_instance() + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = _build_node(node_type=BuiltinNodeTypes.QUESTION_CLASSIFIER) + result_event = _build_succeeded_event(provider="anthropic", model_name="claude-3-7-sonnet") - result_event = _build_succeeded_event() - with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota", autospec=True) as mock_deduct: + with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True) as mock_deduct: layer.on_node_run_end(node=node, error=None, result_event=result_event) mock_deduct.assert_called_once_with( tenant_id="tenant-id", - model_instance=raw_model_instance, + provider="anthropic", + model="claude-3-7-sonnet", usage=result_event.node_run_result.llm_usage, ) def test_non_llm_node_is_ignored() -> None: - layer = LLMQuotaLayer() - node = MagicMock() - node.id = "start-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.START - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node._model_instance = object() - + layer = LLMQuotaLayer(tenant_id="tenant-id") 
+ node = _build_node(node_type=BuiltinNodeTypes.START) result_event = _build_succeeded_event() - with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota", autospec=True) as mock_deduct: + + with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True) as mock_deduct: layer.on_node_run_end(node=node, error=None, result_event=result_event) mock_deduct.assert_not_called() -def test_quota_error_is_handled_in_layer() -> None: - layer = LLMQuotaLayer() - node = MagicMock() - node.id = "llm-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.LLM - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node.model_instance = object() +def test_precheck_ignores_non_quota_node() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = _build_node(node_type=BuiltinNodeTypes.START) - result_event = _build_succeeded_event() - with patch( - "core.app.workflow.layers.llm_quota.deduct_llm_quota", - autospec=True, - side_effect=ValueError("quota exceeded"), - ): - layer.on_node_run_end(node=node, error=None, result_event=result_event) + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + mock_check.assert_not_called() -def test_quota_deduction_exceeded_aborts_workflow_immediately() -> None: - layer = LLMQuotaLayer() +def test_quota_error_is_handled_in_layer(caplog) -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") stop_event = threading.Event() layer.command_channel = MagicMock() - node = MagicMock() - node.id = "llm-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.LLM - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node.model_instance, _ = _build_wrapped_model_instance() + node = _build_node(node_type=BuiltinNodeTypes.LLM) + node.graph_runtime_state = MagicMock() + 
node.graph_runtime_state.stop_event = stop_event + result_event = _build_succeeded_event() + + with ( + caplog.at_level(logging.ERROR, logger="core.app.workflow.layers.llm_quota"), + patch( + "core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", + autospec=True, + side_effect=ValueError("quota exceeded"), + ) as mock_deduct, + ): + layer.on_node_run_end(node=node, error=None, result_event=result_event) + + mock_deduct.assert_called_once_with( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=result_event.node_run_result.llm_usage, + ) + assert "LLM quota deduction failed, node_id=node-id" in caplog.text + assert not stop_event.is_set() + layer.command_channel.send_command.assert_not_called() + + +def test_send_abort_command_is_noop_without_channel_or_after_abort() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + + layer._send_abort_command(reason="no channel") + + layer.command_channel = MagicMock() + layer._abort_sent = True + layer._send_abort_command(reason="already aborted") + + layer.command_channel.send_command.assert_not_called() + + +def test_quota_deduction_exceeded_aborts_workflow_immediately() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) node.graph_runtime_state = MagicMock() node.graph_runtime_state.stop_event = stop_event result_event = _build_succeeded_event() with patch( - "core.app.workflow.layers.llm_quota.deduct_llm_quota", + "core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True, side_effect=QuotaExceededError("No credits remaining"), ): @@ -152,19 +190,16 @@ def test_quota_deduction_exceeded_aborts_workflow_immediately() -> None: def test_quota_precheck_failure_aborts_workflow_immediately() -> None: - layer = LLMQuotaLayer() + layer = LLMQuotaLayer(tenant_id="tenant-id") stop_event = threading.Event() layer.command_channel = MagicMock() - node = 
MagicMock() - node.id = "llm-node-id" - node.node_type = BuiltinNodeTypes.LLM - node.model_instance, _ = _build_wrapped_model_instance() + node = _build_node(node_type=BuiltinNodeTypes.LLM) node.graph_runtime_state = MagicMock() node.graph_runtime_state.stop_event = stop_event with patch( - "core.app.workflow.layers.llm_quota.ensure_llm_quota_available", + "core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True, side_effect=QuotaExceededError("Model provider openai quota exceeded."), ): @@ -177,21 +212,140 @@ def test_quota_precheck_failure_aborts_workflow_immediately() -> None: assert abort_command.reason == "Model provider openai quota exceeded." -def test_quota_precheck_passes_without_abort() -> None: - layer = LLMQuotaLayer() +def test_quota_precheck_failure_blocks_current_node_run() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") stop_event = threading.Event() layer.command_channel = MagicMock() - node = MagicMock() - node.id = "llm-node-id" - node.node_type = BuiltinNodeTypes.LLM - node.model_instance, raw_model_instance = _build_wrapped_model_instance() + node = _RunnableQuotaNode(stop_event=stop_event) + + with patch( + "core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", + autospec=True, + side_effect=QuotaExceededError("Model provider openai quota exceeded."), + ): + layer.on_node_run_start(node) + + result = node._run() + assert not node.original_run_called + assert result.status == WorkflowNodeExecutionStatus.FAILED + assert result.error == "Model provider openai quota exceeded." 
+ assert result.error_type == QuotaExceededError.__name__ + + +def test_missing_model_identity_blocks_current_node_run() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _RunnableQuotaNode(stop_event=stop_event, node_data=_build_node_data()) + + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + result = node._run() + assert not node.original_run_called + assert result.status == WorkflowNodeExecutionStatus.FAILED + assert result.error == "LLM quota check requires public node model identity before execution." + assert result.error_type == "LLMQuotaIdentityError" + mock_check.assert_not_called() + + +def test_quota_precheck_passes_without_abort() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) node.graph_runtime_state = MagicMock() node.graph_runtime_state.stop_event = stop_event - with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available", autospec=True) as mock_check: + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: layer.on_node_run_start(node) assert not stop_event.is_set() - mock_check.assert_called_once_with(model_instance=raw_model_instance) + mock_check.assert_called_once_with( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) layer.command_channel.send_command.assert_not_called() + + +def test_precheck_reads_model_identity_from_data_when_node_data_is_absent() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = SimpleNamespace( + id="node-id", + node_type=BuiltinNodeTypes.LLM, + data=_build_node_data(model=_build_public_model_identity(provider="anthropic", model_name="claude")), + ) + + with 
patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + mock_check.assert_called_once_with( + tenant_id="tenant-id", + provider="anthropic", + model="claude", + ) + + +def test_precheck_rejects_invalid_public_model_identity() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) + node.node_data = _build_node_data(model=_build_public_model_identity(provider="", model_name="gpt-4o")) + node.graph_runtime_state = MagicMock() + node.graph_runtime_state.stop_event = stop_event + + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + assert stop_event.is_set() + mock_check.assert_not_called() + layer.command_channel.send_command.assert_called_once() + + +def test_precheck_requires_public_node_model_config() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) + node.node_data = _build_node_data() + node.graph_runtime_state = MagicMock() + node.graph_runtime_state.stop_event = stop_event + + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + assert stop_event.is_set() + mock_check.assert_not_called() + layer.command_channel.send_command.assert_called_once() + abort_command = layer.command_channel.send_command.call_args.args[0] + assert abort_command.command_type == CommandType.ABORT + assert abort_command.reason == "LLM quota check requires public node model identity before execution." 
+ + +def test_deduction_requires_public_event_model_identity() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) + node.graph_runtime_state = MagicMock() + node.graph_runtime_state.stop_event = stop_event + result_event = _build_succeeded_event() + result_event.node_run_result.inputs = {"question": "hello"} + + with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True) as mock_deduct: + layer.on_node_run_end(node=node, error=None, result_event=result_event) + + assert stop_event.is_set() + mock_deduct.assert_not_called() + layer.command_channel.send_command.assert_called_once() + abort_command = layer.command_channel.send_command.call_args.args[0] + assert abort_command.command_type == CommandType.ABORT + assert abort_command.reason == "LLM quota deduction requires model identity in the node result event." diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_factory.py b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_factory.py index 9f3e3b00b9..c721c7b0eb 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_factory.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_factory.py @@ -96,7 +96,7 @@ class MockNodeFactory(DifyNodeFactory): if node_type == BuiltinNodeTypes.CODE: mock_instance = mock_class( node_id=node_id, - config=resolved_node_data, + data=resolved_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, mock_config=self.mock_config, @@ -106,7 +106,7 @@ class MockNodeFactory(DifyNodeFactory): elif node_type == BuiltinNodeTypes.HTTP_REQUEST: mock_instance = mock_class( node_id=node_id, - config=resolved_node_data, + data=resolved_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, mock_config=self.mock_config, @@ -122,7 +122,7 @@ class 
MockNodeFactory(DifyNodeFactory): }: mock_instance = mock_class( node_id=node_id, - config=resolved_node_data, + data=resolved_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, mock_config=self.mock_config, @@ -132,7 +132,7 @@ class MockNodeFactory(DifyNodeFactory): else: mock_instance = mock_class( node_id=node_id, - config=resolved_node_data, + data=resolved_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, mock_config=self.mock_config, diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_nodes.py b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_nodes.py index f9819c47ec..e0eb4e7361 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_nodes.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_nodes.py @@ -56,7 +56,7 @@ class MockNodeMixin: def __init__( self, node_id: str, - config: Any, + data: Any, *, graph_init_params: "GraphInitParams", graph_runtime_state: "GraphRuntimeState", @@ -98,7 +98,7 @@ class MockNodeMixin: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, **kwargs, diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py b/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py index 75bc6d05f7..6156f7b576 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py @@ -111,7 +111,7 @@ class StaticRepo(HumanInputFormRepository): def _build_runtime_state() -> GraphRuntimeState: - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", @@ -140,7 +140,7 @@ def _build_graph(runtime_state: 
GraphRuntimeState, repo: HumanInputFormRepositor start_config = {"id": "start", "data": StartNodeData(title="Start", variables=[]).model_dump()} start_node = StartNode( node_id=start_config["id"], - config=StartNodeData(title="Start", variables=[]), + data=StartNodeData(title="Start", variables=[]), graph_init_params=graph_init_params, graph_runtime_state=runtime_state, ) @@ -155,7 +155,7 @@ def _build_graph(runtime_state: GraphRuntimeState, repo: HumanInputFormRepositor human_a_config = {"id": "human_a", "data": human_data.model_dump()} human_a = HumanInputNode( node_id=human_a_config["id"], - config=human_data, + data=human_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, form_repository=repo, @@ -165,7 +165,7 @@ def _build_graph(runtime_state: GraphRuntimeState, repo: HumanInputFormRepositor human_b_config = {"id": "human_b", "data": human_data.model_dump()} human_b = HumanInputNode( node_id=human_b_config["id"], - config=human_data, + data=human_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, form_repository=repo, @@ -183,7 +183,7 @@ def _build_graph(runtime_state: GraphRuntimeState, repo: HumanInputFormRepositor end_config = {"id": "end", "data": end_data.model_dump()} end_node = EndNode( node_id=end_config["id"], - config=end_data, + data=end_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_tool_in_chatflow.py b/api/tests/unit_tests/core/workflow/graph_engine/test_tool_in_chatflow.py index 12aec6edf2..ba1e74f3e0 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_tool_in_chatflow.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_tool_in_chatflow.py @@ -5,6 +5,7 @@ from graphon.graph_events import ( NodeRunStreamChunkEvent, ) +from .test_mock_config import MockConfigBuilder from .test_table_runner import TableTestRunner @@ -44,3 +45,51 @@ def test_tool_in_chatflow(): assert 
stream_chunk_events[0].chunk == "hello, dify!", ( f"Expected chunk to be 'hello, dify!', but got {stream_chunk_events[0].chunk}" ) + + +def test_answer_can_render_llm_structured_output_in_chatflow(): + runner = TableTestRunner() + + fixture_data = runner.workflow_runner.load_fixture("basic_chatflow") + nodes = fixture_data["workflow"]["graph"]["nodes"] + answer_node = next(node for node in nodes if node["id"] == "answer") + answer_node["data"]["answer"] = "{{#llm.structured_output#}}" + + mock_config = ( + MockConfigBuilder() + .with_node_output( + "llm", + { + "text": "plain text", + "structured_output": {"type": "greeting"}, + "usage": { + "prompt_tokens": 10, + "completion_tokens": 5, + "total_tokens": 15, + }, + "finish_reason": "stop", + }, + ) + .build() + ) + + graph, graph_runtime_state = runner.workflow_runner.create_graph_from_fixture( + fixture_data=fixture_data, + query="hello", + use_mock_factory=True, + mock_config=mock_config, + ) + + engine = GraphEngine( + workflow_id="test_workflow", + graph=graph, + graph_runtime_state=graph_runtime_state, + command_channel=InMemoryChannel(), + config=GraphEngineConfig(), + ) + + events = list(engine.run()) + success_events = [e for e in events if isinstance(e, GraphRunSucceededEvent)] + + assert success_events, "Workflow should complete successfully" + assert success_events[-1].outputs["answer"] == '{\n "type": "greeting"\n}' diff --git a/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py b/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py index 76b4cd1ef4..2603e29be6 100644 --- a/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py +++ b/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py @@ -1,41 +1,36 @@ import time import uuid -from unittest.mock import MagicMock from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom -from core.workflow.node_factory import DifyNodeFactory from core.workflow.system_variables import build_system_variables -from 
extensions.ext_database import db from graphon.enums import WorkflowNodeExecutionStatus -from graphon.graph import Graph from graphon.nodes.answer.answer_node import AnswerNode from graphon.nodes.answer.entities import AnswerNodeData from graphon.runtime import GraphRuntimeState, VariablePool from tests.workflow_test_utils import build_test_graph_init_params -def test_execute_answer(): +def _build_variable_pool() -> VariablePool: + return VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="aaa", files=[]), + user_inputs={}, + ) + + +def _build_answer_node(*, answer: str, variable_pool: VariablePool) -> AnswerNode: graph_config = { - "edges": [ - { - "id": "start-source-answer-target", - "source": "start", - "target": "answer", - }, - ], + "edges": [], "nodes": [ - {"data": {"type": "start", "title": "Start"}, "id": "start"}, { "data": { - "title": "123", + "title": "Answer", "type": "answer", - "answer": "Today's weather is {{#start.weather#}}\n{{#llm.text#}}\n{{img}}\nFin.", + "answer": answer, }, "id": "answer", - }, + } ], } - init_params = build_test_graph_init_params( workflow_id="1", graph_config=graph_config, @@ -46,43 +41,58 @@ def test_execute_answer(): invoke_from=InvokeFrom.DEBUGGER, call_depth=0, ) - - # construct variable pool - variable_pool = VariablePool( - system_variables=build_system_variables(user_id="aaa", files=[]), - user_inputs={}, - environment_variables=[], - conversation_variables=[], + graph_runtime_state = GraphRuntimeState( + variable_pool=variable_pool, + start_at=time.perf_counter(), ) - variable_pool.add(["start", "weather"], "sunny") - variable_pool.add(["llm", "text"], "You are a helpful AI.") - - graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()) - - # create node factory - node_factory = DifyNodeFactory( - graph_init_params=init_params, - graph_runtime_state=graph_runtime_state, - ) - - graph = Graph.init(graph_config=graph_config, 
node_factory=node_factory, root_node_id="start") - - node = AnswerNode( + return AnswerNode( node_id=str(uuid.uuid4()), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, - config=AnswerNodeData( - title="123", + data=AnswerNodeData( + title="Answer", type="answer", - answer="Today's weather is {{#start.weather#}}\n{{#llm.text#}}\n{{img}}\nFin.", + answer=answer, ), ) - # Mock db.session.close() - db.session.close = MagicMock() - # execute node +def test_execute_answer_renders_variable_selectors() -> None: + variable_pool = _build_variable_pool() + variable_pool.add(["start", "weather"], "sunny") + variable_pool.add(["llm", "text"], "You are a helpful AI.") + node = _build_answer_node( + answer="Today's weather is {{#start.weather#}}\n{{#llm.text#}}\n{{img}}\nFin.", + variable_pool=variable_pool, + ) + result = node._run() assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED assert result.outputs["answer"] == "Today's weather is sunny\nYou are a helpful AI.\n{{img}}\nFin." 
+ + +def test_execute_answer_renders_structured_output_object_as_json() -> None: + variable_pool = _build_variable_pool() + variable_pool.add(["1777539038857", "structured_output"], {"type": "greeting"}) + node = _build_answer_node( + answer="{{#1777539038857.structured_output#}}", + variable_pool=variable_pool, + ) + + result = node._run() + + assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED + assert result.outputs["answer"] == '{\n "type": "greeting"\n}' + + +def test_execute_answer_falls_back_to_plain_selector_text_when_structured_output_missing() -> None: + node = _build_answer_node( + answer="{{#1777539038857.structured_output#}}", + variable_pool=_build_variable_pool(), + ) + + result = node._run() + + assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED + assert result.outputs["answer"] == "1777539038857.structured_output" diff --git a/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py b/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py index d7ef781732..235d56e989 100644 --- a/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py @@ -1,3 +1,5 @@ +from pytest_mock import MockerFixture + from core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY from core.workflow.nodes.datasource.datasource_node import DatasourceNode from core.workflow.nodes.datasource.entities import DatasourceNodeData @@ -44,7 +46,7 @@ class _GraphParams: call_depth = 0 -def test_datasource_node_delegates_to_manager_stream(mocker): +def test_datasource_node_delegates_to_manager_stream(mocker: MockerFixture): # prepare sys variables sys_vars = { "sys": { @@ -79,7 +81,7 @@ def test_datasource_node_delegates_to_manager_stream(mocker): node = DatasourceNode( node_id="n", - config=DatasourceNodeData( + data=DatasourceNodeData( type="datasource", version="1", title="Datasource", diff --git 
a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py index be7cc073db..796fc7719d 100644 --- a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py +++ b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py @@ -29,7 +29,7 @@ HTTP_REQUEST_CONFIG = HttpRequestNodeConfig( def test_executor_with_json_body_and_number_variable(): # Prepare the variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -85,7 +85,7 @@ def test_executor_with_json_body_and_number_variable(): def test_executor_with_json_body_and_object_variable(): # Prepare the variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -143,7 +143,7 @@ def test_executor_with_json_body_and_object_variable(): def test_executor_with_json_body_and_nested_object_variable(): # Prepare the variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -201,7 +201,7 @@ def test_executor_with_json_body_and_nested_object_variable(): def test_extract_selectors_from_template_with_newline(): - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) variable_pool.add(("node_id", "custom_query"), "line1\nline2") node_data = HttpRequestNodeData( title="Test JSON Body with Nested Object Variable", @@ -230,7 +230,7 @@ def test_extract_selectors_from_template_with_newline(): def test_executor_with_form_data(): # Prepare the variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( 
system_variables=default_system_variables(), user_inputs={}, ) @@ -320,7 +320,7 @@ def test_init_headers(): node_data=node_data, timeout=timeout, http_request_config=HTTP_REQUEST_CONFIG, - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), http_client=ssrf_proxy, file_manager=file_manager, ) @@ -357,7 +357,7 @@ def test_init_params(): node_data=node_data, timeout=timeout, http_request_config=HTTP_REQUEST_CONFIG, - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), http_client=ssrf_proxy, file_manager=file_manager, ) @@ -390,7 +390,7 @@ def test_init_params(): def test_empty_api_key_raises_error_bearer(): """Test that empty API key raises AuthorizationConfigError for bearer auth.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -417,7 +417,7 @@ def test_empty_api_key_raises_error_bearer(): def test_empty_api_key_raises_error_basic(): """Test that empty API key raises AuthorizationConfigError for basic auth.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -444,7 +444,7 @@ def test_empty_api_key_raises_error_basic(): def test_empty_api_key_raises_error_custom(): """Test that empty API key raises AuthorizationConfigError for custom auth.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -471,7 +471,7 @@ 
def test_empty_api_key_raises_error_custom(): def test_whitespace_only_api_key_raises_error(): """Test that whitespace-only API key raises AuthorizationConfigError.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -498,7 +498,7 @@ def test_whitespace_only_api_key_raises_error(): def test_valid_api_key_works(): """Test that valid API key works correctly for bearer auth.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -536,7 +536,7 @@ def test_executor_with_json_body_and_unquoted_uuid_variable(): # UUID that triggers the json_repair truncation bug test_uuid = "57eeeeb1-450b-482c-81b9-4be77e95dee2" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -583,7 +583,7 @@ def test_executor_with_json_body_and_unquoted_uuid_with_newlines(): """ test_uuid = "57eeeeb1-450b-482c-81b9-4be77e95dee2" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -624,7 +624,7 @@ def test_executor_with_json_body_and_unquoted_uuid_with_newlines(): def test_executor_with_json_body_preserves_numbers_and_strings(): """Test that numbers are preserved and string values are properly quoted.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py index 2e89a2da3c..afde541beb 100644 --- 
a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py @@ -110,12 +110,15 @@ def _build_http_node( call_depth=0, ) graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="user", files=[]), + user_inputs={}, + ), start_at=time.perf_counter(), ) return HttpRequestNode( node_id="http-node", - config=HttpRequestNodeData.model_validate(node_data), + data=HttpRequestNodeData.model_validate(node_data), graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, http_request_config=HTTP_REQUEST_CONFIG, diff --git a/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py b/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py index 0659984c76..715292b85c 100644 --- a/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py +++ b/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py @@ -149,7 +149,7 @@ def _build_human_input_node( ) return HumanInputNode( node_id=node_id, - config=typed_node_data, + data=typed_node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, runtime=runtime, @@ -241,16 +241,16 @@ class TestUserAction: def test_user_action_length_boundaries(self): """Test user action id and title length boundaries.""" - action = UserAction(id="a" * 20, title="b" * 20) + action = UserAction(id="a" * 20, title="b" * 100) assert action.id == "a" * 20 - assert action.title == "b" * 20 + assert action.title == "b" * 100 @pytest.mark.parametrize( ("field_name", "value"), [ ("id", "a" * 21), - ("title", "b" * 21), + ("title", "b" * 101), ], ) def test_user_action_length_limits(self, field_name: str, value: str): @@ -427,7 +427,7 @@ class 
TestHumanInputNodeVariableResolution: """Tests for resolving variable-based defaults in HumanInputNode.""" def test_resolves_variable_defaults(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", @@ -504,7 +504,7 @@ class TestHumanInputNodeVariableResolution: assert params.resolved_default_values == expected_values def test_debugger_falls_back_to_recipient_token_when_webapp_disabled(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", @@ -565,7 +565,7 @@ class TestHumanInputNodeVariableResolution: assert not hasattr(pause_event.reason, "form_token") def test_webapp_runtime_keeps_form_visible_in_ui_when_webapp_delivery_is_enabled(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", @@ -631,7 +631,7 @@ class TestHumanInputNodeVariableResolution: assert params.display_in_ui is True def test_debugger_debug_mode_overrides_email_recipients(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user-123", app_id="app", @@ -748,7 +748,7 @@ class TestHumanInputNodeRenderedContent: """Tests for rendering submitted content.""" def test_replaces_outputs_placeholders_after_submission(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", diff --git a/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py b/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py index 4a9438b14f..741b104393 100644 --- a/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py +++ 
b/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py @@ -40,7 +40,7 @@ def _create_human_input_node( ) return HumanInputNode( node_id=config["id"], - config=node_data, + data=node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, form_repository=repo, @@ -51,7 +51,11 @@ def _create_human_input_node( def _build_node(form_content: str = "Please enter your name:\n\n{{#$output.name#}}") -> HumanInputNode: system_variables = default_system_variables() graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=system_variables, user_inputs={}, environment_variables=[]), + variable_pool=VariablePool.from_bootstrap( + system_variables=system_variables, + user_inputs={}, + environment_variables=[], + ), start_at=0.0, ) graph_init_params = GraphInitParams( @@ -114,7 +118,11 @@ def _build_node(form_content: str = "Please enter your name:\n\n{{#$output.name# def _build_timeout_node() -> HumanInputNode: system_variables = default_system_variables() graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=system_variables, user_inputs={}, environment_variables=[]), + variable_pool=VariablePool.from_bootstrap( + system_variables=system_variables, + user_inputs={}, + environment_variables=[], + ), start_at=0.0, ) graph_init_params = GraphInitParams( diff --git a/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration_child_engine_errors.py b/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration_child_engine_errors.py index 8ffce39cd6..18ed7a0b1d 100644 --- a/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration_child_engine_errors.py +++ b/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration_child_engine_errors.py @@ -32,7 +32,7 @@ class _MissingGraphBuilder: def _build_runtime_state() -> GraphRuntimeState: return GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables(), 
user_inputs={}), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables(), user_inputs={}), start_at=0.0, ) @@ -46,7 +46,7 @@ def _build_iteration_node( init_params = build_test_graph_init_params(graph_config=graph_config) return IterationNode( node_id="iteration-node", - config=IterationNodeData( + data=IterationNodeData( type="iteration", title="Iteration", iterator_selector=["start", "items"], diff --git a/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py b/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py index f254fc3d09..0d760a2db7 100644 --- a/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py @@ -3,6 +3,7 @@ import uuid from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.rag.index_processor.constant.index_type import IndexTechniqueType @@ -40,7 +41,7 @@ def mock_graph_init_params(): @pytest.fixture def mock_graph_runtime_state(): """Create mock GraphRuntimeState.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id=str(uuid.uuid4()), files=[]), user_inputs={}, environment_variables=[], @@ -50,7 +51,7 @@ def mock_graph_runtime_state(): @pytest.fixture -def mock_index_processor(mocker): +def mock_index_processor(mocker: MockerFixture): """Create mock IndexProcessorProtocol.""" mock_processor = Mock(spec=IndexProcessorProtocol) mocker.patch( @@ -61,7 +62,7 @@ def mock_index_processor(mocker): @pytest.fixture -def mock_summary_index_service(mocker): +def mock_summary_index_service(mocker: MockerFixture): """Create mock SummaryIndexServiceProtocol.""" mock_service = Mock(spec=SummaryIndexServiceProtocol) mocker.patch( @@ -102,7 +103,7 @@ def 
_build_node( ) -> KnowledgeIndexNode: return KnowledgeIndexNode( node_id=node_id, - config=( + data=( node_data if isinstance(node_data, KnowledgeIndexNodeData) else KnowledgeIndexNodeData.model_validate(node_data) diff --git a/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py b/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py index e923ee761b..3c821e75ba 100644 --- a/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py @@ -3,6 +3,7 @@ import uuid from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.workflow.nodes.knowledge_retrieval.entities import ( @@ -46,7 +47,7 @@ def mock_graph_init_params(): @pytest.fixture def mock_graph_runtime_state(): """Create mock GraphRuntimeState.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id=str(uuid.uuid4()), files=[]), user_inputs={}, environment_variables=[], @@ -56,7 +57,7 @@ def mock_graph_runtime_state(): @pytest.fixture -def mock_rag_retrieval(mocker): +def mock_rag_retrieval(mocker: MockerFixture): """Create mock RAGRetrievalProtocol.""" mock_retrieval = Mock(spec=RAGRetrievalProtocol) mock_retrieval.knowledge_retrieval.return_value = [] @@ -117,7 +118,7 @@ class TestKnowledgeRetrievalNode: # Act node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -146,7 +147,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - 
config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -205,7 +206,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -249,7 +250,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -285,7 +286,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -320,7 +321,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -361,7 +362,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -400,7 +401,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - 
config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -481,7 +482,7 @@ class TestFetchDatasetRetriever: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -518,7 +519,7 @@ class TestFetchDatasetRetriever: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -573,7 +574,7 @@ class TestFetchDatasetRetriever: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -621,7 +622,7 @@ class TestFetchDatasetRetriever: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -682,7 +683,7 @@ class TestFetchDatasetRetriever: config = {"id": node_id, "data": node_data.model_dump()} node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/list_operator/node_spec.py 
b/api/tests/unit_tests/core/workflow/nodes/list_operator/node_spec.py index 388654f279..20b94d5d50 100644 --- a/api/tests/unit_tests/core/workflow/nodes/list_operator/node_spec.py +++ b/api/tests/unit_tests/core/workflow/nodes/list_operator/node_spec.py @@ -16,10 +16,10 @@ class TestListOperatorNode: """Comprehensive tests for ListOperatorNode.""" @staticmethod - def _build_node(*, config, graph_init_params, graph_runtime_state): + def _build_node(*, data, graph_init_params, graph_runtime_state): return ListOperatorNode( node_id="test", - config=config if isinstance(config, ListOperatorNodeData) else ListOperatorNodeData.model_validate(config), + data=data if isinstance(data, ListOperatorNodeData) else ListOperatorNodeData.model_validate(data), graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) @@ -65,7 +65,7 @@ class TestListOperatorNode: def _create_node(config, mock_variable): mock_graph_runtime_state.variable_pool.get.return_value = mock_variable return self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -83,7 +83,7 @@ class TestListOperatorNode: } node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -127,7 +127,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -153,7 +153,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -177,7 +177,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + 
data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -201,7 +201,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -228,7 +228,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -255,7 +255,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -282,7 +282,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -312,7 +312,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -335,7 +335,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = None node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -359,7 +359,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -384,7 +384,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = 
mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -408,7 +408,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -432,7 +432,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -456,7 +456,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -483,7 +483,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/llm/test_llm_utils.py b/api/tests/unit_tests/core/workflow/nodes/llm/test_llm_utils.py index 212ad07bd3..6a2fc81fef 100644 --- a/api/tests/unit_tests/core/workflow/nodes/llm/test_llm_utils.py +++ b/api/tests/unit_tests/core/workflow/nodes/llm/test_llm_utils.py @@ -613,7 +613,7 @@ def test_combine_message_content_with_role_handles_all_supported_roles(): SystemPromptMessage(content=contents) ) - with pytest.raises(NotImplementedError, match="Role custom is not supported"): + with pytest.raises(AssertionError, match="Expected code to be unreachable"): llm_utils.combine_message_content_with_role(contents=contents, role="custom") # type: ignore[arg-type] diff --git a/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py 
b/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py index c707cf28cd..fb50723402 100644 --- a/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py @@ -15,7 +15,7 @@ from core.app.llm.model_access import ( ) from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle from core.entities.provider_entities import CustomConfiguration, SystemConfiguration -from core.plugin.impl.model_runtime_factory import create_plugin_model_runtime +from core.plugin.impl.model_runtime_factory import create_plugin_model_assembly from core.prompt.entities.advanced_prompt_entities import MemoryConfig from core.workflow.system_variables import default_system_variables from graphon.entities import GraphInitParams @@ -187,7 +187,7 @@ def graph_init_params() -> GraphInitParams: @pytest.fixture def graph_runtime_state() -> GraphRuntimeState: - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -208,7 +208,7 @@ def llm_node( http_client = mock.MagicMock() node = LLMNode( node_id="1", - config=llm_node_data, + data=llm_node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, credentials_provider=mock_credentials_provider, @@ -222,7 +222,7 @@ def llm_node( @pytest.fixture -def model_config(monkeypatch): +def model_config(monkeypatch: pytest.MonkeyPatch): from tests.integration_tests.model_runtime.__mock.plugin_model import MockModelClass def mock_model_providers(_self): @@ -241,9 +241,10 @@ def model_config(monkeypatch): ) # Create actual provider and model type instances - model_provider_factory = ModelProviderFactory(model_runtime=create_plugin_model_runtime(tenant_id="test")) + model_assembly = create_plugin_model_assembly(tenant_id="test") + model_provider_factory = model_assembly.model_provider_factory provider_instance = 
model_provider_factory.get_model_provider("openai") - model_type_instance = model_provider_factory.get_model_type_instance("openai", ModelType.LLM) + model_type_instance = model_assembly.create_model_type_instance(provider="openai", model_type=ModelType.LLM) # Create a ProviderModelBundle provider_model_bundle = ProviderModelBundle( @@ -1173,7 +1174,7 @@ def llm_node_for_multimodal(llm_node_data, graph_init_params, graph_runtime_stat http_client = mock.MagicMock() node = LLMNode( node_id="1", - config=llm_node_data, + data=llm_node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, credentials_provider=mock_credentials_provider, @@ -1276,7 +1277,7 @@ class TestSaveMultimodalOutputAndConvertResultToMarkdown: mock_file_saver.save_binary_string.assert_not_called() mock_file_saver.save_remote_url.assert_not_called() - def test_image_content_with_inline_data(self, llm_node_for_multimodal, monkeypatch): + def test_image_content_with_inline_data(self, llm_node_for_multimodal, monkeypatch: pytest.MonkeyPatch): llm_node, mock_file_saver = llm_node_for_multimodal image_raw_data = b"PNG_DATA" diff --git a/api/tests/unit_tests/core/workflow/nodes/template_transform/template_transform_node_spec.py b/api/tests/unit_tests/core/workflow/nodes/template_transform/template_transform_node_spec.py index 892f6cc586..dd57dde1fe 100644 --- a/api/tests/unit_tests/core/workflow/nodes/template_transform/template_transform_node_spec.py +++ b/api/tests/unit_tests/core/workflow/nodes/template_transform/template_transform_node_spec.py @@ -28,7 +28,7 @@ def _build_template_transform_node( ) return TemplateTransformNode( node_id=node_id, - config=typed_node_data, + data=typed_node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, **kwargs, diff --git a/api/tests/unit_tests/core/workflow/nodes/template_transform/test_template_transform_node.py 
b/api/tests/unit_tests/core/workflow/nodes/template_transform/test_template_transform_node.py index a846efbb43..c25ac7da0f 100644 --- a/api/tests/unit_tests/core/workflow/nodes/template_transform/test_template_transform_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/template_transform/test_template_transform_node.py @@ -39,7 +39,7 @@ def mock_graph_runtime_state(): def test_node_uses_default_max_output_length_when_not_overridden(graph_init_params, mock_graph_runtime_state): node = TemplateTransformNode( node_id="test_node", - config=TemplateTransformNodeData( + data=TemplateTransformNodeData( title="Template Transform", type="template-transform", variables=[], diff --git a/api/tests/unit_tests/core/workflow/nodes/test_base_node.py b/api/tests/unit_tests/core/workflow/nodes/test_base_node.py index 364408ead6..a05151f79b 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_base_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_base_node.py @@ -35,7 +35,10 @@ def _build_context(graph_config: Mapping[str, object]) -> tuple[GraphInitParams, invoke_from="debugger", ) runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="user", files=[]), + user_inputs={}, + ), start_at=0.0, ) return init_params, runtime_state @@ -62,7 +65,7 @@ def test_node_hydrates_data_during_initialization(): node = _SampleNode( node_id="node-1", - config=_build_node_data(), + data=_build_node_data(), graph_init_params=init_params, graph_runtime_state=runtime_state, ) @@ -82,13 +85,16 @@ def test_node_accepts_invoke_from_enum(): invoke_from=InvokeFrom.DEBUGGER, ) runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}), + variable_pool=VariablePool.from_bootstrap( + 
system_variables=build_system_variables(user_id="user", files=[]), + user_inputs={}, + ), start_at=0.0, ) node = _SampleNode( node_id="node-1", - config=_build_node_data(), + data=_build_node_data(), graph_init_params=init_params, graph_runtime_state=runtime_state, ) @@ -140,7 +146,7 @@ def test_node_hydration_preserves_compatibility_extra_fields(): node = _SampleNode( node_id="node-1", - config=node_config["data"], + data=node_config["data"], graph_init_params=init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py b/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py index dd75b32593..4c67f3fb02 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py @@ -49,7 +49,7 @@ def document_extractor_node(graph_init_params): http_client = Mock() node = DocumentExtractorNode( node_id="test_node_id", - config=node_data, + data=node_data, graph_init_params=graph_init_params, graph_runtime_state=Mock(), http_client=http_client, @@ -186,12 +186,13 @@ def test_run_extract_text( monkeypatch.setattr("graphon.file.file_manager.download", mock_download) + dispatch_mock = None if mime_type == "application/pdf": - mock_pdf_extract = Mock(return_value=expected_text[0]) - monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_from_pdf", mock_pdf_extract) + dispatch_mock = Mock(return_value=expected_text[0]) + monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_by_file_extension", dispatch_mock) elif mime_type.startswith("application/vnd.openxmlformats"): - mock_docx_extract = Mock(return_value=expected_text[0]) - monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_from_docx", mock_docx_extract) + dispatch_mock = Mock(return_value=expected_text[0]) + 
monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_by_mime_type", dispatch_mock) result = document_extractor_node._run() @@ -200,6 +201,19 @@ def test_run_extract_text( assert result.outputs is not None assert result.outputs["text"] == ArrayStringSegment(value=expected_text) + if mime_type == "application/pdf": + dispatch_mock.assert_called_once_with( + file_content=file_content, + file_extension=extension, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + elif mime_type.startswith("application/vnd.openxmlformats"): + dispatch_mock.assert_called_once_with( + file_content=file_content, + mime_type=mime_type, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + if transfer_method == FileTransferMethod.REMOTE_URL: document_extractor_node._http_client.get.assert_called_once_with("https://example.com/file.txt") elif transfer_method == FileTransferMethod.LOCAL_FILE: @@ -439,24 +453,42 @@ def test_extract_text_from_file_routes_excel_inputs(document_extractor_node, ext file.extension = extension file.mime_type = mime_type - with ( - patch( - "graphon.nodes.document_extractor.node._download_file_content", - return_value=b"excel", - ), - patch( - "graphon.nodes.document_extractor.node._extract_text_from_excel", - return_value="excel text", - ) as mock_extract, + with patch( + "graphon.nodes.document_extractor.node._download_file_content", + return_value=b"excel", ): - result = _extract_text_from_file( - document_extractor_node.http_client, - file, - unstructured_api_config=document_extractor_node._unstructured_api_config, - ) + if extension: + with patch( + "graphon.nodes.document_extractor.node._extract_text_by_file_extension", + return_value="excel text", + ) as mock_extract: + result = _extract_text_from_file( + document_extractor_node.http_client, + file, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + mock_extract.assert_called_once_with( + 
file_content=b"excel", + file_extension=extension, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + else: + with patch( + "graphon.nodes.document_extractor.node._extract_text_by_mime_type", + return_value="excel text", + ) as mock_extract: + result = _extract_text_from_file( + document_extractor_node.http_client, + file, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + mock_extract.assert_called_once_with( + file_content=b"excel", + mime_type=mime_type, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) assert result == "excel text" - mock_extract.assert_called_once_with(b"excel") def test_extract_text_from_file_rejects_missing_extension_and_mime_type(document_extractor_node): diff --git a/api/tests/unit_tests/core/workflow/nodes/test_if_else.py b/api/tests/unit_tests/core/workflow/nodes/test_if_else.py index aa9a1360b0..5965645c4f 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_if_else.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_if_else.py @@ -29,7 +29,7 @@ def _build_if_else_node( node_id=str(uuid.uuid4()), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, - config=node_data if isinstance(node_data, IfElseNodeData) else IfElseNodeData.model_validate(node_data), + data=node_data if isinstance(node_data, IfElseNodeData) else IfElseNodeData.model_validate(node_data), ) @@ -48,7 +48,10 @@ def test_execute_if_else_result_true(): ) # construct variable pool - pool = VariablePool(system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}) + pool = VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="aaa", files=[]), + user_inputs={}, + ) pool.add(["start", "array_contains"], ["ab", "def"]) pool.add(["start", "array_not_contains"], ["ac", "def"]) pool.add(["start", "contains"], "cabcde") @@ -148,7 +151,7 @@ def test_execute_if_else_result_false(): ) # construct variable pool - pool = 
VariablePool( + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -305,7 +308,7 @@ def test_execute_if_else_boolean_conditions(condition: Condition): ) # construct variable pool with boolean values - pool = VariablePool( + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(files=[], user_id="aaa"), ) pool.add(["start", "bool_true"], True) @@ -359,7 +362,7 @@ def test_execute_if_else_boolean_false_conditions(): ) # construct variable pool with boolean values - pool = VariablePool( + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(files=[], user_id="aaa"), ) pool.add(["start", "bool_true"], True) @@ -424,7 +427,7 @@ def test_execute_if_else_boolean_cases_structure(): ) # construct variable pool with boolean values - pool = VariablePool( + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(files=[], user_id="aaa"), ) pool.add(["start", "bool_true"], True) diff --git a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py index 465a4c0ff4..1b4cecc757 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py @@ -22,7 +22,7 @@ from graphon.variables import ArrayFileSegment def _build_list_operator_node(node_data: ListOperatorNodeData, graph_init_params) -> ListOperatorNode: return ListOperatorNode( node_id="test_node_id", - config=node_data, + data=node_data, graph_init_params=graph_init_params, graph_runtime_state=MagicMock(), ) diff --git a/api/tests/unit_tests/core/workflow/nodes/test_start_node_json_object.py b/api/tests/unit_tests/core/workflow/nodes/test_start_node_json_object.py index 5655f80737..f890f79511 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_start_node_json_object.py +++ 
b/api/tests/unit_tests/core/workflow/nodes/test_start_node_json_object.py @@ -31,7 +31,7 @@ def make_start_node(user_inputs, variables): return StartNode( node_id="start", - config=node_data, + data=node_data, graph_init_params=build_test_graph_init_params( workflow_id="wf", graph_config={}, @@ -260,7 +260,7 @@ def test_start_node_outputs_full_variable_pool_snapshot(): graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()) node = StartNode( node_id="start", - config=node_data, + data=node_data, graph_init_params=build_test_graph_init_params( workflow_id="wf", graph_config={}, diff --git a/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py b/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py index 284af68319..4d30746e5c 100644 --- a/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py @@ -15,16 +15,23 @@ from graphon.model_runtime.entities.llm_entities import LLMUsage from graphon.node_events import StreamChunkEvent, StreamCompletedEvent from graphon.nodes.tool.entities import ToolNodeData from graphon.nodes.tool_runtime_entities import ToolRuntimeHandle, ToolRuntimeMessage -from graphon.runtime import GraphRuntimeState, VariablePool +from graphon.runtime import GraphRuntimeState from graphon.variables.segments import ArrayFileSegment -from tests.workflow_test_utils import build_test_graph_init_params +from tests.workflow_test_utils import build_test_graph_init_params, build_test_variable_pool if TYPE_CHECKING: # pragma: no cover - imported for type checking only from graphon.nodes.tool.tool_node import ToolNode class _StubToolRuntime: - def get_runtime(self, *, node_id: str, node_data: Any, variable_pool: Any) -> ToolRuntimeHandle: + def get_runtime( + self, + *, + node_id: str, + node_data: Any, + variable_pool: Any, + node_execution_id: str | None = None, + ) -> ToolRuntimeHandle: raise NotImplementedError def 
get_runtime_parameters(self, *, tool_runtime: ToolRuntimeHandle) -> list[Any]: @@ -99,7 +106,7 @@ def tool_node(monkeypatch) -> ToolNode: call_depth=0, ) - variable_pool = VariablePool(system_variables=build_system_variables(user_id="user-id")) + variable_pool = build_test_variable_pool(variables=build_system_variables(user_id="user-id")) graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=0.0) config = graph_config["nodes"][0] @@ -110,7 +117,7 @@ def tool_node(monkeypatch) -> ToolNode: node = ToolNode( node_id="node-instance", - config=ToolNodeData.model_validate(config["data"]), + data=ToolNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, tool_file_manager_factory=tool_file_manager_factory, @@ -227,3 +234,22 @@ def test_image_link_messages_use_tool_file_id_metadata(tool_node: ToolNode): files_segment = completed_events[0].node_run_result.outputs["files"] assert isinstance(files_segment, ArrayFileSegment) assert files_segment.value == [file_obj] + + +def test_tool_node_passes_node_execution_id_when_runtime_accepts_it(tool_node: ToolNode): + runtime_handle = ToolRuntimeHandle(raw=object()) + tool_node._runtime.get_runtime = MagicMock(return_value=runtime_handle) + tool_node.ensure_execution_id = MagicMock(return_value="node-execution-id") + + result = tool_node._get_tool_runtime( + variable_pool=tool_node.graph_runtime_state.variable_pool, + node_execution_id="node-execution-id", + ) + + assert result is runtime_handle + tool_node._runtime.get_runtime.assert_called_once_with( + node_id="node-instance", + node_data=tool_node.node_data, + variable_pool=tool_node.graph_runtime_state.variable_pool, + node_execution_id="node-execution-id", + ) diff --git a/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node_runtime.py b/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node_runtime.py index 438af211f3..aece73ce8c 100644 --- 
a/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node_runtime.py +++ b/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node_runtime.py @@ -147,6 +147,69 @@ def test_get_runtime_converts_graph_provider_type_for_tool_manager(runtime: Dify assert workflow_tool.provider_type == CoreToolProviderType.BUILT_IN +def test_get_runtime_stores_parent_trace_context_for_workflow_tools( + runtime: DifyToolNodeRuntime, +) -> None: + variable_pool: VariablePool = build_test_variable_pool( + variables=build_system_variables( + conversation_id="conversation-id", + workflow_execution_id="workflow-run-id", + ) + ) + workflow_runtime = MagicMock() + workflow_runtime.runtime.runtime_parameters = {} + node_data = ToolNodeData.model_validate( + { + "type": "tool", + "title": "Tool", + "provider_id": "provider", + "provider_type": ToolProviderType.WORKFLOW, + "provider_name": "provider", + "tool_name": "lookup", + "tool_label": "Lookup", + "tool_configurations": {}, + "tool_parameters": {}, + } + ) + + with patch.object(ToolManager, "get_workflow_tool_runtime", return_value=workflow_runtime): + tool_runtime = runtime.get_runtime( + node_id="node-id", + node_data=node_data, + variable_pool=variable_pool, + node_execution_id="node-execution-id", + ) + + assert tool_runtime.raw.parent_trace_context.model_dump() == { + "parent_workflow_run_id": "workflow-run-id", + "parent_node_execution_id": "node-execution-id", + } + assert workflow_runtime.runtime.runtime_parameters == {} + + +def test_get_runtime_leaves_non_workflow_tool_runtime_parameters_unchanged( + runtime: DifyToolNodeRuntime, +) -> None: + variable_pool: VariablePool = build_test_variable_pool( + variables=build_system_variables( + conversation_id="conversation-id", + workflow_execution_id="workflow-run-id", + ) + ) + builtin_runtime = MagicMock() + builtin_runtime.runtime.runtime_parameters = {} + + with patch.object(ToolManager, "get_workflow_tool_runtime", return_value=builtin_runtime): + runtime.get_runtime( + 
node_id="node-id", + node_data=_build_tool_node_data(), + variable_pool=variable_pool, + node_execution_id="node-execution-id", + ) + + assert builtin_runtime.runtime.runtime_parameters == {} + + def test_get_runtime_parameters_reads_required_flags(runtime: DifyToolNodeRuntime) -> None: tool_runtime = ToolRuntimeHandle( raw=SimpleNamespace( diff --git a/api/tests/unit_tests/core/workflow/nodes/trigger_plugin/test_trigger_event_node.py b/api/tests/unit_tests/core/workflow/nodes/trigger_plugin/test_trigger_event_node.py index e3b5e3b591..c5ac8d2ce2 100644 --- a/api/tests/unit_tests/core/workflow/nodes/trigger_plugin/test_trigger_event_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/trigger_plugin/test_trigger_event_node.py @@ -44,7 +44,7 @@ def test_trigger_event_node_run_populates_trigger_info_metadata() -> None: init_params, runtime_state = _build_context(graph_config={}) node = TriggerEventNode( node_id="node-1", - config=_build_node_data(), + data=_build_node_data(), graph_init_params=init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_file_conversion.py b/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_file_conversion.py index 07d03bec05..fccb5ab1c3 100644 --- a/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_file_conversion.py +++ b/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_file_conversion.py @@ -52,7 +52,7 @@ def create_webhook_node( node = TriggerWebhookNode( node_id="webhook-node-1", - config=webhook_data, + data=webhook_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_node.py b/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_node.py index b839490d3c..c5ae542d8b 100644 --- a/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_node.py +++ 
b/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_node.py @@ -44,7 +44,7 @@ def create_webhook_node(webhook_data: WebhookData, variable_pool: VariablePool) ) node = TriggerWebhookNode( node_id="1", - config=webhook_data, + data=webhook_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/test_human_input_adapter.py b/api/tests/unit_tests/core/workflow/test_human_input_adapter.py index 8b5fceeb37..51049f8792 100644 --- a/api/tests/unit_tests/core/workflow/test_human_input_adapter.py +++ b/api/tests/unit_tests/core/workflow/test_human_input_adapter.py @@ -166,6 +166,71 @@ def test_adapt_node_data_for_graph_migrates_legacy_tool_configurations() -> None } +def test_adapt_node_data_for_graph_preserves_model_selector_top_level_configurations() -> None: + normalized = adapt_node_data_for_graph( + { + "type": BuiltinNodeTypes.TOOL, + "tool_configurations": { + "vision_llm_model": { + "type": "constant", + "value": "", + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-vl-plus", + "model_type": "llm", + "mode": "chat", + }, + }, + } + ) + + assert normalized["tool_configurations"] == {} + assert normalized["tool_parameters"] == { + "vision_llm_model": { + "type": "constant", + "value": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-vl-plus", + "model_type": "llm", + "mode": "chat", + }, + } + } + + +def test_adapt_node_data_for_graph_flattens_constant_model_selector_value() -> None: + normalized = adapt_node_data_for_graph( + { + "type": BuiltinNodeTypes.TOOL, + "tool_configurations": { + "tts_model": { + "type": "constant", + "value": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-tts-flash", + "model_type": "tts", + "language": "Chinese", + "voice": "Cherry", + }, + }, + }, + } + ) + + assert normalized["tool_configurations"] == {} + assert normalized["tool_parameters"] == { + "tts_model": { + "type": "constant", + "value": { + "provider": 
"langgenius/tongyi/tongyi", + "model": "qwen3-tts-flash", + "model_type": "tts", + "language": "Chinese", + "voice": "Cherry", + }, + } + } + + def test_adapt_node_config_for_graph_rewrites_nested_node_data() -> None: normalized = adapt_node_config_for_graph( { diff --git a/api/tests/unit_tests/core/workflow/test_node_factory.py b/api/tests/unit_tests/core/workflow/test_node_factory.py index 1418cdd87c..d6159e84d4 100644 --- a/api/tests/unit_tests/core/workflow/test_node_factory.py +++ b/api/tests/unit_tests/core/workflow/test_node_factory.py @@ -1,3 +1,4 @@ +from collections.abc import Mapping from types import SimpleNamespace from unittest.mock import MagicMock, patch, sentinel @@ -10,14 +11,21 @@ from core.workflow.nodes.knowledge_index import KNOWLEDGE_INDEX_NODE_TYPE from graphon.entities.base_node_data import BaseNodeData from graphon.enums import BuiltinNodeTypes, NodeType from graphon.nodes.code.entities import CodeLanguage +from graphon.nodes.llm.entities import LLMNodeData +from graphon.nodes.llm.node import LLMNode from graphon.variables.segments import StringSegment -def _assert_typed_node_config(config, *, node_id: str, node_type: NodeType, version: str = "1") -> None: +def _assert_constructor_node_data(data, *, node_id: str, node_type: NodeType, version: str = "1") -> None: _ = node_id - assert isinstance(config, BaseNodeData) - assert config.type == node_type - assert config.version == version + if isinstance(data, BaseNodeData): + assert data.type == node_type + assert data.version == version + return + + assert isinstance(data, Mapping) + assert data["type"] == node_type + assert data.get("version", "1") == version def _node_constructor(*, return_value): @@ -82,7 +90,7 @@ class TestFetchMemory: assert result is None - def test_returns_none_when_conversation_does_not_exist(self, monkeypatch): + def test_returns_none_when_conversation_does_not_exist(self, monkeypatch: pytest.MonkeyPatch): class FakeSelect: def where(self, *_args): return self @@ 
-113,7 +121,7 @@ class TestFetchMemory: assert result is None - def test_builds_token_buffer_memory_for_existing_conversation(self, monkeypatch): + def test_builds_token_buffer_memory_for_existing_conversation(self, monkeypatch: pytest.MonkeyPatch): conversation = sentinel.conversation memory = sentinel.memory @@ -183,7 +191,7 @@ class TestDifyGraphInitContext: class TestDefaultWorkflowCodeExecutor: - def test_execute_delegates_to_code_executor(self, monkeypatch): + def test_execute_delegates_to_code_executor(self, monkeypatch: pytest.MonkeyPatch): executor = node_factory.DefaultWorkflowCodeExecutor() execute_workflow_code_template = MagicMock(return_value={"answer": "ok"}) monkeypatch.setattr( @@ -213,7 +221,7 @@ class TestDefaultWorkflowCodeExecutor: class TestCodeExecutorJinja2TemplateRenderer: - def test_render_template_delegates_to_code_executor(self, monkeypatch): + def test_render_template_delegates_to_code_executor(self, monkeypatch: pytest.MonkeyPatch): renderer = workflow_template_rendering.CodeExecutorJinja2TemplateRenderer() execute_workflow_code_template = MagicMock(return_value={"result": "Hello workflow"}) monkeypatch.setattr( @@ -231,7 +239,7 @@ class TestCodeExecutorJinja2TemplateRenderer: inputs={"name": "workflow"}, ) - def test_render_template_wraps_code_execution_errors(self, monkeypatch): + def test_render_template_wraps_code_execution_errors(self, monkeypatch: pytest.MonkeyPatch): renderer = workflow_template_rendering.CodeExecutorJinja2TemplateRenderer() monkeypatch.setattr( workflow_template_rendering.CodeExecutor, @@ -428,7 +436,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No class mapping found for node type: missing"): factory.create_node({"id": "node-id", "data": {"type": "missing"}}) - def test_rejects_missing_class_mapping(self, monkeypatch, factory): + def test_rejects_missing_class_mapping(self, monkeypatch: pytest.MonkeyPatch, factory): monkeypatch.setattr( factory, "_resolve_node_class", @@ 
-438,7 +446,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No class mapping found for node type: start"): factory.create_node({"id": "node-id", "data": {"type": BuiltinNodeTypes.START}}) - def test_rejects_missing_latest_class(self, monkeypatch, factory): + def test_rejects_missing_latest_class(self, monkeypatch: pytest.MonkeyPatch, factory): monkeypatch.setattr( factory, "_resolve_node_class", @@ -448,7 +456,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No latest version class found for node type: start"): factory.create_node({"id": "node-id", "data": {"type": BuiltinNodeTypes.START}}) - def test_uses_version_specific_class_when_available(self, monkeypatch, factory): + def test_uses_version_specific_class_when_available(self, monkeypatch: pytest.MonkeyPatch, factory): matched_node = sentinel.matched_node latest_node_class = _node_constructor(return_value=sentinel.latest_node) matched_node_class = _node_constructor(return_value=matched_node) @@ -464,12 +472,14 @@ class TestDifyNodeFactoryCreateNode: matched_node_class.assert_called_once() kwargs = matched_node_class.call_args.kwargs assert kwargs["node_id"] == "node-id" - _assert_typed_node_config(kwargs["config"], node_id="node-id", node_type=BuiltinNodeTypes.START, version="9") + _assert_constructor_node_data(kwargs["data"], node_id="node-id", node_type=BuiltinNodeTypes.START, version="9") assert kwargs["graph_init_params"] is sentinel.graph_init_params assert kwargs["graph_runtime_state"] is factory.graph_runtime_state latest_node_class.assert_not_called() - def test_falls_back_to_latest_class_when_version_specific_mapping_is_missing(self, monkeypatch, factory): + def test_falls_back_to_latest_class_when_version_specific_mapping_is_missing( + self, monkeypatch: pytest.MonkeyPatch, factory + ): latest_node = sentinel.latest_node latest_node_class = _node_constructor(return_value=latest_node) monkeypatch.setattr( @@ -484,7 +494,7 @@ class 
TestDifyNodeFactoryCreateNode: latest_node_class.assert_called_once() kwargs = latest_node_class.call_args.kwargs assert kwargs["node_id"] == "node-id" - _assert_typed_node_config(kwargs["config"], node_id="node-id", node_type=BuiltinNodeTypes.START, version="9") + _assert_constructor_node_data(kwargs["data"], node_id="node-id", node_type=BuiltinNodeTypes.START, version="9") assert kwargs["graph_init_params"] is sentinel.graph_init_params assert kwargs["graph_runtime_state"] is factory.graph_runtime_state @@ -501,7 +511,7 @@ class TestDifyNodeFactoryCreateNode: (BuiltinNodeTypes.DOCUMENT_EXTRACTOR, "DocumentExtractorNode"), ], ) - def test_creates_specialized_nodes(self, monkeypatch, factory, node_type, constructor_name): + def test_creates_specialized_nodes(self, monkeypatch: pytest.MonkeyPatch, factory, node_type, constructor_name): created_node = object() constructor = _node_constructor(return_value=created_node) constructor._mock_name = constructor_name @@ -522,7 +532,7 @@ class TestDifyNodeFactoryCreateNode: assert result is created_node kwargs = constructor.call_args.kwargs assert kwargs["node_id"] == "node-id" - _assert_typed_node_config(kwargs["config"], node_id="node-id", node_type=node_type) + _assert_constructor_node_data(kwargs["data"], node_id="node-id", node_type=node_type) assert kwargs["graph_init_params"] is sentinel.graph_init_params assert kwargs["graph_runtime_state"] is factory.graph_runtime_state @@ -546,6 +556,133 @@ class TestDifyNodeFactoryCreateNode: assert kwargs["unstructured_api_config"] is sentinel.unstructured_api_config assert kwargs["http_client"] is sentinel.http_client + def test_build_llm_compatible_node_init_kwargs_preserves_structured_output_switch(self, factory): + node_data = LLMNodeData.model_validate( + { + "type": BuiltinNodeTypes.LLM, + "title": "LLM", + "model": {"provider": "provider", "name": "model", "mode": "chat", "completion_params": {}}, + "prompt_template": [{"role": "system", "text": "x"}], + "context": 
{"enabled": False, "variable_selector": []}, + "vision": {"enabled": False}, + "structured_output_enabled": True, + "structured_output": { + "schema": { + "type": "object", + "properties": {"type": {"type": "string"}}, + "required": ["type"], + } + }, + } + ) + wrapped_model_instance = sentinel.wrapped_model_instance + memory = sentinel.memory + factory._build_model_instance_for_llm_node = MagicMock(return_value=sentinel.model_instance) + factory._build_memory_for_llm_node = MagicMock(return_value=memory) + with patch.object(node_factory, "DifyPreparedLLM", return_value=wrapped_model_instance) as prepared_llm: + kwargs = factory._build_llm_compatible_node_init_kwargs( + node_class=sentinel.node_class, + node_data=node_data, + wrap_model_instance=True, + include_http_client=True, + include_llm_file_saver=True, + include_prompt_message_serializer=True, + include_retriever_attachment_loader=True, + include_jinja2_template_renderer=True, + ) + + assert node_data.structured_output_switch_on is True + assert node_data.structured_output_enabled is True + factory._build_model_instance_for_llm_node.assert_called_once_with(node_data) + factory._build_memory_for_llm_node.assert_called_once_with( + node_data=node_data, + model_instance=sentinel.model_instance, + ) + prepared_llm.assert_called_once_with(sentinel.model_instance) + assert kwargs["model_instance"] is wrapped_model_instance + + def test_create_node_passes_alias_preserving_llm_data_to_constructor(self, monkeypatch, factory): + created_node = object() + constructor = _node_constructor(return_value=created_node) + constructor.validate_node_data.side_effect = lambda node_data: LLMNodeData.model_validate( + node_data.model_dump(mode="python") if isinstance(node_data, BaseNodeData) else node_data + ) + monkeypatch.setattr(factory, "_resolve_node_class", MagicMock(return_value=constructor)) + monkeypatch.setattr(factory, "_build_llm_compatible_node_init_kwargs", MagicMock(return_value={})) + + node_config = { + "id": 
"llm-node-id", + "data": { + "type": BuiltinNodeTypes.LLM, + "title": "LLM", + "model": {"provider": "provider", "name": "model", "mode": "chat", "completion_params": {}}, + "prompt_template": [{"role": "system", "text": "x"}], + "context": {"enabled": False, "variable_selector": []}, + "vision": {"enabled": False}, + "structured_output_enabled": True, + "structured_output": { + "schema": { + "type": "object", + "properties": {"type": {"type": "string"}}, + "required": ["type"], + } + }, + }, + } + + factory.create_node(node_config) + + data = constructor.call_args.kwargs["data"] + assert isinstance(data, Mapping) + assert data["structured_output_enabled"] is True + assert "structured_output_switch_on" not in data + assert LLMNodeData.model_validate(data).structured_output_enabled is True + + def test_create_node_preserves_structured_output_switch_after_graphon_constructor(self, monkeypatch, factory): + factory.graph_init_params = SimpleNamespace( + workflow_id="workflow-id", + graph_config={}, + run_context={}, + call_depth=0, + ) + monkeypatch.setattr(factory, "_resolve_node_class", MagicMock(return_value=LLMNode)) + monkeypatch.setattr( + factory, + "_build_llm_compatible_node_init_kwargs", + MagicMock( + return_value={ + "model_instance": sentinel.model_instance, + "llm_file_saver": sentinel.llm_file_saver, + "prompt_message_serializer": sentinel.prompt_message_serializer, + } + ), + ) + + node_config = { + "id": "llm-node-id", + "data": { + "type": BuiltinNodeTypes.LLM, + "title": "LLM", + "model": {"provider": "provider", "name": "model", "mode": "chat", "completion_params": {}}, + "prompt_template": [{"role": "system", "text": "x"}], + "context": {"enabled": False, "variable_selector": []}, + "vision": {"enabled": False}, + "structured_output_enabled": True, + "structured_output": { + "schema": { + "type": "object", + "properties": {"type": {"type": "string"}}, + "required": ["type"], + } + }, + }, + } + + node = factory.create_node(node_config) + + assert 
node.node_data.structured_output_switch_on is True + assert node.node_data.structured_output_enabled is True + @pytest.mark.parametrize( ("node_type", "constructor_name", "expected_extra_kwargs"), [ @@ -581,7 +718,7 @@ class TestDifyNodeFactoryCreateNode: ) def test_creates_model_backed_nodes( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, factory, node_type, constructor_name, @@ -623,7 +760,7 @@ class TestDifyNodeFactoryCreateNode: constructor_kwargs = constructor.call_args.kwargs assert constructor_kwargs["node_id"] == "node-id" - _assert_typed_node_config(constructor_kwargs["config"], node_id="node-id", node_type=node_type) + _assert_constructor_node_data(constructor_kwargs["data"], node_id="node-id", node_type=node_type) assert constructor_kwargs["graph_init_params"] is sentinel.graph_init_params assert constructor_kwargs["graph_runtime_state"] is factory.graph_runtime_state assert constructor_kwargs["credentials_provider"] is sentinel.credentials_provider @@ -642,7 +779,7 @@ class TestDifyNodeFactoryModelInstance: factory._llm_model_factory = sentinel.model_factory return factory - def test_delegates_to_fetch_model_config(self, monkeypatch, factory): + def test_delegates_to_fetch_model_config(self, monkeypatch: pytest.MonkeyPatch, factory): node_data_model = SimpleNamespace( provider="provider", name="model", @@ -671,7 +808,7 @@ class TestDifyNodeFactoryModelInstance: model_factory=sentinel.model_factory, ) - def test_propagates_fetch_model_config_errors(self, monkeypatch, factory): + def test_propagates_fetch_model_config_errors(self, monkeypatch: pytest.MonkeyPatch, factory): fetch_model_config = MagicMock(side_effect=ValueError("broken model config")) monkeypatch.setattr(node_factory, "fetch_model_config", fetch_model_config) @@ -696,7 +833,7 @@ class TestDifyNodeFactoryMemory: assert result is None factory.graph_runtime_state.variable_pool.get.assert_not_called() - def test_uses_string_segment_conversation_id(self, monkeypatch, factory): + def 
test_uses_string_segment_conversation_id(self, monkeypatch: pytest.MonkeyPatch, factory): memory_config = sentinel.memory_config factory.graph_runtime_state.variable_pool.get.return_value = StringSegment(value="conversation-id") fetch_memory = MagicMock(return_value=sentinel.memory) @@ -716,7 +853,7 @@ class TestDifyNodeFactoryMemory: model_instance=sentinel.model_instance, ) - def test_ignores_non_string_segment_conversation_ids(self, monkeypatch, factory): + def test_ignores_non_string_segment_conversation_ids(self, monkeypatch: pytest.MonkeyPatch, factory): memory_config = sentinel.memory_config factory.graph_runtime_state.variable_pool.get.return_value = sentinel.segment fetch_memory = MagicMock(return_value=sentinel.memory) diff --git a/api/tests/unit_tests/core/workflow/test_node_mapping_bootstrap.py b/api/tests/unit_tests/core/workflow/test_node_mapping_bootstrap.py index d18fc262ef..2dd3953d9a 100644 --- a/api/tests/unit_tests/core/workflow/test_node_mapping_bootstrap.py +++ b/api/tests/unit_tests/core/workflow/test_node_mapping_bootstrap.py @@ -7,6 +7,17 @@ from pathlib import Path def test_moved_core_nodes_resolve_after_importing_production_entrypoints(): api_root = Path(__file__).resolve().parents[4] + + # `PYTHONSAFEPATH=1` enables Python's safe-path mode, which suppresses the + # usual implicit insertion of the working directory into `sys.path`. + # Set `PYTHONPATH` explicitly so this subprocess test stays deterministic in + # both CI and local shells that may export `PYTHONSAFEPATH`. 
+ env = os.environ.copy() + existing_pythonpath = env.get("PYTHONPATH") + env["PYTHONPATH"] = ( + str(api_root) if not existing_pythonpath else os.pathsep.join([str(api_root), existing_pythonpath]) + ) + env["PYTHONSAFEPATH"] = "1" script = textwrap.dedent( """ from core.app.apps import workflow_app_runner @@ -34,7 +45,7 @@ def test_moved_core_nodes_resolve_after_importing_production_entrypoints(): completed = subprocess.run( [sys.executable, "-c", script], cwd=api_root, - env=os.environ.copy(), + env=env, capture_output=True, text=True, check=False, diff --git a/api/tests/unit_tests/core/workflow/test_node_runtime.py b/api/tests/unit_tests/core/workflow/test_node_runtime.py index 5a43369a1a..d2925fd1a8 100644 --- a/api/tests/unit_tests/core/workflow/test_node_runtime.py +++ b/api/tests/unit_tests/core/workflow/test_node_runtime.py @@ -22,6 +22,7 @@ from core.workflow.node_runtime import ( DifyPromptMessageSerializer, DifyRetrieverAttachmentLoader, DifyToolFileManager, + DifyToolNodeRuntime, apply_dify_debug_email_recipient, build_dify_llm_file_saver, resolve_dify_run_context, @@ -30,6 +31,7 @@ from graphon.file import FileTransferMethod, FileType from graphon.model_runtime.entities.common_entities import I18nObject from graphon.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType from graphon.nodes.human_input.entities import HumanInputNodeData +from graphon.nodes.tool.entities import ToolNodeData, ToolProviderType from tests.workflow_test_utils import build_test_run_context @@ -314,6 +316,81 @@ def test_dify_tool_file_manager_delegates_file_generator_lookup(monkeypatch: pyt get_file_generator.assert_called_once_with("tool-file-id") +def test_dify_tool_node_runtime_injects_outer_workflow_run_id_for_workflow_tools( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runtime_tool = SimpleNamespace(runtime=SimpleNamespace(runtime_parameters={})) + get_runtime = MagicMock(return_value=runtime_tool) + 
monkeypatch.setattr(node_runtime.ToolManager, "get_workflow_tool_runtime", get_runtime) + monkeypatch.setattr( + node_runtime, + "get_system_text", + lambda _pool, key: ( + "outer-workflow-run-id" if key == node_runtime.SystemVariableKey.WORKFLOW_EXECUTION_ID else None + ), + ) + + runtime = node_runtime.DifyToolNodeRuntime(_build_run_context()) + node_data = ToolNodeData( + title="Workflow Tool Node", + desc=None, + provider_id="workflow-provider-id", + provider_type=ToolProviderType.WORKFLOW, + provider_name="workflow-provider", + tool_name="workflow-tool", + tool_label="Workflow Tool", + tool_configurations={}, + tool_parameters={}, + ) + + handle = runtime.get_runtime( + node_id="tool-node", + node_data=node_data, + variable_pool=object(), + node_execution_id="node-execution-id", + ) + + assert handle.raw.tool is runtime_tool + assert handle.raw.parent_trace_context.model_dump() == { + "parent_workflow_run_id": "outer-workflow-run-id", + "parent_node_execution_id": "node-execution-id", + } + assert runtime_tool.runtime.runtime_parameters == {} + get_runtime.assert_called_once() + + +def test_dify_tool_node_runtime_does_not_inject_outer_workflow_run_id_for_non_workflow_tools( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runtime_tool = SimpleNamespace(runtime=SimpleNamespace(runtime_parameters={})) + get_runtime = MagicMock(return_value=runtime_tool) + monkeypatch.setattr(node_runtime.ToolManager, "get_workflow_tool_runtime", get_runtime) + monkeypatch.setattr(node_runtime, "get_system_text", lambda _pool, _key: None) + + runtime = node_runtime.DifyToolNodeRuntime(_build_run_context()) + node_data = ToolNodeData( + title="Builtin Tool Node", + desc=None, + provider_id="builtin-provider-id", + provider_type=ToolProviderType.BUILT_IN, + provider_name="builtin-provider", + tool_name="builtin-tool", + tool_label="Builtin Tool", + tool_configurations={}, + tool_parameters={}, + ) + + handle = runtime.get_runtime( + node_id="tool-node", + node_data=node_data, + 
variable_pool=object(), + ) + + assert handle.raw.tool is runtime_tool + assert "outer_workflow_run_id" not in runtime_tool.runtime.runtime_parameters + get_runtime.assert_called_once() + + def test_dify_human_input_runtime_builds_debug_repository(monkeypatch: pytest.MonkeyPatch) -> None: repository = MagicMock() repository_cls = MagicMock(return_value=repository) @@ -334,6 +411,41 @@ def test_dify_human_input_runtime_builds_debug_repository(monkeypatch: pytest.Mo ) +def test_dify_tool_runtime_spec_prefers_tool_parameters_for_runtime_form_values() -> None: + node_data = ToolNodeData( + provider_id="video-mixcut-agent", + provider_type=ToolProviderType.PLUGIN, + provider_name="sawyer-shi/video-mixcut-agent", + tool_name="mixcut", + tool_label="MixCut", + tool_configurations={"count": 2}, + tool_parameters={ + "vision_llm_model": { + "type": "constant", + "value": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-vl-plus", + "model_type": "llm", + }, + } + }, + ) + + spec = DifyToolNodeRuntime._build_tool_runtime_spec(node_data) + + assert spec.tool_configurations == { + "count": 2, + "vision_llm_model": { + "type": "constant", + "value": { + "provider": "langgenius/tongyi/tongyi", + "model": "qwen3-vl-plus", + "model_type": "llm", + }, + }, + } + + def test_dify_human_input_runtime_create_form_filters_debugger_delivery_methods() -> None: repository = MagicMock() repository.create_form.return_value = sentinel.form diff --git a/api/tests/unit_tests/core/workflow/test_variable_pool.py b/api/tests/unit_tests/core/workflow/test_variable_pool.py index 9dab38ed8e..0017cd8d3f 100644 --- a/api/tests/unit_tests/core/workflow/test_variable_pool.py +++ b/api/tests/unit_tests/core/workflow/test_variable_pool.py @@ -109,8 +109,8 @@ class TestVariablePool: assert pool.get([ENVIRONMENT_VARIABLE_NODE_ID, "env_var_1"]) is not None assert pool.get([CONVERSATION_VARIABLE_NODE_ID, "conv_var_1"]) is not None - def test_constructor_loads_legacy_bootstrap_kwargs(self): - pool = 
VariablePool( + def test_from_bootstrap_loads_legacy_bootstrap_kwargs(self): + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="test_user_id"), environment_variables=[StringVariable(name="env_var", value="env-value")], conversation_variables=[StringVariable(name="conv_var", value="conv-value")], diff --git a/api/tests/unit_tests/core/workflow/test_workflow_entry.py b/api/tests/unit_tests/core/workflow/test_workflow_entry.py index 041c5cc612..661882f013 100644 --- a/api/tests/unit_tests/core/workflow/test_workflow_entry.py +++ b/api/tests/unit_tests/core/workflow/test_workflow_entry.py @@ -19,7 +19,7 @@ from graphon.variables.variables import StringVariable @pytest.fixture(autouse=True) -def _mock_ssrf_head(monkeypatch): +def _mock_ssrf_head(monkeypatch: pytest.MonkeyPatch): """Avoid any real network requests during tests. factories.file_factory.remote.get_remote_file_info() uses ssrf_proxy.head @@ -55,7 +55,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_to_variable_pool_with_system_variables(self): """Test mapping system variables from user inputs to variable pool.""" # Initialize variable pool with system variables - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="test_user_id", app_id="test_app_id", @@ -128,7 +128,7 @@ class TestWorkflowEntry: return NodeConfigDictAdapter.validate_python(node_config) workflow = StubWorkflow() - variable_pool = VariablePool(system_variables=default_system_variables(), user_inputs={}) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables(), user_inputs={}) expected_limits = CodeNodeLimits( max_string_length=dify_config.CODE_MAX_STRING_LENGTH, max_number=dify_config.CODE_MAX_NUMBER, @@ -157,7 +157,7 @@ class TestWorkflowEntry: """Test mapping environment variables from user inputs to variable pool.""" # Initialize variable pool with environment variables env_var = 
StringVariable(name="API_KEY", value="existing_key") - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), environment_variables=[env_var], user_inputs={}, @@ -198,7 +198,7 @@ class TestWorkflowEntry: """Test mapping conversation variables from user inputs to variable pool.""" # Initialize variable pool with conversation variables conv_var = StringVariable(name="last_message", value="Hello") - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), conversation_variables=[conv_var], user_inputs={}, @@ -239,7 +239,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_to_variable_pool_with_regular_variables(self): """Test mapping regular node variables from user inputs to variable pool.""" # Initialize empty variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -281,7 +281,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_with_file_handling(self): """Test mapping file inputs from user inputs to variable pool.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -340,7 +340,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_missing_variable_error(self): """Test that mapping raises error when required variable is missing.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -366,7 +366,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_with_alternative_key_format(self): """Test mapping with alternative key format (without node prefix).""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -396,7 +396,7 @@ class 
TestWorkflowEntry: def test_mapping_user_inputs_with_complex_selectors(self): """Test mapping with complex node variable keys.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -432,7 +432,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_invalid_node_variable(self): """Test that mapping handles invalid node variable format.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -463,7 +463,7 @@ class TestWorkflowEntry: env_var = StringVariable(name="API_KEY", value="existing_key") conv_var = StringVariable(name="session_id", value="session123") - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="test_user", app_id="test_app", diff --git a/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py b/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py index 270d0bf90d..a57cdd1337 100644 --- a/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py +++ b/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py @@ -7,7 +7,6 @@ import pytest from core.app.apps.exc import GenerateTaskStoppedError from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom -from core.model_manager import ModelInstance from core.workflow import workflow_entry from core.workflow.system_variables import default_system_variables from graphon.entities.base_node_data import BaseNodeData @@ -16,10 +15,12 @@ from graphon.errors import WorkflowNodeRunFailedError from graphon.file import File, FileTransferMethod, FileType from graphon.graph import Graph from graphon.graph_events import GraphRunFailedEvent -from graphon.model_runtime.entities.llm_entities import LLMUsage +from graphon.model_runtime.entities.llm_entities import LLMMode, LLMUsage from graphon.node_events import 
NodeRunResult from graphon.nodes import BuiltinNodeTypes from graphon.nodes.base.node import Node +from graphon.nodes.llm.entities import ContextConfig, LLMNodeData, ModelConfig +from graphon.nodes.question_classifier.entities import QuestionClassifierNodeData from graphon.runtime import ChildGraphNotFoundError, VariablePool from graphon.variables.variables import StringVariable from tests.workflow_test_utils import build_test_graph_init_params, build_test_variable_pool @@ -29,9 +30,30 @@ def _build_typed_node_config(node_type: NodeType): return {"id": "node-id", "data": BaseNodeData(type=node_type)} -def _build_wrapped_model_instance() -> tuple[SimpleNamespace, ModelInstance]: - raw_model_instance = ModelInstance.__new__(ModelInstance) - return SimpleNamespace(_model_instance=raw_model_instance), raw_model_instance +def _build_model_config(*, provider: str = "openai", model_name: str = "gpt-4o") -> ModelConfig: + return ModelConfig(provider=provider, name=model_name, mode=LLMMode.CHAT) + + +def _build_llm_node_data(*, provider: str = "openai", model_name: str = "gpt-4o") -> LLMNodeData: + return LLMNodeData( + type=BuiltinNodeTypes.LLM, + title="Child Model", + model=_build_model_config(provider=provider, model_name=model_name), + prompt_template=[], + context=ContextConfig(enabled=False), + ) + + +def _build_question_classifier_node_data( + *, provider: str = "openai", model_name: str = "gpt-4o" +) -> QuestionClassifierNodeData: + return QuestionClassifierNodeData( + type=BuiltinNodeTypes.QUESTION_CLASSIFIER, + title="Child Model", + query_variable_selector=["sys", "query"], + model=_build_model_config(provider=provider, model_name=model_name), + classes=[], + ) class _FakeModelNodeMixin: @@ -40,22 +62,26 @@ class _FakeModelNodeMixin: return "1" def post_init(self) -> None: - self.model_instance, self.raw_model_instance = _build_wrapped_model_instance() + self.model_instance = SimpleNamespace(provider="stale-provider", model_name="stale-model") 
self.usage_snapshot = LLMUsage.empty_usage() self.usage_snapshot.total_tokens = 1 def _run(self) -> NodeRunResult: return NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, + inputs={ + "model_provider": self.node_data.model.provider, + "model_name": self.node_data.model.name, + }, llm_usage=self.usage_snapshot, ) -class _FakeLLMNode(_FakeModelNodeMixin, Node[BaseNodeData]): +class _FakeLLMNode(_FakeModelNodeMixin, Node[LLMNodeData]): node_type = BuiltinNodeTypes.LLM -class _FakeQuestionClassifierNode(_FakeModelNodeMixin, Node[BaseNodeData]): +class _FakeQuestionClassifierNode(_FakeModelNodeMixin, Node[QuestionClassifierNodeData]): node_type = BuiltinNodeTypes.QUESTION_CLASSIFIER @@ -75,7 +101,7 @@ class TestWorkflowChildEngineBuilder: assert result is expected def test_build_child_engine_raises_when_root_node_is_missing(self): - builder = workflow_entry._WorkflowChildEngineBuilder() + builder = workflow_entry._WorkflowChildEngineBuilder(tenant_id="tenant-id") graph_init_params = SimpleNamespace(graph_config={"nodes": []}) parent_graph_runtime_state = SimpleNamespace( execution_context=sentinel.execution_context, @@ -92,7 +118,7 @@ class TestWorkflowChildEngineBuilder: ) def test_build_child_engine_constructs_graph_engine_with_quota_layer_only(self): - builder = workflow_entry._WorkflowChildEngineBuilder() + builder = workflow_entry._WorkflowChildEngineBuilder(tenant_id="tenant-id") graph_init_params = SimpleNamespace(graph_config={"nodes": [{"id": "root"}]}) parent_graph_runtime_state = SimpleNamespace( execution_context=sentinel.execution_context, @@ -114,7 +140,7 @@ class TestWorkflowChildEngineBuilder: patch.object(workflow_entry, "GraphEngine", return_value=child_engine) as graph_engine_cls, patch.object(workflow_entry, "GraphEngineConfig", return_value=sentinel.graph_engine_config), patch.object(workflow_entry, "InMemoryChannel", return_value=sentinel.command_channel), - patch.object(workflow_entry, "LLMQuotaLayer", 
return_value=sentinel.llm_quota_layer), + patch.object(workflow_entry, "LLMQuotaLayer", return_value=sentinel.llm_quota_layer) as llm_quota_layer_cls, ): result = builder.build_child_engine( workflow_id="workflow-id", @@ -147,11 +173,12 @@ class TestWorkflowChildEngineBuilder: config=sentinel.graph_engine_config, child_engine_builder=builder, ) + llm_quota_layer_cls.assert_called_once_with(tenant_id="tenant-id") assert child_engine.layer.call_args_list == [((sentinel.llm_quota_layer,), {})] @pytest.mark.parametrize("node_cls", [_FakeLLMNode, _FakeQuestionClassifierNode]) def test_build_child_engine_runs_llm_quota_layer_for_child_model_nodes(self, node_cls): - builder = workflow_entry._WorkflowChildEngineBuilder() + builder = workflow_entry._WorkflowChildEngineBuilder(tenant_id="tenant-id") graph_init_params = build_test_graph_init_params( graph_config={"nodes": [{"id": "root"}], "edges": []}, ) @@ -163,12 +190,10 @@ class TestWorkflowChildEngineBuilder: def build_graph(*, graph_config, node_factory, root_node_id): _ = graph_config + node_data = _build_llm_node_data() if node_cls is _FakeLLMNode else _build_question_classifier_node_data() node = node_cls( node_id=root_node_id, - config=BaseNodeData( - type=node_cls.node_type, - title="Child Model", - ), + data=node_data, graph_init_params=node_factory.graph_init_params, graph_runtime_state=node_factory.graph_runtime_state, ) @@ -191,8 +216,8 @@ class TestWorkflowChildEngineBuilder: ), ), patch.object(workflow_entry.Graph, "init", side_effect=build_graph), - patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available") as ensure_quota, - patch("core.app.workflow.layers.llm_quota.deduct_llm_quota") as deduct_quota, + patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model") as ensure_quota, + patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model") as deduct_quota, ): child_engine = builder.build_child_engine( workflow_id="workflow-id", @@ -203,10 +228,15 @@ class 
TestWorkflowChildEngineBuilder: list(child_engine.run()) node = created_node["node"] - ensure_quota.assert_called_once_with(model_instance=node.raw_model_instance) + ensure_quota.assert_called_once_with( + tenant_id="tenant-id", + provider=node.node_data.model.provider, + model=node.node_data.model.name, + ) deduct_quota.assert_called_once_with( - tenant_id="tenant", - model_instance=node.raw_model_instance, + tenant_id="tenant-id", + provider=node.node_data.model.provider, + model=node.node_data.model.name, usage=node.usage_snapshot, ) @@ -252,7 +282,7 @@ class TestWorkflowEntryInit: "ExecutionLimitsLayer", return_value=execution_limits_layer, ) as execution_limits_layer_cls, - patch.object(workflow_entry, "LLMQuotaLayer", return_value=llm_quota_layer), + patch.object(workflow_entry, "LLMQuotaLayer", return_value=llm_quota_layer) as llm_quota_layer_cls, patch.object(workflow_entry, "ObservabilityLayer", return_value=observability_layer), ): entry = workflow_entry.WorkflowEntry( @@ -291,6 +321,7 @@ class TestWorkflowEntryInit: max_steps=workflow_entry.dify_config.WORKFLOW_MAX_EXECUTION_STEPS, max_time=workflow_entry.dify_config.WORKFLOW_MAX_EXECUTION_TIME, ) + llm_quota_layer_cls.assert_called_once_with(tenant_id="tenant-id") assert graph_engine.layer.call_args_list == [ ((debug_layer,), {}), ((execution_limits_layer,), {}), @@ -334,7 +365,7 @@ class TestWorkflowEntrySingleStepRun: def extract_variable_selector_to_variable_mapping(**_kwargs): return {} - variable_pool = VariablePool(system_variables=default_system_variables(), user_inputs={}) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables(), user_inputs={}) variable_loader = MagicMock() variable_loader.load_variables.return_value = [ StringVariable( @@ -603,7 +634,7 @@ class TestWorkflowEntryHelpers: user_inputs={}, ) - def test_run_free_node_rejects_missing_node_class(self, monkeypatch): + def test_run_free_node_rejects_missing_node_class(self, monkeypatch: 
pytest.MonkeyPatch): monkeypatch.setattr( workflow_entry, "resolve_workflow_node_class", @@ -619,7 +650,9 @@ class TestWorkflowEntryHelpers: user_inputs={}, ) - def test_run_free_node_uses_empty_mapping_when_selector_extraction_is_not_implemented(self, monkeypatch): + def test_run_free_node_uses_empty_mapping_when_selector_extraction_is_not_implemented( + self, monkeypatch: pytest.MonkeyPatch + ): class FakeNodeClass: @staticmethod def extract_variable_selector_to_variable_mapping(**_kwargs): @@ -707,7 +740,7 @@ class TestWorkflowEntryHelpers: tenant_id="tenant-id", ) - def test_run_free_node_wraps_execution_failures(self, monkeypatch): + def test_run_free_node_wraps_execution_failures(self, monkeypatch: pytest.MonkeyPatch): class FakeNodeClass: @staticmethod def extract_variable_selector_to_variable_mapping(**_kwargs): diff --git a/api/tests/unit_tests/events/test_update_provider_when_message_created.py b/api/tests/unit_tests/events/test_update_provider_when_message_created.py new file mode 100644 index 0000000000..9cb8ca7854 --- /dev/null +++ b/api/tests/unit_tests/events/test_update_provider_when_message_created.py @@ -0,0 +1,130 @@ +from types import SimpleNamespace +from unittest.mock import patch +from uuid import uuid4 + +from sqlalchemy import create_engine, select + +from core.app.entities.app_invoke_entities import ChatAppGenerateEntity +from core.entities.provider_entities import ProviderQuotaType, QuotaUnit +from events.event_handlers import update_provider_when_message_created +from models import TenantCreditPool +from models.provider import ProviderType + + +def test_message_created_trial_credit_accounting_does_not_raise_when_balance_is_insufficient() -> None: + engine = create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + tenant_id = str(uuid4()) + pool_id = str(uuid4()) + with engine.begin() as connection: + connection.execute( + TenantCreditPool.__table__.insert(), + { + "id": pool_id, + "tenant_id": tenant_id, + 
"pool_type": ProviderQuotaType.TRIAL, + "quota_limit": 10, + "quota_used": 9, + }, + ) + + system_configuration = SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TOKENS, + quota_limit=10, + ) + ], + ) + application_generate_entity = ChatAppGenerateEntity.model_construct( + app_config=SimpleNamespace(tenant_id=tenant_id), + model_conf=SimpleNamespace( + provider="openai", + model="gpt-4o", + provider_model_bundle=SimpleNamespace( + configuration=SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=system_configuration, + ) + ), + ), + ) + message = SimpleNamespace(message_tokens=2, answer_tokens=1) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + patch.object(update_provider_when_message_created, "_execute_provider_updates"), + ): + update_provider_when_message_created.handle( + sender=message, + application_generate_entity=application_generate_entity, + ) + + with engine.connect() as connection: + quota_used = connection.scalar(select(TenantCreditPool.quota_used).where(TenantCreditPool.id == pool_id)) + + assert quota_used == 10 + + +def test_message_created_paid_credit_accounting_uses_paid_pool() -> None: + tenant_id = str(uuid4()) + system_configuration = SimpleNamespace( + current_quota_type=ProviderQuotaType.PAID, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.PAID, + quota_unit=QuotaUnit.TOKENS, + quota_limit=10, + ) + ], + ) + application_generate_entity = ChatAppGenerateEntity.model_construct( + app_config=SimpleNamespace(tenant_id=tenant_id), + model_conf=SimpleNamespace( + provider="openai", + model="gpt-4o", + provider_model_bundle=SimpleNamespace( + configuration=SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=system_configuration, + ) + ), + ), + ) + message = SimpleNamespace(message_tokens=2, 
answer_tokens=1) + + with ( + patch.object(update_provider_when_message_created, "_deduct_credit_pool_quota_capped") as mock_deduct, + patch.object(update_provider_when_message_created, "_execute_provider_updates"), + ): + update_provider_when_message_created.handle( + sender=message, + application_generate_entity=application_generate_entity, + ) + + mock_deduct.assert_called_once_with( + tenant_id=tenant_id, + credits_required=3, + pool_type="paid", + ) + + +def test_capped_credit_pool_accounting_skips_exhaustion_warning_when_full_amount_is_deducted(caplog) -> None: + with patch( + "services.credit_pool_service.CreditPoolService.deduct_credits_capped", + return_value=3, + ) as mock_deduct: + update_provider_when_message_created._deduct_credit_pool_quota_capped( + tenant_id="tenant-id", + credits_required=3, + pool_type="trial", + ) + + mock_deduct.assert_called_once_with( + tenant_id="tenant-id", + credits_required=3, + pool_type="trial", + ) + assert "Credit pool exhausted during message-created accounting" not in caplog.text diff --git a/api/tests/unit_tests/extensions/test_ext_request_logging.py b/api/tests/unit_tests/extensions/test_ext_request_logging.py index dcb457c806..03479686bb 100644 --- a/api/tests/unit_tests/extensions/test_ext_request_logging.py +++ b/api/tests/unit_tests/extensions/test_ext_request_logging.py @@ -71,7 +71,7 @@ def enable_request_logging(monkeypatch: pytest.MonkeyPatch): class TestRequestLoggingExtension: def test_receiver_should_not_be_invoked_if_configuration_is_disabled( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_request_receiver, mock_response_receiver, ): @@ -266,7 +266,9 @@ class TestResponseUnmodified: class TestRequestFinishedInfoAccessLine: - def test_info_access_log_includes_method_path_status_duration_trace_id(self, monkeypatch, caplog): + def test_info_access_log_includes_method_path_status_duration_trace_id( + self, monkeypatch: pytest.MonkeyPatch, caplog + ): """Ensure INFO access line contains 
expected fields with computed duration and trace id.""" app = _get_test_app() # Push a real request context so flask.request and g are available @@ -299,7 +301,7 @@ class TestRequestFinishedInfoAccessLine: assert "123.456" in msg # rounded to 3 decimals assert "trace-xyz" in msg - def test_info_access_log_uses_dash_without_start_timestamp(self, monkeypatch, caplog): + def test_info_access_log_uses_dash_without_start_timestamp(self, monkeypatch: pytest.MonkeyPatch, caplog): app = _get_test_app() with app.test_request_context("/bar", method="POST"): # No g.__request_started_ts set -> duration should be '-' diff --git a/api/tests/unit_tests/extensions/test_pubsub_channel.py b/api/tests/unit_tests/extensions/test_pubsub_channel.py index 926c406ad4..24bbf55cb3 100644 --- a/api/tests/unit_tests/extensions/test_pubsub_channel.py +++ b/api/tests/unit_tests/extensions/test_pubsub_channel.py @@ -1,10 +1,12 @@ +import pytest + from configs import dify_config from extensions import ext_redis from libs.broadcast_channel.redis.channel import BroadcastChannel as RedisBroadcastChannel from libs.broadcast_channel.redis.sharded_channel import ShardedRedisBroadcastChannel -def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch): +def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ext_redis, "_pubsub_redis_client", object()) @@ -13,7 +15,7 @@ def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch): assert isinstance(channel, RedisBroadcastChannel) -def test_get_pubsub_broadcast_channel_sharded(monkeypatch): +def test_get_pubsub_broadcast_channel_sharded(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "sharded") monkeypatch.setattr(ext_redis, "_pubsub_redis_client", object()) diff --git a/api/tests/unit_tests/factories/test_file_factory.py 
b/api/tests/unit_tests/factories/test_file_factory.py index 5b105d6084..293be925ae 100644 --- a/api/tests/unit_tests/factories/test_file_factory.py +++ b/api/tests/unit_tests/factories/test_file_factory.py @@ -1,8 +1,11 @@ import re +from unittest.mock import MagicMock import pytest +from factories.file_factory import builders from factories.file_factory.remote import extract_filename, get_remote_file_info +from graphon.file import FileTransferMethod class _FakeResponse: @@ -230,3 +233,153 @@ class TestExtractFilename: "http://example.com/", 'attachment; filename="file%20with%20quotes%20%26%20encoding.txt"' ) assert result == "file with quotes & encoding.txt" + + def test_url_with_query_string(self): + """Test that query strings are stripped from URL basename.""" + result = extract_filename("http://example.com/path/file.txt?signature=abc123&expires=12345", None) + assert result == "file.txt" + + def test_url_with_hash_fragment(self): + """Test that hash fragments are stripped from URL basename.""" + result = extract_filename("http://example.com/path/file.txt#section", None) + assert result == "file.txt" + + def test_url_with_query_and_fragment(self): + """Test that both query strings and hash fragments are stripped.""" + result = extract_filename("http://example.com/path/file.txt?token=xyz#section", None) + assert result == "file.txt" + + def test_signed_url_preserves_filename(self): + """Test that signed URL parameters don't affect filename extraction.""" + result = extract_filename( + "http://storage.example.com/bucket/documents/report.pdf?AWSAccessKeyId=xxx&Signature=yyy&Expires=12345", + None, + ) + assert result == "report.pdf" + + def test_percent_encoded_filename_with_query_string(self): + """Test percent-encoded filename with query string is decoded correctly.""" + result = extract_filename("http://example.com/path/my%20file.txt?download=true", None) + assert result == "my file.txt" + + def test_percent_encoded_filename_with_fragment(self): + """Test 
percent-encoded filename with fragment is decoded correctly.""" + result = extract_filename("http://example.com/path/my%20file.txt#page=1", None) + assert result == "my file.txt" + + def test_complex_percent_encoding_with_query(self): + """Test complex percent-encoded filename with query parameters.""" + result = extract_filename("http://example.com/docs/%E4%B8%AD%E6%96%87%E6%96%87%E4%BB%B6.pdf?v=1", None) + assert result == "中文文件.pdf" + + def test_url_with_special_chars_in_query(self): + """Test that special characters in query string don't affect filename.""" + result = extract_filename("http://example.com/file.bin?name=test&path=/some/path", None) + assert result == "file.bin" + + def test_malformed_percent_encoding_safe_fallback(self): + """Test that malformed percent-encoding is handled safely.""" + result = extract_filename("http://example.com/path/file%20name%GG.txt?x=1", None) + # %GG is invalid, should be replaced with replacement character + + assert "file" in result + assert ".txt" in result + + def test_empty_path_with_query_returns_none(self): + """Test that empty path with query string returns None.""" + result = extract_filename("http://example.com/?query=value", None) + assert result is None + + def test_path_only_with_query_string(self): + """Test bare path (not full URL) with query string.""" + result = extract_filename("/path/to/file.txt?extra=params", None) + assert result == "file.txt" + + +class TestBuildFromDatasourceFile: + """Tests for _build_from_datasource_file extension handling.""" + + @staticmethod + def _patch_session(monkeypatch: pytest.MonkeyPatch, datasource_file): + """Stub session_factory.create_session() so it returns the given UploadFile-shaped record.""" + session = MagicMock() + session.scalar.return_value = datasource_file + ctx = MagicMock() + ctx.__enter__ = MagicMock(return_value=session) + ctx.__exit__ = MagicMock(return_value=False) + monkeypatch.setattr(builders.session_factory, "create_session", lambda: ctx) + + def 
_make_datasource_file(self, *, key: str, mime_type: str = "text/csv"): + f = MagicMock() + f.id = "file-id" + f.key = key + f.name = key.split("/")[-1] + f.mime_type = mime_type + f.size = 123 + f.source_url = f"https://example.com/{key}" + return f + + def test_extension_passed_without_doubled_dot(self, monkeypatch: pytest.MonkeyPatch): + """Regression: standardize_file_type must receive the extension exactly once-prefixed. + + Previously the call was ``standardize_file_type(extension="." + extension, ...)`` while + ``extension`` already had a leading dot, producing ``"..csv"``. The mitigating + ``lstrip(".")`` inside ``standardize_file_type`` masked the bug from end users, but the + argument shape itself was wrong and showed up in any caller that didn't strip dots. + """ + captured: dict = {} + + def fake_standardize(*, extension: str = "", mime_type: str = ""): + from graphon.file import FileType + + captured["extension"] = extension + captured["mime_type"] = mime_type + return FileType.DOCUMENT + + monkeypatch.setattr(builders, "standardize_file_type", fake_standardize) + + datasource_file = self._make_datasource_file(key="folder/data.csv", mime_type="text/csv") + self._patch_session(monkeypatch, datasource_file) + + access_controller = MagicMock() + access_controller.apply_upload_file_filters = lambda stmt: stmt + + file = builders._build_from_datasource_file( + mapping={"datasource_file_id": "file-id", "transfer_method": "datasource_file"}, + tenant_id="tenant-id", + transfer_method=FileTransferMethod.DATASOURCE_FILE, + access_controller=access_controller, + ) + + assert captured["extension"] == ".csv", ( + f"standardize_file_type received {captured['extension']!r}; expected single-dot '.csv'" + ) + assert captured["mime_type"] == "text/csv" + assert file.extension == ".csv" + + def test_extension_falls_back_to_bin_when_key_has_no_dot(self, monkeypatch: pytest.MonkeyPatch): + captured: dict = {} + + def fake_standardize(*, extension: str = "", mime_type: str 
= ""): + from graphon.file import FileType + + captured["extension"] = extension + return FileType.CUSTOM + + monkeypatch.setattr(builders, "standardize_file_type", fake_standardize) + + datasource_file = self._make_datasource_file(key="dotless-key", mime_type="application/octet-stream") + self._patch_session(monkeypatch, datasource_file) + + access_controller = MagicMock() + access_controller.apply_upload_file_filters = lambda stmt: stmt + + file = builders._build_from_datasource_file( + mapping={"datasource_file_id": "file-id", "transfer_method": "datasource_file"}, + tenant_id="tenant-id", + transfer_method=FileTransferMethod.DATASOURCE_FILE, + access_controller=access_controller, + ) + + assert captured["extension"] == ".bin" + assert file.extension == ".bin" diff --git a/api/tests/unit_tests/factories/test_file_validation.py b/api/tests/unit_tests/factories/test_file_validation.py new file mode 100644 index 0000000000..61337fcf10 --- /dev/null +++ b/api/tests/unit_tests/factories/test_file_validation.py @@ -0,0 +1,159 @@ +"""Unit tests for is_file_valid_with_config.""" + +from __future__ import annotations + +import pytest + +from factories.file_factory.validation import is_file_valid_with_config +from graphon.file import FileTransferMethod, FileType, FileUploadConfig + + +def _validate( + *, + input_file_type: str, + file_extension: str = ".png", + file_transfer_method: FileTransferMethod = FileTransferMethod.LOCAL_FILE, + config: FileUploadConfig, +) -> bool: + return is_file_valid_with_config( + input_file_type=input_file_type, + file_extension=file_extension, + file_transfer_method=file_transfer_method, + config=config, + ) + + +@pytest.mark.parametrize( + ("input_file_type", "file_extension", "allowed_file_types", "allowed_file_extensions", "expected"), + [ + # round-1 happy path: literal "custom" mapping, ext whitelisted + ("custom", ".png", [FileType.CUSTOM], [".png"], True), + # round-2 replay: MessageFile.type is the resolved type, but config still 
allows CUSTOM + ("image", ".png", [FileType.CUSTOM], [".png"], True), + ("document", ".pdf", [FileType.CUSTOM], [".pdf"], True), + # mixed bucket [IMAGE, CUSTOM]: document falls into CUSTOM bucket via extension + ("document", ".pdf", [FileType.IMAGE, FileType.CUSTOM], [".pdf"], True), + ("document", ".exe", [FileType.IMAGE, FileType.CUSTOM], [".pdf"], False), + ("image", ".jpg", [FileType.IMAGE], [], True), + ("video", ".mp4", [FileType.IMAGE, FileType.DOCUMENT], [], False), + ("custom", ".exe", [FileType.CUSTOM], [".png"], False), + # empty allowed_file_types == no type restriction + ("video", ".mp4", [], [], True), + ], +) +def test_bucket_semantics(input_file_type, file_extension, allowed_file_types, allowed_file_extensions, expected): + config = FileUploadConfig( + allowed_file_types=allowed_file_types, + allowed_file_extensions=allowed_file_extensions, + ) + assert _validate(input_file_type=input_file_type, file_extension=file_extension, config=config) is expected + + +@pytest.mark.parametrize("whitelist_entry", [".png", ".PNG", "png", "PNG", " .Png ", "PnG"]) +def test_extension_match_is_case_and_dot_insensitive(whitelist_entry): + config = FileUploadConfig( + allowed_file_types=[FileType.CUSTOM], + allowed_file_extensions=[whitelist_entry], + ) + assert _validate(input_file_type="custom", file_extension=".png", config=config) is True + + +def test_extension_mismatch_still_rejected_after_normalization(): + config = FileUploadConfig( + allowed_file_types=[FileType.CUSTOM], + allowed_file_extensions=[".png", ".jpg"], + ) + assert _validate(input_file_type="custom", file_extension=".pdf", config=config) is False + + +def test_mixed_case_whitelist_replicating_real_user_config(): + config = FileUploadConfig( + allowed_file_types=[FileType.CUSTOM], + allowed_file_extensions=[".PNG", "png", "JPG", ".WEBP", "SVG", "GIF"], + ) + for ext in (".png", ".jpg", ".webp", ".svg", ".gif"): + assert _validate(input_file_type="custom", file_extension=ext, config=config) is True 
+ + +def test_tool_file_always_passes(): + config = FileUploadConfig(allowed_file_types=[FileType.CUSTOM], allowed_file_extensions=[".pdf"]) + assert ( + _validate( + input_file_type="image", + file_extension=".png", + file_transfer_method=FileTransferMethod.TOOL_FILE, + config=config, + ) + is True + ) + + +def test_transfer_method_gate_for_non_image(): + config = FileUploadConfig( + allowed_file_types=[FileType.DOCUMENT], + allowed_file_upload_methods=[FileTransferMethod.LOCAL_FILE], + ) + assert ( + _validate( + input_file_type="document", + file_extension=".pdf", + file_transfer_method=FileTransferMethod.LOCAL_FILE, + config=config, + ) + is True + ) + assert ( + _validate( + input_file_type="document", + file_extension=".pdf", + file_transfer_method=FileTransferMethod.REMOTE_URL, + config=config, + ) + is False + ) + + +def test_history_replay_matches_round_1_outcome_under_unchanged_config(): + """A file that passes round 1 must pass history replay when config is unchanged.""" + config = FileUploadConfig( + allowed_file_types=[FileType.CUSTOM], + allowed_file_extensions=[".png"], + ) + assert _validate(input_file_type="custom", file_extension=".png", config=config) is True + assert _validate(input_file_type="image", file_extension=".png", config=config) is True + + +def test_empty_whitelist_in_custom_bucket_denies_by_default(): + """Defensive: when a file lands in the CUSTOM bucket, an empty + allowed_file_extensions list rejects. 
The UI never submits empty; + this guards DSL / API paths that bypass the UI from accidentally + widening what's accepted.""" + config = FileUploadConfig( + allowed_file_types=[FileType.CUSTOM], + allowed_file_extensions=[], + ) + assert _validate(input_file_type="custom", file_extension=".png", config=config) is False + assert _validate(input_file_type="image", file_extension=".png", config=config) is False + + +def test_normalize_handles_whitespace_and_empty_consistently(): + """Whitespace-only or empty entries in the whitelist must not match real + extensions (regression guard for _normalize_extension edge cases).""" + for noisy_entry in ("", " ", "\t"): + config = FileUploadConfig( + allowed_file_types=[FileType.CUSTOM], + allowed_file_extensions=[noisy_entry], + ) + assert _validate(input_file_type="custom", file_extension=".png", config=config) is False + + +def test_empty_extension_does_not_spuriously_match_empty_whitelist_entry(): + """Defensive: even if the whitelist contains an empty / whitespace entry + (e.g., a stray comma in DSL), an extensionless file must not pass via + a both-sides-empty match. 
Real entries in the same whitelist still match.""" + config = FileUploadConfig( + allowed_file_types=[FileType.CUSTOM], + allowed_file_extensions=["", ".png"], + ) + assert _validate(input_file_type="custom", file_extension=".png", config=config) is True + assert _validate(input_file_type="custom", file_extension="", config=config) is False diff --git a/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py b/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py index 8bef01c1ed..7c7f20374e 100644 --- a/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py +++ b/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py @@ -673,7 +673,7 @@ class TestRedisShardedSubscription: """Test cases for the _RedisShardedSubscription class.""" @pytest.fixture(autouse=True) - def patch_sharded_redis_type(self, monkeypatch): + def patch_sharded_redis_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("libs.broadcast_channel.redis.sharded_channel.Redis", FakeRedisClient) @pytest.fixture @@ -889,7 +889,9 @@ class TestRedisShardedSubscription: assert not sharded_subscription._queue.empty() assert sharded_subscription._queue.get_nowait() == b"test sharded payload" - def test_get_message_uses_target_node_for_cluster_client(self, mock_pubsub: MagicMock, monkeypatch): + def test_get_message_uses_target_node_for_cluster_client( + self, mock_pubsub: MagicMock, monkeypatch: pytest.MonkeyPatch + ): """Test that cluster clients use target_node for sharded messages.""" class DummyRedisCluster: @@ -1177,7 +1179,7 @@ class TestRedisSubscriptionCommon: return request.param @pytest.fixture(autouse=True) - def patch_sharded_redis_type(self, monkeypatch): + def patch_sharded_redis_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("libs.broadcast_channel.redis.sharded_channel.Redis", FakeRedisClient) @pytest.fixture diff --git a/api/tests/unit_tests/libs/test_archive_storage.py 
b/api/tests/unit_tests/libs/test_archive_storage.py index de3c9c4737..4363c23571 100644 --- a/api/tests/unit_tests/libs/test_archive_storage.py +++ b/api/tests/unit_tests/libs/test_archive_storage.py @@ -34,7 +34,7 @@ def _client_error(code: str) -> ClientError: return ClientError({"Error": {"Code": code}}, "Operation") -def _mock_client(monkeypatch): +def _mock_client(monkeypatch: pytest.MonkeyPatch): client = MagicMock() client.head_bucket.return_value = None # Configure put_object to return a proper ETag that matches the MD5 hash @@ -56,19 +56,19 @@ def _mock_client(monkeypatch): return client, boto_client -def test_init_disabled(monkeypatch): +def test_init_disabled(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch, ARCHIVE_STORAGE_ENABLED=False) with pytest.raises(ArchiveStorageNotConfiguredError, match="not enabled"): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_missing_config(monkeypatch): +def test_init_missing_config(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch, ARCHIVE_STORAGE_ENDPOINT=None) with pytest.raises(ArchiveStorageNotConfiguredError, match="incomplete"): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_not_found(monkeypatch): +def test_init_bucket_not_found(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("404") @@ -77,7 +77,7 @@ def test_init_bucket_not_found(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_access_denied(monkeypatch): +def test_init_bucket_access_denied(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("403") @@ -86,7 +86,7 @@ def test_init_bucket_access_denied(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_other_error(monkeypatch): +def test_init_bucket_other_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) 
client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("500") @@ -95,7 +95,7 @@ def test_init_bucket_other_error(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_sets_client(monkeypatch): +def test_init_sets_client(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, boto_client = _mock_client(monkeypatch) @@ -113,7 +113,7 @@ def test_init_sets_client(monkeypatch): assert storage.bucket == BUCKET_NAME -def test_put_object_returns_checksum(monkeypatch): +def test_put_object_returns_checksum(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -132,7 +132,7 @@ def test_put_object_returns_checksum(monkeypatch): assert checksum == expected_md5 -def test_put_object_raises_on_error(monkeypatch): +def test_put_object_raises_on_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -142,7 +142,7 @@ def test_put_object_raises_on_error(monkeypatch): storage.put_object("key", b"data") -def test_get_object_returns_bytes(monkeypatch): +def test_get_object_returns_bytes(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) body = MagicMock() @@ -153,7 +153,7 @@ def test_get_object_returns_bytes(monkeypatch): assert storage.get_object("key") == b"payload" -def test_get_object_missing(monkeypatch): +def test_get_object_missing(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.get_object.side_effect = _client_error("NoSuchKey") @@ -163,7 +163,7 @@ def test_get_object_missing(monkeypatch): storage.get_object("missing") -def test_get_object_stream(monkeypatch): +def test_get_object_stream(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) body = MagicMock() @@ 
-174,7 +174,7 @@ def test_get_object_stream(monkeypatch): assert list(storage.get_object_stream("key")) == [b"a", b"b"] -def test_get_object_stream_missing(monkeypatch): +def test_get_object_stream_missing(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.get_object.side_effect = _client_error("NoSuchKey") @@ -184,7 +184,7 @@ def test_get_object_stream_missing(monkeypatch): list(storage.get_object_stream("missing")) -def test_object_exists(monkeypatch): +def test_object_exists(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -194,7 +194,7 @@ def test_object_exists(monkeypatch): assert storage.object_exists("missing") is False -def test_delete_object_error(monkeypatch): +def test_delete_object_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.delete_object.side_effect = _client_error("500") @@ -204,7 +204,7 @@ def test_delete_object_error(monkeypatch): storage.delete_object("key") -def test_list_objects(monkeypatch): +def test_list_objects(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) paginator = MagicMock() @@ -219,7 +219,7 @@ def test_list_objects(monkeypatch): paginator.paginate.assert_called_once_with(Bucket="archive-bucket", Prefix="prefix") -def test_list_objects_error(monkeypatch): +def test_list_objects_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) paginator = MagicMock() @@ -231,7 +231,7 @@ def test_list_objects_error(monkeypatch): storage.list_objects("prefix") -def test_generate_presigned_url(monkeypatch): +def test_generate_presigned_url(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.generate_presigned_url.return_value = "http://signed-url" @@ 
-247,7 +247,7 @@ def test_generate_presigned_url(monkeypatch): assert url == "http://signed-url" -def test_generate_presigned_url_error(monkeypatch): +def test_generate_presigned_url_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.generate_presigned_url.side_effect = _client_error("500") diff --git a/api/tests/unit_tests/libs/test_pandas.py b/api/tests/unit_tests/libs/test_pandas.py index 21c2f0781d..a4739dbbc2 100644 --- a/api/tests/unit_tests/libs/test_pandas.py +++ b/api/tests/unit_tests/libs/test_pandas.py @@ -1,7 +1,8 @@ import pandas as pd +import pytest -def test_pandas_csv(tmp_path, monkeypatch): +def test_pandas_csv(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data = {"col1": [1, 2.2, -3.3, 4.0, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data) @@ -16,7 +17,7 @@ def test_pandas_csv(tmp_path, monkeypatch): assert df2[df2.columns[1]].to_list() == data["col2"] -def test_pandas_xlsx(tmp_path, monkeypatch): +def test_pandas_xlsx(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data = {"col1": [1, 2.2, -3.3, 4.0, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data) @@ -31,7 +32,7 @@ def test_pandas_xlsx(tmp_path, monkeypatch): assert df2[df2.columns[1]].to_list() == data["col2"] -def test_pandas_xlsx_with_sheets(tmp_path, monkeypatch): +def test_pandas_xlsx_with_sheets(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data1 = {"col1": [1, 2, 3, 4, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data1) diff --git a/api/tests/unit_tests/libs/test_rate_limiter.py b/api/tests/unit_tests/libs/test_rate_limiter.py index 9d44b07b5e..5052033db8 100644 --- a/api/tests/unit_tests/libs/test_rate_limiter.py +++ b/api/tests/unit_tests/libs/test_rate_limiter.py @@ -1,5 +1,7 @@ from unittest.mock import MagicMock +import pytest + from libs import helper as helper_module @@ -31,7 +33,7 @@ class 
_FakeRedis: return True -def test_rate_limiter_counts_attempts_within_same_second(monkeypatch): +def test_rate_limiter_counts_attempts_within_same_second(monkeypatch: pytest.MonkeyPatch): fake_redis = _FakeRedis() monkeypatch.setattr(helper_module.time, "time", lambda: 1000) @@ -48,7 +50,7 @@ def test_rate_limiter_counts_attempts_within_same_second(monkeypatch): assert limiter.is_rate_limited("203.0.113.10") is True -def test_rate_limiter_uses_injected_redis(monkeypatch): +def test_rate_limiter_uses_injected_redis(monkeypatch: pytest.MonkeyPatch): redis_client = MagicMock() redis_client.zcard.return_value = 1 monkeypatch.setattr(helper_module.time, "time", lambda: 1000) diff --git a/api/tests/unit_tests/libs/test_token.py b/api/tests/unit_tests/libs/test_token.py index 6a65b5faa0..734568d37b 100644 --- a/api/tests/unit_tests/libs/test_token.py +++ b/api/tests/unit_tests/libs/test_token.py @@ -1,5 +1,6 @@ from unittest.mock import MagicMock +import pytest from werkzeug.wrappers import Response from constants import COOKIE_NAME_ACCESS_TOKEN, COOKIE_NAME_WEBAPP_ACCESS_TOKEN @@ -30,7 +31,7 @@ def test_extract_access_token(): assert extract_webapp_access_token(request) == expected_webapp # pyright: ignore[reportArgumentType] -def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch): +def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", "", raising=False) @@ -38,7 +39,7 @@ def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch): assert token._real_cookie_name("csrf_token") == "__Host-csrf_token" -def test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch): +def 
test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", ".example.com", raising=False) @@ -46,7 +47,7 @@ def test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch): assert token._real_cookie_name("csrf_token") == "csrf_token" -def test_set_csrf_cookie_includes_domain_when_configured(monkeypatch): +def test_set_csrf_cookie_includes_domain_when_configured(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", ".example.com", raising=False) diff --git a/api/tests/unit_tests/models/test_comment_models.py b/api/tests/unit_tests/models/test_comment_models.py index 277335cbef..8c8985aff8 100644 --- a/api/tests/unit_tests/models/test_comment_models.py +++ b/api/tests/unit_tests/models/test_comment_models.py @@ -4,7 +4,15 @@ from models.comment import WorkflowComment, WorkflowCommentMention, WorkflowComm def test_workflow_comment_account_properties_and_cache() -> None: - comment = WorkflowComment(created_by="user-1", resolved_by="user-2", content="hello", position_x=1, position_y=2) + comment = WorkflowComment( + created_by="user-1", + resolved_by="user-2", + content="hello", + position_x=1, + position_y=2, + tenant_id="xxx", + app_id="yyy", + ) created_account = Mock(id="user-1") resolved_account = Mock(id="user-2") @@ -21,6 +29,8 @@ def test_workflow_comment_account_properties_and_cache() -> None: get_mock.assert_not_called() comment_without_resolver = WorkflowComment( + tenant_id="xxx", + app_id="yyy", created_by="user-1", 
resolved_by=None, content="hello", @@ -37,7 +47,15 @@ def test_workflow_comment_counts_and_participants() -> None: reply_2 = WorkflowCommentReply(comment_id="comment-1", content="reply-2", created_by="user-2") mention_1 = WorkflowCommentMention(comment_id="comment-1", mentioned_user_id="user-3") mention_2 = WorkflowCommentMention(comment_id="comment-1", mentioned_user_id="user-4") - comment = WorkflowComment(created_by="user-1", resolved_by=None, content="hello", position_x=1, position_y=2) + comment = WorkflowComment( + created_by="user-1", + resolved_by=None, + content="hello", + position_x=1, + position_y=2, + tenant_id="xxx", + app_id="yyy", + ) comment.replies = [reply_1, reply_2] comment.mentions = [mention_1, mention_2] @@ -63,7 +81,15 @@ def test_workflow_comment_counts_and_participants() -> None: def test_workflow_comment_participants_use_cached_accounts() -> None: reply = WorkflowCommentReply(comment_id="comment-1", content="reply-1", created_by="user-2") mention = WorkflowCommentMention(comment_id="comment-1", mentioned_user_id="user-3") - comment = WorkflowComment(created_by="user-1", resolved_by=None, content="hello", position_x=1, position_y=2) + comment = WorkflowComment( + created_by="user-1", + resolved_by=None, + content="hello", + position_x=1, + position_y=2, + tenant_id="xxx", + app_id="yyy", + ) comment.replies = [reply] comment.mentions = [mention] diff --git a/api/tests/unit_tests/models/test_dataset_models.py b/api/tests/unit_tests/models/test_dataset_models.py index 51d95c4239..f4ccfb4191 100644 --- a/api/tests/unit_tests/models/test_dataset_models.py +++ b/api/tests/unit_tests/models/test_dataset_models.py @@ -12,7 +12,9 @@ This test suite covers: import json import pickle from datetime import UTC, datetime +from types import SimpleNamespace from unittest.mock import Mock, patch +from urllib.parse import parse_qs, urlparse from uuid import uuid4 from core.rag.index_processor.constant.index_type import IndexTechniqueType @@ -676,6 +678,51 
@@ class TestDocumentSegmentIndexing: # Assert assert segment.hit_count == 5 + def test_document_segment_attachments_prefers_files_url_for_source_url(self, monkeypatch): + """Test attachment source URLs use FILES_URL before falling back to CONSOLE_API_URL.""" + # Arrange + segment = DocumentSegment( + tenant_id="tenant-1", + dataset_id="dataset-1", + document_id="document-1", + position=1, + content="Test", + word_count=1, + tokens=2, + created_by="user-1", + ) + segment.id = "segment-1" + attachment = SimpleNamespace( + id="upload-1", + name="image.png", + size=128, + extension="png", + mime_type="image/png", + ) + + monkeypatch.setattr("models.dataset.time.time", lambda: 1700000000) + monkeypatch.setattr("models.dataset.os.urandom", lambda _: b"\x01" * 16) + monkeypatch.setattr("models.dataset.dify_config.SECRET_KEY", "unit-secret") + monkeypatch.setattr("models.dataset.dify_config.FILES_URL", "https://files.example.com") + monkeypatch.setattr("models.dataset.dify_config.CONSOLE_API_URL", "https://console.example.com") + + with patch("models.dataset.db") as mock_db: + mock_db.session.execute.return_value.all.return_value = [(Mock(), attachment)] + + # Act + attachments = segment.attachments + + # Assert + assert len(attachments) == 1 + source_url = attachments[0]["source_url"] + parsed = urlparse(source_url) + query = parse_qs(parsed.query) + assert parsed.netloc == "files.example.com" + assert parsed.path == "/files/upload-1/image-preview" + assert query["timestamp"] == ["1700000000"] + assert query["nonce"] == ["01010101010101010101010101010101"] + assert query["sign"][0] + def test_document_segment_error_tracking(self): """Test document segment error tracking.""" # Arrange @@ -800,9 +847,7 @@ class TestDatasetProcessRule: # Act process_rule = DatasetProcessRule( - dataset_id=dataset_id, - mode=ProcessRuleMode.AUTOMATIC, - created_by=created_by, + dataset_id=dataset_id, mode=ProcessRuleMode.AUTOMATIC, created_by=created_by, rules=None ) # Assert diff --git 
a/api/tests/unit_tests/services/controller_api.py b/api/tests/unit_tests/services/controller_api.py index 762d7b9090..e7f7cabecd 100644 --- a/api/tests/unit_tests/services/controller_api.py +++ b/api/tests/unit_tests/services/controller_api.py @@ -146,7 +146,7 @@ class ControllerApiTestDataFactory: return app @staticmethod - def create_api_instance(app): + def create_api_instance(app: Flask): """ Create a Flask-RESTX API instance. @@ -160,7 +160,12 @@ class ControllerApiTestDataFactory: return api @staticmethod - def create_test_client(app, api, resource_class, route): + def create_test_client( + app: Flask, + api: Api, + resource_class: type, + route: str, + ): """ Create a Flask test client with a resource registered. @@ -302,7 +307,7 @@ class TestDatasetListApi: return ControllerApiTestDataFactory.create_flask_app() @pytest.fixture - def api(self, app): + def api(self, app: Flask): """ Create Flask-RESTX API instance. @@ -311,7 +316,7 @@ class TestDatasetListApi: return ControllerApiTestDataFactory.create_api_instance(app) @pytest.fixture - def client(self, app, api): + def client(self, app: Flask, api: Api): """ Create test client with DatasetListApi registered. 
@@ -472,12 +477,12 @@ class TestDatasetApiGet: return ControllerApiTestDataFactory.create_flask_app() @pytest.fixture - def api(self, app): + def api(self, app: Flask): """Create Flask-RESTX API instance.""" return ControllerApiTestDataFactory.create_api_instance(app) @pytest.fixture - def client(self, app, api): + def client(self, app: Flask, api: Api): """Create test client with DatasetApi registered.""" return ControllerApiTestDataFactory.create_test_client(app, api, DatasetApi, "/datasets/") @@ -588,12 +593,12 @@ class TestDatasetApiCreate: return ControllerApiTestDataFactory.create_flask_app() @pytest.fixture - def api(self, app): + def api(self, app: Flask): """Create Flask-RESTX API instance.""" return ControllerApiTestDataFactory.create_api_instance(app) @pytest.fixture - def client(self, app, api): + def client(self, app: Flask, api: Api): """Create test client with DatasetApi registered.""" return ControllerApiTestDataFactory.create_test_client(app, api, DatasetApi, "/datasets") @@ -681,12 +686,12 @@ class TestHitTestingApi: return ControllerApiTestDataFactory.create_flask_app() @pytest.fixture - def api(self, app): + def api(self, app: Flask): """Create Flask-RESTX API instance.""" return ControllerApiTestDataFactory.create_api_instance(app) @pytest.fixture - def client(self, app, api): + def client(self, app: Flask, api: Api): """Create test client with HitTestingApi registered.""" return ControllerApiTestDataFactory.create_test_client( app, api, HitTestingApi, "/datasets//hit-testing" @@ -799,12 +804,12 @@ class TestExternalDatasetApi: return ControllerApiTestDataFactory.create_flask_app() @pytest.fixture - def api(self, app): + def api(self, app: Flask): """Create Flask-RESTX API instance.""" return ControllerApiTestDataFactory.create_api_instance(app) @pytest.fixture - def client_list(self, app, api): + def client_list(self, app: Flask, api: Api): """Create test client for external knowledge API list endpoint.""" return 
ControllerApiTestDataFactory.create_test_client( app, api, ExternalApiTemplateListApi, "/datasets/external-knowledge-api" diff --git a/api/tests/unit_tests/services/plugin/conftest.py b/api/tests/unit_tests/services/plugin/conftest.py index 80c6077b0c..9dc4fa0390 100644 --- a/api/tests/unit_tests/services/plugin/conftest.py +++ b/api/tests/unit_tests/services/plugin/conftest.py @@ -21,7 +21,7 @@ def make_features( @pytest.fixture -def mock_installer(monkeypatch): +def mock_installer(monkeypatch: pytest.MonkeyPatch): """Patch PluginInstaller at the service import site.""" mock = MagicMock() monkeypatch.setattr("services.plugin.plugin_service.PluginInstaller", lambda: mock) diff --git a/api/tests/unit_tests/services/plugin/test_plugin_permission_service.py b/api/tests/unit_tests/services/plugin/test_plugin_permission_service.py deleted file mode 100644 index 53a9e6210c..0000000000 --- a/api/tests/unit_tests/services/plugin/test_plugin_permission_service.py +++ /dev/null @@ -1,79 +0,0 @@ -from unittest.mock import MagicMock, patch - -from models.account import TenantPluginPermission - -MODULE = "services.plugin.plugin_permission_service" - - -def _patched_session(): - """Patch session_factory.create_session() to return a mock session as context manager.""" - session = MagicMock() - session.__enter__ = MagicMock(return_value=session) - session.__exit__ = MagicMock(return_value=False) - session.begin.return_value.__enter__ = MagicMock(return_value=session) - session.begin.return_value.__exit__ = MagicMock(return_value=False) - mock_factory = MagicMock() - mock_factory.create_session.return_value = session - patcher = patch(f"{MODULE}.session_factory", mock_factory) - return patcher, session - - -class TestGetPermission: - def test_returns_permission_when_found(self): - p1, session = _patched_session() - permission = MagicMock() - session.scalar.return_value = permission - - with p1: - from services.plugin.plugin_permission_service import PluginPermissionService - - 
result = PluginPermissionService.get_permission("t1") - - assert result is permission - - def test_returns_none_when_not_found(self): - p1, session = _patched_session() - session.scalar.return_value = None - - with p1: - from services.plugin.plugin_permission_service import PluginPermissionService - - result = PluginPermissionService.get_permission("t1") - - assert result is None - - -class TestChangePermission: - def test_creates_new_permission_when_not_exists(self): - p1, session = _patched_session() - session.scalar.return_value = None - - with p1, patch(f"{MODULE}.select"), patch(f"{MODULE}.TenantPluginPermission") as perm_cls: - perm_cls.return_value = MagicMock() - from services.plugin.plugin_permission_service import PluginPermissionService - - result = PluginPermissionService.change_permission( - "t1", TenantPluginPermission.InstallPermission.EVERYONE, TenantPluginPermission.DebugPermission.EVERYONE - ) - - assert result is True - session.begin.assert_called_once() - session.add.assert_called_once() - - def test_updates_existing_permission(self): - p1, session = _patched_session() - existing = MagicMock() - session.scalar.return_value = existing - - with p1: - from services.plugin.plugin_permission_service import PluginPermissionService - - result = PluginPermissionService.change_permission( - "t1", TenantPluginPermission.InstallPermission.ADMINS, TenantPluginPermission.DebugPermission.ADMINS - ) - - assert result is True - session.begin.assert_called_once() - assert existing.install_permission == TenantPluginPermission.InstallPermission.ADMINS - assert existing.debug_permission == TenantPluginPermission.DebugPermission.ADMINS - session.add.assert_not_called() diff --git a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py index 1a2d062208..287391c24c 100644 --- a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py +++ 
b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py @@ -2,12 +2,13 @@ from types import SimpleNamespace from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from services.rag_pipeline.rag_pipeline_task_proxy import RagPipelineTaskProxy @pytest.fixture -def proxy(mocker): +def proxy(mocker: MockerFixture): """Create a RagPipelineTaskProxy with mocked dependencies.""" mocker.patch("services.rag_pipeline.rag_pipeline_task_proxy.TenantIsolatedTaskQueue") entity = Mock() diff --git a/api/tests/unit_tests/services/recommend_app/test_category_order.py b/api/tests/unit_tests/services/recommend_app/test_category_order.py new file mode 100644 index 0000000000..3b94021f26 --- /dev/null +++ b/api/tests/unit_tests/services/recommend_app/test_category_order.py @@ -0,0 +1,26 @@ +import json +from unittest.mock import patch + +from services.recommend_app.category_order import get_explore_app_category_order, order_categories + + +@patch("services.recommend_app.category_order.redis_client.get") +def test_get_explore_app_category_order_returns_redis_list(mock_get): + mock_get.return_value = json.dumps(["C", "A", "B"]).encode() + + assert get_explore_app_category_order("en-US") == ["C", "A", "B"] + mock_get.assert_called_once_with("explore:apps:category_order:en-US") + + +@patch("services.recommend_app.category_order.redis_client.get") +def test_order_categories_uses_redis_order_as_source_of_truth(mock_get): + mock_get.return_value = json.dumps(["C", "A", "B"]).encode() + + assert order_categories({"A", "B", "C", "D"}, "en-US") == ["C", "A", "B"] + + +@patch("services.recommend_app.category_order.redis_client.get") +def test_order_categories_falls_back_to_sorted_categories_without_redis_order(mock_get): + mock_get.return_value = None + + assert order_categories({"B", "A", "C"}, "en-US") == ["A", "B", "C"] diff --git a/api/tests/unit_tests/services/test_app_generate_service.py 
b/api/tests/unit_tests/services/test_app_generate_service.py index d3f9c5dd9f..216c5d9db6 100644 --- a/api/tests/unit_tests/services/test_app_generate_service.py +++ b/api/tests/unit_tests/services/test_app_generate_service.py @@ -20,6 +20,7 @@ from contextlib import contextmanager from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import services.app_generate_service as ags_module from core.app.entities.app_invoke_entities import InvokeFrom @@ -96,7 +97,7 @@ def _noop_rate_limit_context(rate_limit, request_id): class TestBuildStreamingTaskOnSubscribe: """Tests for AppGenerateService._build_streaming_task_on_subscribe.""" - def test_streams_mode_starts_immediately(self, monkeypatch): + def test_streams_mode_starts_immediately(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "streams") called = [] cb = AppGenerateService._build_streaming_task_on_subscribe(lambda: called.append(1)) @@ -106,7 +107,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] # not called again - def test_pubsub_mode_starts_on_subscribe(self, monkeypatch): + def test_pubsub_mode_starts_on_subscribe(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) # large to prevent timer called = [] @@ -118,7 +119,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] - def test_sharded_mode_starts_on_subscribe(self, monkeypatch): + def test_sharded_mode_starts_on_subscribe(self, monkeypatch: pytest.MonkeyPatch): """sharded is treated like pubsub (i.e. 
not 'streams').""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "sharded") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) @@ -128,7 +129,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] - def test_pubsub_fallback_timer_fires(self, monkeypatch): + def test_pubsub_fallback_timer_fires(self, monkeypatch: pytest.MonkeyPatch): """When nobody subscribes fast enough the fallback timer fires.""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 50) # 50 ms @@ -137,7 +138,7 @@ class TestBuildStreamingTaskOnSubscribe: time.sleep(0.2) # give the timer time to fire assert called == [1] - def test_exception_in_start_task_returns_false(self, monkeypatch): + def test_exception_in_start_task_returns_false(self, monkeypatch: pytest.MonkeyPatch): """When start_task raises, _try_start returns False and next call retries.""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "streams") call_count = 0 @@ -154,7 +155,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert call_count == 2 - def test_concurrent_subscribe_only_starts_once(self, monkeypatch): + def test_concurrent_subscribe_only_starts_once(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) call_count = 0 @@ -176,31 +177,31 @@ class TestBuildStreamingTaskOnSubscribe: # _get_max_active_requests # --------------------------------------------------------------------------- class TestGetMaxActiveRequests: - def test_both_zero_returns_zero(self, monkeypatch): + def test_both_zero_returns_zero(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = 
_make_app(AppMode.CHAT, max_active_requests=0) assert AppGenerateService._get_max_active_requests(app) == 0 - def test_app_limit_only(self, monkeypatch): + def test_app_limit_only(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=5) assert AppGenerateService._get_max_active_requests(app) == 5 - def test_config_limit_only(self, monkeypatch): + def test_config_limit_only(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 10) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=0) assert AppGenerateService._get_max_active_requests(app) == 10 - def test_both_non_zero_returns_min(self, monkeypatch): + def test_both_non_zero_returns_min(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 20) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=5) assert AppGenerateService._get_max_active_requests(app) == 5 - def test_default_active_requests_used_when_app_has_none(self, monkeypatch): + def test_default_active_requests_used_when_app_has_none(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 15) app = _make_app(AppMode.CHAT, max_active_requests=0) @@ -214,7 +215,7 @@ class TestGenerate: """Tests for AppGenerateService.generate covering each mode.""" @pytest.fixture(autouse=True) - def _common(self, mocker, monkeypatch): + def _common(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", False) 
mocker.patch("services.app_generate_service.RateLimit", _DummyRateLimit) # Prevent AppExecutionParams.new from touching real models via isinstance @@ -224,7 +225,7 @@ class TestGenerate: ) # -- COMPLETION --------------------------------------------------------- - def test_completion_mode(self, mocker): + def test_completion_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.CompletionAppGenerator.generate", return_value={"result": "ok"}, @@ -244,7 +245,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- AGENT_CHAT via mode ------------------------------------------------ - def test_agent_chat_mode(self, mocker): + def test_agent_chat_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.AgentChatAppGenerator.generate", return_value={"result": "agent"}, @@ -264,7 +265,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- AGENT_CHAT via is_agent flag (non-AGENT_CHAT mode) ----------------- - def test_agent_via_is_agent_flag(self, mocker): + def test_agent_via_is_agent_flag(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.AgentChatAppGenerator.generate", return_value={"result": "agent-via-flag"}, @@ -285,7 +286,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- CHAT --------------------------------------------------------------- - def test_chat_mode(self, mocker): + def test_chat_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.ChatAppGenerator.generate", return_value={"result": "chat"}, @@ -306,7 +307,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- ADVANCED_CHAT blocking --------------------------------------------- - def test_advanced_chat_blocking(self, mocker): + def test_advanced_chat_blocking(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) @@ -333,7 +334,7 @@ class TestGenerate: 
retrieve_spy.assert_not_called() # -- ADVANCED_CHAT streaming -------------------------------------------- - def test_advanced_chat_streaming(self, mocker, monkeypatch): + def test_advanced_chat_streaming(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -365,7 +366,7 @@ class TestGenerate: delay_spy.assert_called_once() # -- WORKFLOW blocking -------------------------------------------------- - def test_workflow_blocking(self, mocker): + def test_workflow_blocking(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) gen_spy = mocker.patch( @@ -390,7 +391,7 @@ class TestGenerate: assert call_kwargs["pause_state_config"].state_owner_user_id == "owner-id" # -- WORKFLOW streaming ------------------------------------------------- - def test_workflow_streaming(self, mocker, monkeypatch): + def test_workflow_streaming(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -422,7 +423,7 @@ class TestGenerate: delay_spy.assert_called_once() # -- Invalid mode ------------------------------------------------------- - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app("invalid-mode", is_agent=False) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate( @@ -439,14 +440,14 @@ class TestGenerate: # --------------------------------------------------------------------------- class TestGenerateBilling: @pytest.fixture(autouse=True) - def _common(self, mocker, monkeypatch): + def _common(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): mocker.patch("services.app_generate_service.RateLimit", _DummyRateLimit) 
mocker.patch( "services.app_generate_service.rate_limit_context", _noop_rate_limit_context, ) - def test_billing_enabled_consumes_quota(self, mocker, monkeypatch): + def test_billing_enabled_consumes_quota(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", True) quota_charge = MagicMock() reserve_mock = mocker.patch( @@ -472,7 +473,9 @@ class TestGenerateBilling: reserve_mock.assert_called_once_with(QuotaType.WORKFLOW, "tenant-id") quota_charge.commit.assert_called_once() - def test_billing_quota_exceeded_raises_rate_limit_error(self, mocker, monkeypatch): + def test_billing_quota_exceeded_raises_rate_limit_error( + self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch + ): from services.errors.app import QuotaExceededError from services.errors.llm import InvokeRateLimitError @@ -491,7 +494,7 @@ class TestGenerateBilling: streaming=False, ) - def test_exception_refunds_quota_and_exits_rate_limit(self, mocker, monkeypatch): + def test_exception_refunds_quota_and_exits_rate_limit(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", True) quota_charge = MagicMock() mocker.patch( @@ -517,7 +520,9 @@ class TestGenerateBilling: ) quota_charge.refund.assert_called_once() - def test_rate_limit_exit_called_in_finally_for_blocking(self, mocker, monkeypatch): + def test_rate_limit_exit_called_in_finally_for_blocking( + self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch + ): """For non-streaming (blocking) calls, rate_limit.exit should be called in finally.""" monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", False) @@ -552,7 +557,7 @@ class TestGenerateBilling: # _get_workflow # --------------------------------------------------------------------------- class TestGetWorkflow: - def test_debugger_fetches_draft(self, mocker): + def test_debugger_fetches_draft(self, mocker: MockerFixture): draft_wf = 
_make_workflow() ws = MagicMock() ws.get_draft_workflow.return_value = draft_wf @@ -562,7 +567,7 @@ class TestGetWorkflow: assert result is draft_wf ws.get_draft_workflow.assert_called_once() - def test_debugger_raises_when_no_draft(self, mocker): + def test_debugger_raises_when_no_draft(self, mocker: MockerFixture): ws = MagicMock() ws.get_draft_workflow.return_value = None mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -570,7 +575,7 @@ class TestGetWorkflow: with pytest.raises(ValueError, match="Workflow not initialized"): AppGenerateService._get_workflow(_make_app(AppMode.WORKFLOW), InvokeFrom.DEBUGGER) - def test_non_debugger_fetches_published(self, mocker): + def test_non_debugger_fetches_published(self, mocker: MockerFixture): pub_wf = _make_workflow() ws = MagicMock() ws.get_published_workflow.return_value = pub_wf @@ -580,7 +585,7 @@ class TestGetWorkflow: assert result is pub_wf ws.get_published_workflow.assert_called_once() - def test_non_debugger_raises_when_no_published(self, mocker): + def test_non_debugger_raises_when_no_published(self, mocker: MockerFixture): ws = MagicMock() ws.get_published_workflow.return_value = None mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -588,7 +593,7 @@ class TestGetWorkflow: with pytest.raises(ValueError, match="Workflow not published"): AppGenerateService._get_workflow(_make_app(AppMode.WORKFLOW), InvokeFrom.SERVICE_API) - def test_specific_workflow_id_valid_uuid(self, mocker): + def test_specific_workflow_id_valid_uuid(self, mocker: MockerFixture): valid_uuid = str(uuid.uuid4()) specific_wf = _make_workflow(workflow_id=valid_uuid) ws = MagicMock() @@ -601,7 +606,7 @@ class TestGetWorkflow: assert result is specific_wf ws.get_published_workflow_by_id.assert_called_once() - def test_specific_workflow_id_invalid_uuid(self, mocker): + def test_specific_workflow_id_invalid_uuid(self, mocker: MockerFixture): ws = MagicMock() 
mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -610,7 +615,7 @@ class TestGetWorkflow: _make_app(AppMode.WORKFLOW), InvokeFrom.SERVICE_API, workflow_id="not-a-uuid" ) - def test_specific_workflow_id_not_found(self, mocker): + def test_specific_workflow_id_not_found(self, mocker: MockerFixture): valid_uuid = str(uuid.uuid4()) ws = MagicMock() ws.get_published_workflow_by_id.return_value = None @@ -626,7 +631,7 @@ class TestGetWorkflow: # generate_single_iteration # --------------------------------------------------------------------------- class TestGenerateSingleIteration: - def test_advanced_chat_mode(self, mocker): + def test_advanced_chat_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) gen_spy = mocker.patch( @@ -644,7 +649,7 @@ class TestGenerateSingleIteration: iter_spy.assert_called_once() assert result == {"event": "iteration"} - def test_workflow_mode(self, mocker): + def test_workflow_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -662,7 +667,7 @@ class TestGenerateSingleIteration: iter_spy.assert_called_once() assert result == {"event": "wf-iteration"} - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app(AppMode.CHAT) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate_single_iteration(app_model=app, user=_make_user(), node_id="n1", args={}) @@ -672,7 +677,7 @@ class TestGenerateSingleIteration: # generate_single_loop # --------------------------------------------------------------------------- class TestGenerateSingleLoop: - def test_advanced_chat_mode(self, mocker): + def test_advanced_chat_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", 
return_value=workflow) mocker.patch( @@ -690,7 +695,7 @@ class TestGenerateSingleLoop: loop_spy.assert_called_once() assert result == {"event": "loop"} - def test_workflow_mode(self, mocker): + def test_workflow_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -708,7 +713,7 @@ class TestGenerateSingleLoop: loop_spy.assert_called_once() assert result == {"event": "wf-loop"} - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app(AppMode.COMPLETION) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate_single_loop(app_model=app, user=_make_user(), node_id="n1", args=MagicMock()) @@ -718,7 +723,7 @@ class TestGenerateSingleLoop: # generate_more_like_this # --------------------------------------------------------------------------- class TestGenerateMoreLikeThis: - def test_delegates_to_completion_generator(self, mocker): + def test_delegates_to_completion_generator(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.CompletionAppGenerator.generate_more_like_this", return_value={"result": "similar"}, @@ -739,7 +744,7 @@ class TestGenerateMoreLikeThis: # get_response_generator # --------------------------------------------------------------------------- class TestGetResponseGenerator: - def test_non_ended_workflow_run(self, mocker): + def test_non_ended_workflow_run(self, mocker: MockerFixture): app = _make_app(AppMode.ADVANCED_CHAT) workflow_run = MagicMock() workflow_run.id = "run-1" @@ -756,7 +761,7 @@ class TestGetResponseGenerator: result = AppGenerateService.get_response_generator(app_model=app, workflow_run=workflow_run) gen_instance.retrieve_events.assert_called_once() - def test_ended_workflow_run_still_returns_generator(self, mocker): + def test_ended_workflow_run_still_returns_generator(self, mocker: MockerFixture): 
"""Even when the run is ended, the current code still returns a generator (TODO branch).""" app = _make_app(AppMode.WORKFLOW) workflow_run = MagicMock() diff --git a/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py b/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py index 30aa359b45..4293be8f72 100644 --- a/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py +++ b/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py @@ -89,7 +89,7 @@ class _FakeStreams: @pytest.fixture -def _patch_get_channel_streams(monkeypatch): +def _patch_get_channel_streams(monkeypatch: pytest.MonkeyPatch): from libs.broadcast_channel.redis.streams_channel import StreamsBroadcastChannel fake = _FakeStreams() @@ -108,7 +108,7 @@ def _patch_get_channel_streams(monkeypatch): @pytest.fixture -def _patch_get_channel_pubsub(monkeypatch): +def _patch_get_channel_pubsub(monkeypatch: pytest.MonkeyPatch): from libs.broadcast_channel.redis.channel import BroadcastChannel as RedisBroadcastChannel store: dict[str, deque[bytes]] = defaultdict(deque) @@ -163,7 +163,7 @@ def test_streams_full_flow_prepublish_and_replay(): @pytest.mark.usefixtures("_patch_get_channel_pubsub") -def test_pubsub_full_flow_start_on_subscribe_gated(monkeypatch): +def test_pubsub_full_flow_start_on_subscribe_gated(monkeypatch: pytest.MonkeyPatch): # Speed up any potential timer if it accidentally triggers monkeypatch.setattr("services.app_generate_service.SSE_TASK_START_FALLBACK_MS", 50) diff --git a/api/tests/unit_tests/services/test_audio_service.py b/api/tests/unit_tests/services/test_audio_service.py index 83258fd1b7..5d148974f8 100644 --- a/api/tests/unit_tests/services/test_audio_service.py +++ b/api/tests/unit_tests/services/test_audio_service.py @@ -173,7 +173,8 @@ class AudioServiceTestDataFactory: file = Mock(spec=FileStorage) file.filename = filename file.mimetype = mimetype - file.read = 
Mock(return_value=content) + file.stream = Mock() + file.stream.read = Mock(return_value=content) for key, value in kwargs.items(): setattr(file, key, value) return file @@ -216,7 +217,7 @@ class TestAudioServiceASR: """Test speech-to-text (ASR) operations.""" @patch("services.audio_service.ModelManager.for_tenant", autospec=True) - def test_transcript_asr_success_chat_mode(self, mock_model_manager_class, factory): + def test_transcript_asr_success_chat_mode(self, mock_model_manager_class, factory: AudioServiceTestDataFactory): """Test successful ASR transcription in CHAT mode.""" # Arrange app_model_config = factory.create_app_model_config_mock(speech_to_text_dict={"enabled": True}) @@ -241,7 +242,9 @@ class TestAudioServiceASR: mock_model_manager_class.assert_called_once_with(tenant_id=app.tenant_id, user_id="user-123") @patch("services.audio_service.ModelManager.for_tenant", autospec=True) - def test_transcript_asr_success_advanced_chat_mode(self, mock_model_manager_class, factory): + def test_transcript_asr_success_advanced_chat_mode( + self, mock_model_manager_class, factory: AudioServiceTestDataFactory + ): """Test successful ASR transcription in ADVANCED_CHAT mode.""" # Arrange workflow = factory.create_workflow_mock(features_dict={"speech_to_text": {"enabled": True}}) @@ -263,7 +266,7 @@ class TestAudioServiceASR: # Assert assert result == {"text": "Workflow transcribed text"} - def test_transcript_asr_raises_error_when_feature_disabled_chat_mode(self, factory): + def test_transcript_asr_raises_error_when_feature_disabled_chat_mode(self, factory: AudioServiceTestDataFactory): """Test that ASR raises error when speech-to-text is disabled in CHAT mode.""" # Arrange app_model_config = factory.create_app_model_config_mock(speech_to_text_dict={"enabled": False}) @@ -277,7 +280,9 @@ class TestAudioServiceASR: with pytest.raises(ValueError, match="Speech to text is not enabled"): AudioService.transcript_asr(app_model=app, file=file) - def 
test_transcript_asr_raises_error_when_feature_disabled_workflow_mode(self, factory): + def test_transcript_asr_raises_error_when_feature_disabled_workflow_mode( + self, factory: AudioServiceTestDataFactory + ): """Test that ASR raises error when speech-to-text is disabled in WORKFLOW mode.""" # Arrange workflow = factory.create_workflow_mock(features_dict={"speech_to_text": {"enabled": False}}) @@ -291,7 +296,7 @@ class TestAudioServiceASR: with pytest.raises(ValueError, match="Speech to text is not enabled"): AudioService.transcript_asr(app_model=app, file=file) - def test_transcript_asr_raises_error_when_workflow_missing(self, factory): + def test_transcript_asr_raises_error_when_workflow_missing(self, factory: AudioServiceTestDataFactory): """Test that ASR raises error when workflow is missing in WORKFLOW mode.""" # Arrange app = factory.create_app_mock( @@ -304,7 +309,7 @@ class TestAudioServiceASR: with pytest.raises(ValueError, match="Speech to text is not enabled"): AudioService.transcript_asr(app_model=app, file=file) - def test_transcript_asr_raises_error_when_no_file_uploaded(self, factory): + def test_transcript_asr_raises_error_when_no_file_uploaded(self, factory: AudioServiceTestDataFactory): """Test that ASR raises error when no file is uploaded.""" # Arrange app_model_config = factory.create_app_model_config_mock(speech_to_text_dict={"enabled": True}) @@ -317,7 +322,7 @@ class TestAudioServiceASR: with pytest.raises(NoAudioUploadedServiceError): AudioService.transcript_asr(app_model=app, file=None) - def test_transcript_asr_raises_error_for_unsupported_audio_type(self, factory): + def test_transcript_asr_raises_error_for_unsupported_audio_type(self, factory: AudioServiceTestDataFactory): """Test that ASR raises error for unsupported audio file types.""" # Arrange app_model_config = factory.create_app_model_config_mock(speech_to_text_dict={"enabled": True}) @@ -331,7 +336,7 @@ class TestAudioServiceASR: with 
pytest.raises(UnsupportedAudioTypeServiceError): AudioService.transcript_asr(app_model=app, file=file) - def test_transcript_asr_raises_error_for_large_file(self, factory): + def test_transcript_asr_raises_error_for_large_file(self, factory: AudioServiceTestDataFactory): """Test that ASR raises error when file exceeds size limit (30MB).""" # Arrange app_model_config = factory.create_app_model_config_mock(speech_to_text_dict={"enabled": True}) @@ -348,7 +353,9 @@ class TestAudioServiceASR: AudioService.transcript_asr(app_model=app, file=file) @patch("services.audio_service.ModelManager.for_tenant", autospec=True) - def test_transcript_asr_raises_error_when_no_model_instance(self, mock_model_manager_class, factory): + def test_transcript_asr_raises_error_when_no_model_instance( + self, mock_model_manager_class, factory: AudioServiceTestDataFactory + ): """Test that ASR raises error when no model instance is available.""" # Arrange app_model_config = factory.create_app_model_config_mock(speech_to_text_dict={"enabled": True}) @@ -371,7 +378,7 @@ class TestAudioServiceTTS: """Test text-to-speech (TTS) operations.""" @patch("services.audio_service.ModelManager.for_tenant", autospec=True) - def test_transcript_tts_with_text_success(self, mock_model_manager_class, factory): + def test_transcript_tts_with_text_success(self, mock_model_manager_class, factory: AudioServiceTestDataFactory): """Test successful TTS with text input.""" # Arrange app_model_config = factory.create_app_model_config_mock( @@ -405,7 +412,7 @@ class TestAudioServiceTTS: ) @patch("services.audio_service.ModelManager.for_tenant", autospec=True) - def test_transcript_tts_with_default_voice(self, mock_model_manager_class, factory): + def test_transcript_tts_with_default_voice(self, mock_model_manager_class, factory: AudioServiceTestDataFactory): """Test TTS uses default voice when none specified.""" # Arrange app_model_config = factory.create_app_model_config_mock( @@ -435,7 +442,9 @@ class 
TestAudioServiceTTS: assert call_args.kwargs["voice"] == "default-voice" @patch("services.audio_service.ModelManager.for_tenant", autospec=True) - def test_transcript_tts_gets_first_available_voice_when_none_configured(self, mock_model_manager_class, factory): + def test_transcript_tts_gets_first_available_voice_when_none_configured( + self, mock_model_manager_class, factory: AudioServiceTestDataFactory + ): """Test TTS gets first available voice when none is configured.""" # Arrange app_model_config = factory.create_app_model_config_mock( @@ -467,7 +476,7 @@ class TestAudioServiceTTS: @patch("services.audio_service.WorkflowService", autospec=True) @patch("services.audio_service.ModelManager.for_tenant", autospec=True) def test_transcript_tts_workflow_mode_with_draft( - self, mock_model_manager_class, mock_workflow_service_class, factory + self, mock_model_manager_class, mock_workflow_service_class, factory: AudioServiceTestDataFactory ): """Test TTS in WORKFLOW mode with draft workflow.""" # Arrange @@ -499,7 +508,7 @@ class TestAudioServiceTTS: assert result == b"draft audio" mock_workflow_service.get_draft_workflow.assert_called_once_with(app_model=app) - def test_transcript_tts_raises_error_when_text_missing(self, factory): + def test_transcript_tts_raises_error_when_text_missing(self, factory: AudioServiceTestDataFactory): """Test that TTS raises error when text is missing.""" # Arrange app = factory.create_app_mock() @@ -509,7 +518,9 @@ class TestAudioServiceTTS: AudioService.transcript_tts(app_model=app, text=None) @patch("services.audio_service.ModelManager.for_tenant", autospec=True) - def test_transcript_tts_raises_error_when_no_voices_available(self, mock_model_manager_class, factory): + def test_transcript_tts_raises_error_when_no_voices_available( + self, mock_model_manager_class, factory: AudioServiceTestDataFactory + ): """Test that TTS raises error when no voices are available.""" # Arrange app_model_config = factory.create_app_model_config_mock( @@ 
-535,7 +546,7 @@ class TestAudioServiceTTSVoices: """Test TTS voice listing operations.""" @patch("services.audio_service.ModelManager.for_tenant", autospec=True) - def test_transcript_tts_voices_success(self, mock_model_manager_class, factory): + def test_transcript_tts_voices_success(self, mock_model_manager_class, factory: AudioServiceTestDataFactory): """Test successful retrieval of TTS voices.""" # Arrange tenant_id = "tenant-123" @@ -560,7 +571,9 @@ class TestAudioServiceTTSVoices: mock_model_instance.get_tts_voices.assert_called_once_with(language) @patch("services.audio_service.ModelManager.for_tenant", autospec=True) - def test_transcript_tts_voices_raises_error_when_no_model_instance(self, mock_model_manager_class, factory): + def test_transcript_tts_voices_raises_error_when_no_model_instance( + self, mock_model_manager_class, factory: AudioServiceTestDataFactory + ): """Test that TTS voices raises error when no model instance is available.""" # Arrange tenant_id = "tenant-123" @@ -575,7 +588,9 @@ class TestAudioServiceTTSVoices: AudioService.transcript_tts_voices(tenant_id=tenant_id, language=language) @patch("services.audio_service.ModelManager.for_tenant", autospec=True) - def test_transcript_tts_voices_propagates_exceptions(self, mock_model_manager_class, factory): + def test_transcript_tts_voices_propagates_exceptions( + self, mock_model_manager_class, factory: AudioServiceTestDataFactory + ): """Test that TTS voices propagates exceptions from model instance.""" # Arrange tenant_id = "tenant-123" diff --git a/api/tests/unit_tests/services/test_credit_pool_service.py b/api/tests/unit_tests/services/test_credit_pool_service.py new file mode 100644 index 0000000000..e77ef894e7 --- /dev/null +++ b/api/tests/unit_tests/services/test_credit_pool_service.py @@ -0,0 +1,158 @@ +from types import SimpleNamespace +from unittest.mock import patch +from uuid import uuid4 + +import pytest +from sqlalchemy import create_engine, select +from sqlalchemy.engine import 
Engine + +from core.errors.error import QuotaExceededError +from models import TenantCreditPool +from models.enums import ProviderQuotaType +from services.credit_pool_service import CreditPoolService + + +def _create_engine_with_pool(*, quota_limit: int, quota_used: int) -> tuple[Engine, str, str]: + engine = create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + tenant_id = str(uuid4()) + pool_id = str(uuid4()) + with engine.begin() as connection: + connection.execute( + TenantCreditPool.__table__.insert(), + { + "id": pool_id, + "tenant_id": tenant_id, + "pool_type": ProviderQuotaType.TRIAL, + "quota_limit": quota_limit, + "quota_used": quota_used, + }, + ) + return engine, tenant_id, pool_id + + +def _get_quota_used(*, engine: Engine, pool_id: str) -> int | None: + with engine.connect() as connection: + return connection.scalar(select(TenantCreditPool.quota_used).where(TenantCreditPool.id == pool_id)) + + +def test_check_and_deduct_credits_deducts_exact_amount_when_sufficient() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=2) + + with patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)): + deducted_credits = CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=3) + + assert deducted_credits == 3 + assert _get_quota_used(engine=engine, pool_id=pool_id) == 5 + + +def test_check_and_deduct_credits_returns_zero_for_non_positive_request() -> None: + assert CreditPoolService.check_and_deduct_credits(tenant_id=str(uuid4()), credits_required=0) == 0 + + +def test_check_and_deduct_credits_raises_when_pool_is_missing() -> None: + engine = create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="Credit pool not found"), + ): + CreditPoolService.check_and_deduct_credits(tenant_id=str(uuid4()), 
credits_required=1) + + +def test_check_and_deduct_credits_raises_when_pool_is_empty() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=10) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="No credits remaining"), + ): + CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=1) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 10 + + +def test_check_and_deduct_credits_raises_without_partial_deduction_when_insufficient() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=9) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="Insufficient credits remaining"), + ): + CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=3) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 9 + + +def test_check_and_deduct_credits_wraps_unexpected_deduction_errors() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=2) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + patch.object(CreditPoolService, "_get_locked_pool", side_effect=RuntimeError("database unavailable")), + pytest.raises(QuotaExceededError, match="Failed to deduct credits"), + ): + CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=1) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 2 + + +def test_deduct_credits_capped_returns_zero_for_non_positive_request() -> None: + assert CreditPoolService.deduct_credits_capped(tenant_id=str(uuid4()), credits_required=0) == 0 + + +def test_deduct_credits_capped_returns_zero_when_pool_is_missing() -> None: + engine = create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + + with patch("services.credit_pool_service.db", 
SimpleNamespace(engine=engine)): + deducted_credits = CreditPoolService.deduct_credits_capped(tenant_id=str(uuid4()), credits_required=1) + + assert deducted_credits == 0 + + +def test_deduct_credits_capped_returns_zero_when_pool_is_empty() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=10) + + with patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)): + deducted_credits = CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=1) + + assert deducted_credits == 0 + assert _get_quota_used(engine=engine, pool_id=pool_id) == 10 + + +def test_deduct_credits_capped_deducts_only_remaining_balance_when_insufficient() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=9) + + with patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)): + deducted_credits = CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=3) + + assert deducted_credits == 1 + assert _get_quota_used(engine=engine, pool_id=pool_id) == 10 + + +def test_deduct_credits_capped_wraps_unexpected_deduction_errors() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=2) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + patch.object(CreditPoolService, "_get_locked_pool", side_effect=RuntimeError("database unavailable")), + pytest.raises(QuotaExceededError, match="Failed to deduct credits"), + ): + CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=1) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 2 + + +def test_deduct_credits_capped_reraises_quota_exceeded_errors() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=2) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + patch.object(CreditPoolService, "_get_locked_pool", 
side_effect=QuotaExceededError("quota unavailable")), + pytest.raises(QuotaExceededError, match="quota unavailable"), + ): + CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=1) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 2 diff --git a/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py b/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py index 9a513c3fe6..f5879d973d 100644 --- a/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py +++ b/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py @@ -22,7 +22,7 @@ class FakeLock: @pytest.fixture -def fake_current_user(monkeypatch): +def fake_current_user(monkeypatch: pytest.MonkeyPatch): user = create_autospec(Account, instance=True) user.id = "user-1" user.current_tenant_id = "tenant-1" @@ -31,7 +31,7 @@ def fake_current_user(monkeypatch): @pytest.fixture -def fake_features(monkeypatch): +def fake_features(monkeypatch: pytest.MonkeyPatch): """Features.billing.enabled == False to skip quota logic.""" features = types.SimpleNamespace( billing=types.SimpleNamespace(enabled=False, subscription=types.SimpleNamespace(plan="ENTERPRISE")), @@ -45,7 +45,7 @@ def fake_features(monkeypatch): @pytest.fixture -def fake_lock(monkeypatch): +def fake_lock(monkeypatch: pytest.MonkeyPatch): """Patch redis_client.lock to always raise LockNotOwnedError on enter.""" def _fake_lock(name, timeout=None, *args, **kwargs): @@ -61,7 +61,7 @@ def fake_lock(monkeypatch): def test_save_document_with_dataset_id_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_features, fake_lock, @@ -118,7 +118,7 @@ def test_save_document_with_dataset_id_ignores_lock_not_owned( def test_add_segment_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_lock, ): @@ -161,7 +161,7 @@ def test_add_segment_ignores_lock_not_owned( def 
test_multi_create_segment_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_lock, ): diff --git a/api/tests/unit_tests/services/test_human_input_service.py b/api/tests/unit_tests/services/test_human_input_service.py index 55af564821..9fc818f789 100644 --- a/api/tests/unit_tests/services/test_human_input_service.py +++ b/api/tests/unit_tests/services/test_human_input_service.py @@ -3,6 +3,7 @@ from datetime import datetime, timedelta from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import services.human_input_service as human_input_service_module from core.repositories.human_input_repository import ( @@ -177,7 +178,9 @@ def test_get_form_definition_by_token_for_console_uses_repository(sample_form_re assert form.get_definition() == console_record.definition -def test_submit_form_by_token_calls_repository_and_enqueue(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_calls_repository_and_enqueue( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record @@ -204,7 +207,9 @@ def test_submit_form_by_token_calls_repository_and_enqueue(sample_form_record, m enqueue_spy.assert_called_once_with(sample_form_record.workflow_run_id) -def test_submit_form_by_token_skips_enqueue_for_delivery_test(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_skips_enqueue_for_delivery_test( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) test_record = dataclasses.replace( @@ -227,7 +232,9 @@ def test_submit_form_by_token_skips_enqueue_for_delivery_test(sample_form_record enqueue_spy.assert_not_called() -def 
test_submit_form_by_token_passes_submission_user_id(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_passes_submission_user_id( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record @@ -314,7 +321,7 @@ def test_form_submitted_error_init(): assert error.code == 412 -def test_human_input_service_init_with_engine(mocker): +def test_human_input_service_init_with_engine(mocker: MockerFixture): engine = MagicMock(spec=human_input_service_module.Engine) sessionmaker_mock = mocker.patch("services.human_input_service.sessionmaker") @@ -371,7 +378,7 @@ def test_submit_form_by_token_delivery_not_enabled(mock_session_factory): service.submit_form_by_token(RecipientType.STANDALONE_WEB_APP, "token", "action", {}) -def test_submit_form_by_token_no_workflow_run_id(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_no_workflow_run_id(sample_form_record, mock_session_factory, mocker: MockerFixture): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record diff --git a/api/tests/unit_tests/services/test_message_service.py b/api/tests/unit_tests/services/test_message_service.py index 7adc15d63e..51f8b3ef5b 100644 --- a/api/tests/unit_tests/services/test_message_service.py +++ b/api/tests/unit_tests/services/test_message_service.py @@ -906,7 +906,7 @@ class TestMessageServiceSuggestedQuestions: ): """Test successful suggested questions generation in basic Chat mode.""" # Arrange - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = factory.create_app_mock(mode=AppMode.CHAT) user = factory.create_end_user_mock() message = factory.create_message_mock() mock_get_message.return_value = message @@ -953,7 +953,7 @@ class TestMessageServiceSuggestedQuestions: 
"""Test suggested question generation uses frontend configured model and prompt.""" from core.app.entities.app_invoke_entities import InvokeFrom - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = factory.create_app_mock(mode=AppMode.CHAT) app.tenant_id = "tenant-123" user = factory.create_end_user_mock() message = factory.create_message_mock() @@ -1024,7 +1024,7 @@ class TestMessageServiceSuggestedQuestions: factory, ): """Test invalid frontend configured model falls back to tenant default model.""" - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = factory.create_app_mock(mode=AppMode.CHAT) app.tenant_id = "tenant-123" user = factory.create_end_user_mock() message = factory.create_message_mock() diff --git a/api/tests/unit_tests/services/test_model_load_balancing_service.py b/api/tests/unit_tests/services/test_model_load_balancing_service.py index 3119af40a2..beecf73caa 100644 --- a/api/tests/unit_tests/services/test_model_load_balancing_service.py +++ b/api/tests/unit_tests/services/test_model_load_balancing_service.py @@ -104,7 +104,7 @@ def test_enable_disable_model_load_balancing_should_call_provider_configuration_ service.provider_manager.get_configurations.return_value = {"openai": provider_configuration} # Act - getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) # Assert getattr(provider_configuration, expected_provider_method).assert_called_once_with( @@ -125,7 +125,7 @@ def test_enable_disable_model_load_balancing_should_raise_value_error_when_provi # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) def test_get_load_balancing_configs_should_raise_value_error_when_provider_missing( @@ -136,7 +136,7 @@ 
def test_get_load_balancing_configs_should_raise_value_error_when_provider_missi # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - service.get_load_balancing_configs("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + service.get_load_balancing_configs("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) def test_get_load_balancing_configs_should_insert_inherit_config_when_missing_for_custom_provider( @@ -177,7 +177,7 @@ def test_get_load_balancing_configs_should_insert_inherit_config_when_missing_fo "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, ) # Assert @@ -238,7 +238,7 @@ def test_get_load_balancing_configs_should_reorder_existing_inherit_and_tolerate "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, config_from="predefined-model", ) @@ -259,7 +259,7 @@ def test_get_load_balancing_config_should_raise_value_error_when_provider_missin # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") def test_get_load_balancing_config_should_return_none_when_config_not_found( @@ -272,7 +272,7 @@ def test_get_load_balancing_config_should_return_none_when_config_not_found( mock_db.session.scalar.return_value = None # Act - result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") # Assert assert result is None @@ -292,7 +292,7 @@ def test_get_load_balancing_config_should_return_obfuscated_payload_when_config_ mock_db.session.scalar.return_value = config # Act - result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + result = 
service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") # Assert assert result == { @@ -335,7 +335,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_provider_mi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [], "custom-model", ) @@ -354,7 +354,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_configs_is_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, cast(list[dict[str, object]], "invalid-configs"), "custom-model", ) @@ -375,7 +375,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_config_item "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, cast(list[dict[str, object]], ["bad-item"]), "custom-model", ) @@ -397,7 +397,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credential_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"credential_id": "cred-1", "enabled": True}], "predefined-model", ) @@ -418,7 +418,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_name_or_ena "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"enabled": True}], "custom-model", ) @@ -428,7 +428,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_name_or_ena "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "cfg-without-enabled"}], "custom-model", ) @@ -450,7 +450,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_existing_co "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"id": "cfg-2", "name": "invalid", "enabled": True}], "custom-model", ) @@ -472,7 +472,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credentials "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"id": "cfg-1", "name": "new", "enabled": True, "credentials": 
"bad"}], "custom-model", ) @@ -482,7 +482,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credentials "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "new-config", "enabled": True, "credentials": "bad"}], "custom-model", ) @@ -519,7 +519,7 @@ def test_update_load_balancing_configs_should_update_existing_create_new_and_del "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [ {"id": "cfg-1", "name": "updated-name", "enabled": False, "credentials": {"api_key": "plain"}}, {"name": "new-config", "enabled": True, "credentials": {"api_key": "plain"}}, @@ -553,7 +553,7 @@ def test_update_load_balancing_configs_should_raise_value_error_for_invalid_new_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "__inherit__", "enabled": True, "credentials": {"api_key": "x"}}], "custom-model", ) @@ -563,7 +563,7 @@ def test_update_load_balancing_configs_should_raise_value_error_for_invalid_new_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "new", "enabled": True}], "custom-model", ) @@ -585,7 +585,7 @@ def test_update_load_balancing_configs_should_create_from_existing_provider_cred "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"credential_id": "cred-1", "enabled": True}], "predefined-model", ) @@ -611,7 +611,7 @@ def test_validate_load_balancing_credentials_should_raise_value_error_when_provi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, ) @@ -631,7 +631,7 @@ def test_validate_load_balancing_credentials_should_raise_value_error_when_confi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, config_id="cfg-1", ) @@ -654,7 +654,7 @@ def test_validate_load_balancing_credentials_should_delegate_to_custom_validate_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, 
{"api_key": "plain"}, config_id="cfg-1", ) @@ -662,7 +662,7 @@ def test_validate_load_balancing_credentials_should_delegate_to_custom_validate_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, ) diff --git a/api/tests/unit_tests/services/test_model_provider_service.py b/api/tests/unit_tests/services/test_model_provider_service.py index 28d459eac9..9e4eeb2d6e 100644 --- a/api/tests/unit_tests/services/test_model_provider_service.py +++ b/api/tests/unit_tests/services/test_model_provider_service.py @@ -90,7 +90,7 @@ class TestModelProviderServiceConfiguration: ) manager.get_configurations.return_value = {"openai": allowed, "embedding": filtered} - result = service.get_provider_list(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_provider_list(tenant_id="tenant-1", model_type=ModelType.LLM) assert len(result) == 1 assert result[0].provider == "openai" @@ -232,7 +232,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -245,7 +245,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, }, @@ -258,7 +258,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, "credential_name": "cred-a", @@ -277,7 +277,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, "credential_id": "cred-1", @@ -298,7 +298,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": 
"tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -311,7 +311,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -324,7 +324,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -337,7 +337,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", }, "delete_custom_model", @@ -425,7 +425,7 @@ class TestModelProviderServiceListingsAndDefaults: provider_configurations = SimpleNamespace(get_models=MagicMock(return_value=models)) manager.get_configurations.return_value = provider_configurations - result = service.get_models_by_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_models_by_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) provider_configurations.get_models.assert_called_once_with(model_type=ModelType.LLM, only_active=True) assert len(result) == 1 @@ -495,7 +495,7 @@ class TestModelProviderServiceListingsAndDefaults: ), ) - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is not None assert result.model == "gpt-4o" @@ -506,7 +506,7 @@ class TestModelProviderServiceListingsAndDefaults: service, manager = _create_service_with_mocked_manager() manager.get_default_model.return_value = None - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = 
service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is None @@ -514,7 +514,7 @@ class TestModelProviderServiceListingsAndDefaults: service, manager = _create_service_with_mocked_manager() manager.get_default_model.side_effect = RuntimeError("boom") - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is None @@ -523,7 +523,7 @@ class TestModelProviderServiceListingsAndDefaults: service.update_default_model_of_model_type( tenant_id="tenant-1", - model_type=ModelType.LLM.value, + model_type=ModelType.LLM, provider="openai", model="gpt-4o", ) @@ -593,7 +593,7 @@ class TestModelProviderServiceListingsAndDefaults: tenant_id="tenant-1", provider="openai", model="gpt-4o", - model_type=ModelType.LLM.value, + model_type=ModelType.LLM, ) getattr(provider_configuration, provider_method_name).assert_called_once_with( diff --git a/api/tests/unit_tests/services/test_trigger_provider_service.py b/api/tests/unit_tests/services/test_trigger_provider_service.py index 6eba60e5f1..4da4af2d93 100644 --- a/api/tests/unit_tests/services/test_trigger_provider_service.py +++ b/api/tests/unit_tests/services/test_trigger_provider_service.py @@ -325,7 +325,7 @@ def test_update_trigger_subscription_should_raise_error_when_name_conflicts( id="sub-1", name="old", provider_id="langgenius/github/github", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, ) mock_session.scalar.side_effect = [subscription, object()] # found sub, name conflict _mock_get_trigger_provider(mocker, provider_controller) @@ -350,7 +350,7 @@ def test_update_trigger_subscription_should_update_fields_and_clear_cache( properties={"project": "enc-old"}, parameters={"event": "old"}, credentials={"api_key": "enc-old"}, - credential_type=CredentialType.API_KEY.value, + 
credential_type=CredentialType.API_KEY, credential_expires_at=0, expires_at=0, ) @@ -456,7 +456,7 @@ def test_delete_trigger_provider_should_delete_and_clear_cache_even_if_unsubscri id="sub-1", user_id="user-1", provider_id=str(provider_id), - credential_type=CredentialType.OAUTH2.value, + credential_type=CredentialType.OAUTH2, credentials={"token": "enc"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) @@ -492,7 +492,7 @@ def test_delete_trigger_provider_should_skip_unsubscribe_for_unauthorized( id="sub-2", user_id="user-1", provider_id=str(provider_id), - credential_type=CredentialType.UNAUTHORIZED.value, + credential_type=CredentialType.UNAUTHORIZED, credentials={}, to_entity=lambda: SimpleNamespace(id="sub-2"), ) @@ -527,7 +527,7 @@ def test_refresh_oauth_token_should_raise_error_for_non_oauth_credentials( mocker: MockerFixture, mock_session: MagicMock ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY) mock_session.scalar.return_value = subscription # Act + Assert @@ -545,7 +545,7 @@ def test_refresh_oauth_token_should_refresh_and_persist_new_credentials( subscription = SimpleNamespace( provider_id=str(provider_id), user_id="user-1", - credential_type=CredentialType.OAUTH2.value, + credential_type=CredentialType.OAUTH2, credentials={"access_token": "enc"}, credential_expires_at=0, ) @@ -613,7 +613,7 @@ def test_refresh_subscription_should_refresh_and_persist_properties( parameters={"event": "push"}, properties={"p": "enc"}, credentials={"c": "enc"}, - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, ) mock_session.scalar.return_value = subscription _mock_get_trigger_provider(mocker, provider_controller) @@ -989,7 +989,7 @@ def test_verify_subscription_credentials_should_raise_when_api_key_validation_fa provider_controller: MagicMock, ) -> None: # Arrange - subscription = 
SimpleNamespace(credential_type=CredentialType.API_KEY.value, credentials={"api_key": "old"}) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) provider_controller.validate_credentials.side_effect = RuntimeError("bad credentials") @@ -1012,7 +1012,7 @@ def test_verify_subscription_credentials_should_return_verified_when_api_key_val provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value, credentials={"api_key": "old"}) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1036,7 +1036,7 @@ def test_verify_subscription_credentials_should_return_verified_for_non_api_key_ provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.OAUTH2.value, credentials={}) + subscription = SimpleNamespace(credential_type=CredentialType.OAUTH2, credentials={}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1100,7 +1100,7 @@ def test_rebuild_trigger_subscription_should_raise_for_unsupported_credential_ty provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.UNAUTHORIZED.value) + subscription = SimpleNamespace(credential_type=CredentialType.UNAUTHORIZED) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1126,7 +1126,7 @@ def 
test_rebuild_trigger_subscription_should_raise_when_unsubscribe_fails( id="sub-1", user_id="user-1", endpoint_id="endpoint-1", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) @@ -1159,7 +1159,7 @@ def test_rebuild_trigger_subscription_should_resubscribe_and_update_existing_sub id="sub-1", user_id="user-1", endpoint_id="endpoint-1", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credentials={"api_key": "old-key"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) diff --git a/api/tests/unit_tests/services/test_webhook_service.py b/api/tests/unit_tests/services/test_webhook_service.py index ffdcc046f9..a2b56fe777 100644 --- a/api/tests/unit_tests/services/test_webhook_service.py +++ b/api/tests/unit_tests/services/test_webhook_service.py @@ -140,7 +140,7 @@ class TestWebhookServiceUnit: assert args[1] == "text/plain" assert args[2] is webhook_trigger - def test_detect_binary_mimetype_uses_magic(self, monkeypatch): + def test_detect_binary_mimetype_uses_magic(self, monkeypatch: pytest.MonkeyPatch): """python-magic output should be used when available.""" fake_magic = MagicMock() fake_magic.from_buffer.return_value = "image/png" @@ -151,7 +151,7 @@ class TestWebhookServiceUnit: assert result == "image/png" fake_magic.from_buffer.assert_called_once() - def test_detect_binary_mimetype_fallback_without_magic(self, monkeypatch): + def test_detect_binary_mimetype_fallback_without_magic(self, monkeypatch: pytest.MonkeyPatch): """Fallback MIME type should be used when python-magic is unavailable.""" monkeypatch.setattr("services.trigger.webhook_service.magic", None) @@ -159,7 +159,7 @@ class TestWebhookServiceUnit: assert result == "application/octet-stream" - def test_detect_binary_mimetype_handles_magic_exception(self, monkeypatch): + def test_detect_binary_mimetype_handles_magic_exception(self, monkeypatch: 
pytest.MonkeyPatch): """Fallback MIME type should be used when python-magic raises an exception.""" try: import magic as real_magic @@ -268,8 +268,8 @@ class TestWebhookServiceUnit: } # Mock file reads - files["file1"].read.return_value = b"content1" - files["file2"].read.return_value = b"content2" + files["file1"].stream.read.return_value = b"content1" + files["file2"].stream.read.return_value = b"content2" webhook_trigger = MagicMock() webhook_trigger.tenant_id = "test_tenant" @@ -304,8 +304,8 @@ class TestWebhookServiceUnit: "bad_file": MagicMock(filename="test.bad", content_type="text/plain"), } - files["good_file"].read.return_value = b"content" - files["bad_file"].read.side_effect = Exception("Read error") + files["good_file"].stream.read.return_value = b"content" + files["bad_file"].stream.read.side_effect = Exception("Read error") webhook_trigger = MagicMock() webhook_trigger.tenant_id = "test_tenant" diff --git a/api/tests/unit_tests/services/test_workflow_service.py b/api/tests/unit_tests/services/test_workflow_service.py index feafada59a..e152ab923c 100644 --- a/api/tests/unit_tests/services/test_workflow_service.py +++ b/api/tests/unit_tests/services/test_workflow_service.py @@ -11,6 +11,7 @@ This test suite covers: import json import uuid +from types import SimpleNamespace from typing import Any, cast from unittest.mock import ANY, MagicMock, Mock, patch, sentinel @@ -61,7 +62,7 @@ class TestWorkflowAssociatedDataFactory: def create_app_mock( app_id: str = "app-123", tenant_id: str = "tenant-456", - mode: str = AppMode.WORKFLOW.value, + mode: str = AppMode.WORKFLOW, workflow_id: str | None = None, **kwargs, ) -> MagicMock: @@ -93,7 +94,7 @@ class TestWorkflowAssociatedDataFactory: tenant_id: str = "tenant-456", app_id: str = "app-123", version: str = Workflow.VERSION_DRAFT, - workflow_type: str = WorkflowType.WORKFLOW.value, + workflow_type: str = WorkflowType.WORKFLOW, graph: dict[str, Any] | None = None, features: dict[str, Any] | None = None, 
unique_hash: str | None = None, @@ -584,7 +585,7 @@ class TestWorkflowService: id="published-workflow-id", tenant_id=app.tenant_id, app_id=app.id, - type=WorkflowType.WORKFLOW.value, + type=WorkflowType.WORKFLOW, version="2026-03-19T00:00:00", graph=json.dumps(TestWorkflowAssociatedDataFactory.create_valid_workflow_graph()), features=json.dumps(legacy_features), @@ -597,7 +598,7 @@ class TestWorkflowService: id="draft-workflow-id", tenant_id=app.tenant_id, app_id=app.id, - type=WorkflowType.WORKFLOW.value, + type=WorkflowType.WORKFLOW, version=Workflow.VERSION_DRAFT, graph=json.dumps({"nodes": [], "edges": []}), features=json.dumps({}), @@ -685,7 +686,7 @@ class TestWorkflowService: Different app modes have different feature configurations. This ensures the features match the expected schema for workflow apps. """ - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) features = {"file_upload": {"enabled": False}} with patch("services.workflow_service.WorkflowAppConfigManager.config_validate") as mock_validate: @@ -696,7 +697,7 @@ class TestWorkflowService: def test_validate_features_structure_advanced_chat_mode(self, workflow_service): """Test validate_features_structure for advanced chat mode.""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.ADVANCED_CHAT.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.ADVANCED_CHAT) features = {"opening_statement": "Hello"} with patch("services.workflow_service.AdvancedChatAppConfigManager.config_validate") as mock_validate: @@ -707,7 +708,7 @@ class TestWorkflowService: def test_validate_features_structure_invalid_mode_raises_error(self, workflow_service): """Test validate_features_structure raises error for invalid mode.""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION.value) + app = 
TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION) features = {} with pytest.raises(ValueError, match="Invalid app mode"): @@ -1326,7 +1327,7 @@ class TestWorkflowService: The conversion creates equivalent workflow nodes from the chat configuration, giving users more control and customization options. """ - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.CHAT.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.CHAT) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = { "name": "Converted Workflow", @@ -1337,7 +1338,7 @@ class TestWorkflowService: with patch("services.workflow_service.WorkflowConverter") as MockConverter: mock_converter = MockConverter.return_value - mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) mock_converter.convert_to_workflow.return_value = mock_new_app result = workflow_service.convert_to_workflow(app, account, args) @@ -1353,13 +1354,13 @@ class TestWorkflowService: Completion apps are simpler (single prompt-response), so the conversion creates a basic workflow with fewer nodes. 
""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = {"name": "Converted Workflow"} with patch("services.workflow_service.WorkflowConverter") as MockConverter: mock_converter = MockConverter.return_value - mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) mock_converter.convert_to_workflow.return_value = mock_new_app result = workflow_service.convert_to_workflow(app, account, args) @@ -1373,7 +1374,7 @@ class TestWorkflowService: Only chat and completion apps can be converted to workflows. Apps that are already workflows or have other modes cannot be converted. """ - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = {} @@ -2087,7 +2088,7 @@ class TestSetupVariablePool: This helper initialises the VariablePool used for single-step workflow execution. 
""" - def _make_workflow(self, workflow_type: str = WorkflowType.WORKFLOW.value) -> MagicMock: + def _make_workflow(self, workflow_type: str = WorkflowType.WORKFLOW) -> MagicMock: wf = MagicMock(spec=Workflow) wf.app_id = "app-1" wf.id = "wf-1" @@ -2176,7 +2177,7 @@ class TestSetupVariablePool: from models.workflow import WorkflowType # Arrange - workflow = self._make_workflow(workflow_type=WorkflowType.CHAT.value) + workflow = self._make_workflow(workflow_type=WorkflowType.CHAT) # Act with ( @@ -2649,7 +2650,12 @@ class TestWorkflowServiceHumanInputOperations: mock_node = MagicMock() mock_node.node_data = MagicMock() + mock_node.node_data.user_actions = [ + SimpleNamespace(id="submit", title="card_visa_enterprise_001"), + ] mock_node.node_data.outputs_field_names.return_value = ["field1"] + mock_node.render_form_content_before_submission.return_value = "Ticket: {{#$output.field1#}}" + mock_node.render_form_content_with_outputs.return_value = "Ticket: val1" with ( patch("services.workflow_service.db"), @@ -2665,6 +2671,8 @@ class TestWorkflowServiceHumanInputOperations: app_model=app_model, account=account, node_id="node-1", form_inputs={"field1": "val1"}, action="submit" ) assert result["__action_id"] == "submit" + assert result["__action_value"] == "card_visa_enterprise_001" + assert result["__rendered_content"] == "Ticket: val1" mock_saver_cls.return_value.save.assert_called_once() def test_test_human_input_delivery_success(self, service: WorkflowService) -> None: @@ -2845,7 +2853,7 @@ class TestWorkflowServiceFreeNodeExecution: mock_node_cls.validate_node_data.assert_called_once_with(sentinel.adapted_node_data) mock_node_cls.assert_called_once_with( node_id="n-1", - config=sentinel.node_data, + data=sentinel.node_data, graph_init_params=mock_graph_init_context_cls.return_value.to_graph_init_params.return_value, graph_runtime_state=ANY, runtime=mock_runtime_cls.return_value, diff --git a/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py 
b/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py index ce0d94398d..c210db580e 100644 --- a/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py +++ b/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py @@ -180,7 +180,7 @@ class TestSetDefaultProvider: session.scalar.return_value = None with pytest.raises(ValueError, match="provider not found"): - BuiltinToolManageService.set_default_provider("t", "u", "p", "id") + BuiltinToolManageService.set_default_provider("t", "p", "id") @patch(f"{MODULE}.sessionmaker") @patch(f"{MODULE}.db") @@ -189,11 +189,29 @@ class TestSetDefaultProvider: target = MagicMock() session.scalar.return_value = target - result = BuiltinToolManageService.set_default_provider("t", "u", "p", "id") + result = BuiltinToolManageService.set_default_provider("t", "p", "id") assert result == {"result": "success"} assert target.is_default is True + @patch(f"{MODULE}.sessionmaker") + @patch(f"{MODULE}.db") + def test_clear_default_is_tenant_scoped_not_user_scoped(self, mock_db, mock_sm_cls): + # Regression: clearing prior defaults must NOT filter by user_id, otherwise + # two workspace members can each leave their own credential as default at + # the same time (the default flag is tenant-scoped, not per-user). 
+ session = _mock_sessionmaker(mock_sm_cls) + session.scalar.return_value = MagicMock() + + BuiltinToolManageService.set_default_provider("tenant-1", "google", "cred-id") + + session.execute.assert_called_once() + update_stmt = session.execute.call_args.args[0] + compiled = str(update_stmt.compile(compile_kwargs={"literal_binds": True})) + assert "user_id" not in compiled + assert "tenant_id" in compiled + assert "provider" in compiled + class TestUpdateBuiltinToolProvider: @patch(f"{MODULE}.sessionmaker") diff --git a/api/tests/unit_tests/services/workflow/test_draft_var_loader_simple.py b/api/tests/unit_tests/services/workflow/test_draft_var_loader_simple.py index 497c26a9b3..fb5cf7bc6e 100644 --- a/api/tests/unit_tests/services/workflow/test_draft_var_loader_simple.py +++ b/api/tests/unit_tests/services/workflow/test_draft_var_loader_simple.py @@ -33,42 +33,6 @@ class TestDraftVarLoaderSimple: fallback_variables=[], ) - def test_load_offloaded_variable_string_type_unit(self, draft_var_loader): - """Test _load_offloaded_variable with string type - isolated unit test.""" - # Create mock objects - upload_file = Mock(spec=UploadFile) - upload_file.key = "storage/key/test.txt" - - variable_file = Mock(spec=WorkflowDraftVariableFile) - variable_file.value_type = SegmentType.STRING - variable_file.upload_file = upload_file - - draft_var = Mock(spec=WorkflowDraftVariable) - draft_var.id = "draft-var-id" - draft_var.node_id = "test-node-id" - draft_var.name = "test_variable" - draft_var.description = "test description" - draft_var.get_selector.return_value = ["test-node-id", "test_variable"] - draft_var.variable_file = variable_file - - test_content = "This is the full string content" - - with patch("services.workflow_draft_variable_service.storage") as mock_storage: - mock_storage.load.return_value = test_content.encode() - - # Execute the method - selector_tuple, variable = draft_var_loader._load_offloaded_variable(draft_var) - - # Verify results - assert 
selector_tuple == ("test-node-id", "test_variable") - assert variable.id == "draft-var-id" - assert variable.name == "test_variable" - assert variable.description == "test description" - assert variable.value == test_content - - # Verify storage was called correctly - mock_storage.load.assert_called_once_with("storage/key/test.txt") - def test_load_offloaded_variable_object_type_unit(self, draft_var_loader): """Test _load_offloaded_variable with object type - isolated unit test.""" # Create mock objects @@ -139,47 +103,6 @@ class TestDraftVarLoaderSimple: result = draft_var_loader._selector_to_tuple(selector) assert result == ("node_id", "var_name") - def test_load_offloaded_variable_number_type_unit(self, draft_var_loader): - """Test _load_offloaded_variable with number type - isolated unit test.""" - # Create mock objects - upload_file = Mock(spec=UploadFile) - upload_file.key = "storage/key/test_number.json" - - variable_file = Mock(spec=WorkflowDraftVariableFile) - variable_file.value_type = SegmentType.NUMBER - variable_file.upload_file = upload_file - - draft_var = Mock(spec=WorkflowDraftVariable) - draft_var.id = "draft-var-id" - draft_var.node_id = "test-node-id" - draft_var.name = "test_number" - draft_var.description = "test number description" - draft_var.get_selector.return_value = ["test-node-id", "test_number"] - draft_var.variable_file = variable_file - - test_number = 123.45 - test_json_content = json.dumps(test_number) - - with patch("services.workflow_draft_variable_service.storage") as mock_storage: - mock_storage.load.return_value = test_json_content.encode() - from graphon.variables.segments import FloatSegment - - mock_segment = FloatSegment(value=test_number) - draft_var.build_segment_from_serialized_value.return_value = mock_segment - - # Execute the method - selector_tuple, variable = draft_var_loader._load_offloaded_variable(draft_var) - - # Verify results - assert selector_tuple == ("test-node-id", "test_number") - assert variable.id == 
"draft-var-id" - assert variable.name == "test_number" - assert variable.description == "test number description" - - # Verify method calls - mock_storage.load.assert_called_once_with("storage/key/test_number.json") - draft_var.build_segment_from_serialized_value.assert_called_once_with(SegmentType.NUMBER, test_number) - def test_load_offloaded_variable_array_type_unit(self, draft_var_loader): """Test _load_offloaded_variable with array type - isolated unit test.""" # Create mock objects @@ -229,12 +152,13 @@ class TestDraftVarLoaderSimple: variable_file.value_type = SegmentType.FILE variable_file.upload_file = upload_file - draft_var = WorkflowDraftVariable() - draft_var.id = "draft-var-id" - draft_var.app_id = "app-1" - draft_var.node_id = "test-node-id" - draft_var.name = "test_file" - draft_var.description = "test file description" + draft_var = WorkflowDraftVariable( + id="draft-var-id", + app_id="app-1", + node_id="test-node-id", + name="test_file", + description="test file description", + ) draft_var._set_selector(["test-node-id", "test_file"]) draft_var.variable_file = variable_file diff --git a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py index b14d767568..b5b9f0bd97 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py @@ -200,7 +200,7 @@ class TestDraftVariableSaver: user=mock_user, ) - def test_draft_saver_with_small_variables(self, draft_saver, mock_session): + def test_draft_saver_with_small_variables(self, draft_saver: DraftVariableSaver, mock_session): with patch( "services.workflow_draft_variable_service.DraftVariableSaver._try_offload_large_variable", autospec=True ) as _mock_try_offload: @@ -212,18 +212,21 @@ class TestDraftVariableSaver: assert draft_var.file_id is None _mock_try_offload.return_value = None - 
def test_draft_saver_with_large_variables(self, draft_saver, mock_session): + def test_draft_saver_with_large_variables(self, draft_saver: DraftVariableSaver, mock_session): with patch( "services.workflow_draft_variable_service.DraftVariableSaver._try_offload_large_variable", autospec=True ) as _mock_try_offload: mock_segment = StringSegment(value="small value") mock_draft_var_file = WorkflowDraftVariableFile( - id=str(uuidv7()), + tenant_id=str(uuidv7()), + app_id=str(uuidv7()), + user_id=str(uuidv7()), size=1024, length=10, value_type=SegmentType.ARRAY_STRING, - upload_file_id=str(uuid.uuid4()), + upload_file_id=str(uuidv7()), ) + mock_draft_var_file.id = str(uuidv7()) _mock_try_offload.return_value = mock_segment, mock_draft_var_file draft_var = draft_saver._create_draft_variable(name="small_var", value=mock_segment, visible=True) @@ -395,7 +398,7 @@ class TestWorkflowDraftVariableService: self, mock_engine, mock_session, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, ): """Test resetting a node variable when execution record doesn't exist""" mock_repo_session = Mock(spec=Session) @@ -432,7 +435,7 @@ class TestWorkflowDraftVariableService: def test_reset_node_variable_with_valid_execution_record( self, mock_session, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, ): """Test resetting a node variable with valid execution record - should restore from execution""" mock_repo_session = Mock(spec=Session) diff --git a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py index dfdbd9acd6..17e9a077d6 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py @@ -414,8 +414,8 @@ def test_parse_event_message_should_parse_only_json_object( def test_is_terminal_event_should_recognize_finished_and_optional_paused_events() -> None: # Arrange - 
finished_event = {"event": StreamEvent.WORKFLOW_FINISHED.value} - paused_event = {"event": StreamEvent.WORKFLOW_PAUSED.value} + finished_event = {"event": StreamEvent.WORKFLOW_FINISHED} + paused_event = {"event": StreamEvent.WORKFLOW_PAUSED} # Act is_finished = service_module._is_terminal_event(finished_event, close_on_pause=False) @@ -426,7 +426,7 @@ def test_is_terminal_event_should_recognize_finished_and_optional_paused_events( assert is_finished is True assert paused_without_flag is False assert paused_with_flag is True - assert service_module._is_terminal_event(StreamEvent.PING.value, close_on_pause=True) is False + assert service_module._is_terminal_event(StreamEvent.PING, close_on_pause=True) is False def test_apply_message_context_should_update_payload_when_context_exists() -> None: @@ -569,7 +569,7 @@ def test_build_workflow_event_stream_should_emit_ping_and_terminal_snapshot_even monkeypatch.setattr( service_module, "_build_snapshot_events", - MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value, "task_id": "task-1"}]), + MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED, "task_id": "task-1"}]), ) # Act @@ -584,9 +584,9 @@ def test_build_workflow_event_stream_should_emit_ping_and_terminal_snapshot_even ) # Assert - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING finished_event = cast(Mapping[str, Any], events[1]) - assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED.value + assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED assert buffer_state.stop_event.is_set() is True node_repo.get_execution_snapshots_by_workflow_run.assert_called_once() called_kwargs = node_repo.get_execution_snapshots_by_workflow_run.call_args.kwargs @@ -643,7 +643,7 @@ def test_build_workflow_event_stream_should_emit_periodic_ping_and_stop_after_id ) # Assert - assert events == [StreamEvent.PING.value, StreamEvent.PING.value] + assert events == [StreamEvent.PING, StreamEvent.PING] assert 
buffer_state.stop_event.is_set() is True @@ -686,7 +686,7 @@ def test_build_workflow_event_stream_should_exit_when_buffer_done_and_empty( ) # Assert - assert events == [StreamEvent.PING.value] + assert events == [StreamEvent.PING] assert buffer_state.stop_event.is_set() is True @@ -706,7 +706,7 @@ def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) - snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value}]) + snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED}]) monkeypatch.setattr(service_module, "_build_snapshot_events", snapshot_builder) buffer_state = BufferState( queue=queue.Queue(), @@ -729,7 +729,7 @@ def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( ) # Assert - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING assert snapshot_builder.call_args.kwargs["pause_entity"] is None @@ -779,7 +779,7 @@ def test_build_snapshot_events_preserves_public_form_token(monkeypatch: pytest.M session_maker=cast(sessionmaker[Session], session_maker), ) - assert events[-2]["event"] == StreamEvent.HUMAN_INPUT_REQUIRED.value + assert events[-2]["event"] == StreamEvent.HUMAN_INPUT_REQUIRED assert events[-2]["data"]["form_token"] == "wtok" assert events[-2]["data"]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) pause_data = events[-1]["data"] @@ -837,6 +837,6 @@ def test_build_workflow_event_stream_loads_pause_tokens_without_flask_app_contex ) pause_event = cast(Mapping[str, Any], events[-1]) - assert pause_event["event"] == StreamEvent.WORKFLOW_PAUSED.value + assert pause_event["event"] == StreamEvent.WORKFLOW_PAUSED 
assert pause_event["data"]["reasons"][0]["form_token"] == "wtok" assert pause_event["data"]["reasons"][0]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) diff --git a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py index d2634d7d7b..4d711f1bf8 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py @@ -215,8 +215,8 @@ class TestWorkflowEventSnapshotHelpers: assert result == expected def test_is_terminal_event_should_recognize_finished_and_optional_paused_events(self) -> None: - finished_event = {"event": StreamEvent.WORKFLOW_FINISHED.value} - paused_event = {"event": StreamEvent.WORKFLOW_PAUSED.value} + finished_event = {"event": StreamEvent.WORKFLOW_FINISHED} + paused_event = {"event": StreamEvent.WORKFLOW_PAUSED} is_finished = service_module._is_terminal_event(finished_event, include_paused=False) paused_without_flag = service_module._is_terminal_event(paused_event, include_paused=False) @@ -225,7 +225,7 @@ class TestWorkflowEventSnapshotHelpers: assert is_finished is True assert paused_without_flag is False assert paused_with_flag is True - assert service_module._is_terminal_event(StreamEvent.PING.value, include_paused=True) is False + assert service_module._is_terminal_event(StreamEvent.PING, include_paused=True) is False def test_apply_message_context_should_update_payload_when_context_exists(self) -> None: payload: dict[str, Any] = {"event": "workflow_started"} @@ -352,7 +352,7 @@ class TestBuildWorkflowEventStream: monkeypatch.setattr( service_module, "_build_snapshot_events", - MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value, "task_id": "task-1"}]), + MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED, "task_id": "task-1"}]), ) 
events = list( @@ -365,9 +365,9 @@ class TestBuildWorkflowEventStream: ) ) - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING finished_event = cast(Mapping[str, Any], events[1]) - assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED.value + assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED assert buffer_state.stop_event.is_set() is True node_repo.get_execution_snapshots_by_workflow_run.assert_called_once() called_kwargs = node_repo.get_execution_snapshots_by_workflow_run.call_args.kwargs @@ -421,7 +421,7 @@ class TestBuildWorkflowEventStream: ) ) - assert events == [StreamEvent.PING.value, StreamEvent.PING.value] + assert events == [StreamEvent.PING, StreamEvent.PING] assert buffer_state.stop_event.is_set() is True def test_build_workflow_event_stream_should_exit_when_buffer_done_and_empty( @@ -461,7 +461,7 @@ class TestBuildWorkflowEventStream: ) ) - assert events == [StreamEvent.PING.value] + assert events == [StreamEvent.PING] assert buffer_state.stop_event.is_set() is True def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( @@ -480,7 +480,7 @@ class TestBuildWorkflowEventStream: monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) - snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value}]) + snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED}]) monkeypatch.setattr(service_module, "_build_snapshot_events", snapshot_builder) buffer_state = BufferState( queue=queue.Queue(), @@ -501,5 +501,5 @@ class TestBuildWorkflowEventStream: ) ) - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING assert snapshot_builder.call_args.kwargs["pause_entity"] is None diff 
--git a/api/tests/unit_tests/tasks/test_ops_trace_task.py b/api/tests/unit_tests/tasks/test_ops_trace_task.py new file mode 100644 index 0000000000..5844c55c04 --- /dev/null +++ b/api/tests/unit_tests/tasks/test_ops_trace_task.py @@ -0,0 +1,301 @@ +import json +import sys +from contextlib import contextmanager +from types import ModuleType +from unittest.mock import MagicMock, patch + +import pytest +from celery.exceptions import Retry + +from core.ops.entities.config_entity import OPS_TRACE_FAILED_KEY +from core.ops.exceptions import RetryableTraceDispatchError +from tasks.ops_trace_task import process_trace_tasks + + +@contextmanager +def fake_app_context(): + yield + + +class FakeCurrentApp: + def app_context(self): + return fake_app_context() + + +def _install_trace_manager( + trace_instance: MagicMock, + *, + enterprise_enabled: bool = False, + enterprise_trace_cls: MagicMock | None = None, +) -> dict[str, ModuleType]: + ops_trace_manager_module = ModuleType("core.ops.ops_trace_manager") + + class StubOpsTraceManager: + @staticmethod + def get_ops_trace_instance(app_id: str) -> MagicMock: + return trace_instance + + telemetry_module = ModuleType("extensions.ext_enterprise_telemetry") + telemetry_module.is_enabled = lambda: enterprise_enabled + + ops_trace_manager_module.OpsTraceManager = StubOpsTraceManager + modules = { + "core.ops.ops_trace_manager": ops_trace_manager_module, + "extensions.ext_enterprise_telemetry": telemetry_module, + } + if enterprise_trace_cls is not None: + enterprise_module = ModuleType("enterprise") + enterprise_telemetry_module = ModuleType("enterprise.telemetry") + enterprise_trace_module = ModuleType("enterprise.telemetry.enterprise_trace") + enterprise_trace_module.EnterpriseOtelTrace = enterprise_trace_cls + modules.update( + { + "enterprise": enterprise_module, + "enterprise.telemetry": enterprise_telemetry_module, + "enterprise.telemetry.enterprise_trace": enterprise_trace_module, + } + ) + return modules + + +def 
_make_payload() -> str: + return json.dumps({"trace_info": {}, "trace_info_type": None}) + + +def _decode_saved_payload(payload: bytes | str) -> dict[str, object]: + if isinstance(payload, bytes): + payload = payload.decode("utf-8") + return json.loads(payload) + + +def _retryable_dispatch_error() -> RetryableTraceDispatchError: + return RetryableTraceDispatchError("transient trace dispatch failure") + + +def _run_task(file_info: dict[str, str], retries: int = 0) -> None: + process_trace_tasks.push_request(retries=retries) + try: + process_trace_tasks.run(file_info) + finally: + process_trace_tasks.pop_request() + + +def test_process_trace_tasks_retries_retryable_dispatch_failure_and_preserves_payload(): + file_info = {"app_id": "app-id", "file_id": "file-id"} + trace_instance = MagicMock() + pending_error = _retryable_dispatch_error() + trace_instance.trace.side_effect = pending_error + retry_error = Retry() + + with ( + patch.dict(sys.modules, _install_trace_manager(trace_instance)), + patch("tasks.ops_trace_task.current_app", FakeCurrentApp()), + patch("tasks.ops_trace_task.storage.load", return_value=_make_payload()), + patch("tasks.ops_trace_task.storage.delete") as mock_delete, + patch("tasks.ops_trace_task.redis_client.incr") as mock_incr, + patch.object(process_trace_tasks, "retry", side_effect=retry_error) as mock_retry, + pytest.raises(Retry), + ): + _run_task(file_info) + + mock_retry.assert_called_once_with( + exc=pending_error, + countdown=process_trace_tasks.default_retry_delay, + ) + mock_delete.assert_not_called() + mock_incr.assert_not_called() + + +def test_process_trace_tasks_marks_enterprise_trace_dispatched_before_retryable_dispatch_retry(): + file_info = {"app_id": "app-id", "file_id": "file-id"} + trace_instance = MagicMock() + pending_error = _retryable_dispatch_error() + trace_instance.trace.side_effect = pending_error + retry_error = Retry() + enterprise_tracer = MagicMock() + enterprise_trace_cls = 
MagicMock(return_value=enterprise_tracer) + + with ( + patch.dict( + sys.modules, + _install_trace_manager( + trace_instance, + enterprise_enabled=True, + enterprise_trace_cls=enterprise_trace_cls, + ), + ), + patch("tasks.ops_trace_task.current_app", FakeCurrentApp()), + patch("tasks.ops_trace_task.storage.load", return_value=_make_payload()), + patch("tasks.ops_trace_task.storage.save") as mock_save, + patch("tasks.ops_trace_task.storage.delete") as mock_delete, + patch("tasks.ops_trace_task.redis_client.incr") as mock_incr, + patch.object(process_trace_tasks, "retry", side_effect=retry_error) as mock_retry, + pytest.raises(Retry), + ): + _run_task(file_info) + + enterprise_tracer.trace.assert_called_once_with({}) + saved_path, saved_payload = mock_save.call_args.args + assert saved_path == "ops_trace/app-id/file-id.json" + assert _decode_saved_payload(saved_payload)["_enterprise_trace_dispatched"] is True + mock_retry.assert_called_once_with( + exc=pending_error, + countdown=process_trace_tasks.default_retry_delay, + ) + mock_delete.assert_not_called() + mock_incr.assert_not_called() + + +def test_process_trace_tasks_does_not_mark_failed_enterprise_trace_as_dispatched_before_retry(): + file_info = {"app_id": "app-id", "file_id": "file-id"} + trace_instance = MagicMock() + pending_error = _retryable_dispatch_error() + trace_instance.trace.side_effect = pending_error + retry_error = Retry() + enterprise_tracer = MagicMock() + enterprise_tracer.trace.side_effect = RuntimeError("enterprise trace failed") + enterprise_trace_cls = MagicMock(return_value=enterprise_tracer) + + with ( + patch.dict( + sys.modules, + _install_trace_manager( + trace_instance, + enterprise_enabled=True, + enterprise_trace_cls=enterprise_trace_cls, + ), + ), + patch("tasks.ops_trace_task.current_app", FakeCurrentApp()), + patch("tasks.ops_trace_task.storage.load", return_value=_make_payload()), + patch("tasks.ops_trace_task.storage.save") as mock_save, + 
patch("tasks.ops_trace_task.storage.delete") as mock_delete, + patch("tasks.ops_trace_task.redis_client.incr") as mock_incr, + patch.object(process_trace_tasks, "retry", side_effect=retry_error) as mock_retry, + pytest.raises(Retry), + ): + _run_task(file_info) + + enterprise_tracer.trace.assert_called_once_with({}) + mock_save.assert_not_called() + mock_retry.assert_called_once_with( + exc=pending_error, + countdown=process_trace_tasks.default_retry_delay, + ) + mock_delete.assert_not_called() + mock_incr.assert_not_called() + + +def test_process_trace_tasks_skips_enterprise_trace_when_retry_payload_was_already_dispatched(): + file_info = {"app_id": "app-id", "file_id": "file-id"} + trace_instance = MagicMock() + enterprise_trace_cls = MagicMock() + payload = json.dumps({"trace_info": {}, "trace_info_type": None, "_enterprise_trace_dispatched": True}) + + with ( + patch.dict( + sys.modules, + _install_trace_manager( + trace_instance, + enterprise_enabled=True, + enterprise_trace_cls=enterprise_trace_cls, + ), + ), + patch("tasks.ops_trace_task.current_app", FakeCurrentApp()), + patch("tasks.ops_trace_task.storage.load", return_value=payload), + patch("tasks.ops_trace_task.storage.save") as mock_save, + patch("tasks.ops_trace_task.storage.delete") as mock_delete, + patch("tasks.ops_trace_task.redis_client.incr") as mock_incr, + ): + _run_task(file_info) + + enterprise_trace_cls.assert_not_called() + trace_instance.trace.assert_called_once_with({}) + mock_save.assert_not_called() + mock_delete.assert_called_once_with("ops_trace/app-id/file-id.json") + mock_incr.assert_not_called() + + +def test_process_trace_tasks_default_retry_window_covers_parent_span_context_ttl(): + assert process_trace_tasks.max_retries * process_trace_tasks.default_retry_delay >= 300 + + +def test_process_trace_tasks_deletes_payload_on_success(): + file_info = {"app_id": "app-id", "file_id": "file-id"} + trace_instance = MagicMock() + + with ( + patch.dict(sys.modules, 
_install_trace_manager(trace_instance)), + patch("tasks.ops_trace_task.current_app", FakeCurrentApp()), + patch("tasks.ops_trace_task.storage.load", return_value=_make_payload()), + patch("tasks.ops_trace_task.storage.delete") as mock_delete, + patch("tasks.ops_trace_task.redis_client.incr") as mock_incr, + ): + _run_task(file_info) + + trace_instance.trace.assert_called_once_with({}) + mock_delete.assert_called_once_with("ops_trace/app-id/file-id.json") + mock_incr.assert_not_called() + + +def test_process_trace_tasks_deletes_payload_and_counts_terminal_failure(): + file_info = {"app_id": "app-id", "file_id": "file-id"} + trace_instance = MagicMock() + trace_instance.trace.side_effect = RuntimeError("trace failed") + + with ( + patch.dict(sys.modules, _install_trace_manager(trace_instance)), + patch("tasks.ops_trace_task.current_app", FakeCurrentApp()), + patch("tasks.ops_trace_task.storage.load", return_value=_make_payload()), + patch("tasks.ops_trace_task.storage.delete") as mock_delete, + patch("tasks.ops_trace_task.redis_client.incr") as mock_incr, + ): + _run_task(file_info) + + mock_delete.assert_called_once_with("ops_trace/app-id/file-id.json") + mock_incr.assert_called_once_with(f"{OPS_TRACE_FAILED_KEY}_app-id") + + +def test_process_trace_tasks_treats_retry_enqueue_failure_as_terminal_failure(): + file_info = {"app_id": "app-id", "file_id": "file-id"} + trace_instance = MagicMock() + pending_error = _retryable_dispatch_error() + retry_enqueue_error = RuntimeError("retry enqueue failed") + trace_instance.trace.side_effect = pending_error + + with ( + patch.dict(sys.modules, _install_trace_manager(trace_instance)), + patch("tasks.ops_trace_task.current_app", FakeCurrentApp()), + patch("tasks.ops_trace_task.storage.load", return_value=_make_payload()), + patch("tasks.ops_trace_task.storage.delete") as mock_delete, + patch("tasks.ops_trace_task.redis_client.incr") as mock_incr, + patch.object(process_trace_tasks, "retry", side_effect=retry_enqueue_error) as 
mock_retry, + ): + _run_task(file_info) + + mock_retry.assert_called_once_with( + exc=pending_error, + countdown=process_trace_tasks.default_retry_delay, + ) + mock_delete.assert_called_once_with("ops_trace/app-id/file-id.json") + mock_incr.assert_called_once_with(f"{OPS_TRACE_FAILED_KEY}_app-id") + + +def test_process_trace_tasks_deletes_payload_and_counts_exhausted_retryable_dispatch_failure(): + file_info = {"app_id": "app-id", "file_id": "file-id"} + trace_instance = MagicMock() + pending_error = _retryable_dispatch_error() + trace_instance.trace.side_effect = pending_error + + with ( + patch.dict(sys.modules, _install_trace_manager(trace_instance)), + patch("tasks.ops_trace_task.current_app", FakeCurrentApp()), + patch("tasks.ops_trace_task.storage.load", return_value=_make_payload()), + patch("tasks.ops_trace_task.storage.delete") as mock_delete, + patch("tasks.ops_trace_task.redis_client.incr") as mock_incr, + patch.object(process_trace_tasks, "retry") as mock_retry, + ): + _run_task(file_info, retries=process_trace_tasks.max_retries) + + mock_retry.assert_not_called() + mock_delete.assert_called_once_with("ops_trace/app-id/file-id.json") + mock_incr.assert_called_once_with(f"{OPS_TRACE_FAILED_KEY}_app-id") diff --git a/api/tests/unit_tests/tasks/test_workflow_execute_task.py b/api/tests/unit_tests/tasks/test_workflow_execute_task.py index 72508bef52..2544c9d61a 100644 --- a/api/tests/unit_tests/tasks/test_workflow_execute_task.py +++ b/api/tests/unit_tests/tasks/test_workflow_execute_task.py @@ -122,7 +122,7 @@ def test_resume_app_execution_queries_message_by_conversation_and_workflow_run(m workflow_run = SimpleNamespace( workflow_id="wf-id", app_id="app-id", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by="account-id", tenant_id="tenant-id", ) @@ -208,7 +208,7 @@ def test_resume_app_execution_returns_early_when_advanced_chat_missing_conversat workflow_run = SimpleNamespace( workflow_id="wf-id", 
app_id="app-id", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by="account-id", tenant_id="tenant-id", ) diff --git a/api/uv.lock b/api/uv.lock index 1b52f8b53f..6861abdbdc 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -50,7 +50,10 @@ members = [ "dify-vdb-vikingdb", "dify-vdb-weaviate", ] -overrides = [{ name = "pyarrow", specifier = ">=18.0.0" }] +overrides = [ + { name = "litellm", specifier = ">=1.83.7" }, + { name = "pyarrow", specifier = ">=18.0.0" }, +] [[package]] name = "abnf" @@ -481,7 +484,7 @@ wheels = [ [[package]] name = "bce-python-sdk" -version = "0.9.70" +version = "0.9.71" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "crc32c" }, @@ -489,9 +492,9 @@ dependencies = [ { name = "pycryptodome" }, { name = "six" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f7/a9/7c21a9073eb9ad7e8cacf6f8a0e47c0d01ad7bf8fd8e0dc42164b117d60b/bce_python_sdk-0.9.70.tar.gz", hash = "sha256:3b37fd7448278dd33f745a6a23198a2cc2490fded9cb8d59b72500784853df4e", size = 299967, upload-time = "2026-04-14T12:02:42.034Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/74/72058f098b9e7184376f2b3d4c1d233ca7fdc52d0f527078f3ce4d9828b9/bce_python_sdk-0.9.71.tar.gz", hash = "sha256:7a917edaee39082694776e25a9e6556ec8072400a3be649f28eb13f9c7a0b5b5", size = 301508, upload-time = "2026-04-28T06:23:21.061Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3c/2d/70fc866ff98d1f6bd75b0a4235694129b3c519b014254d7bcfc02ffe1bee/bce_python_sdk-0.9.70-py3-none-any.whl", hash = "sha256:fd1f31113e4a8dca314f040662b7caf07ec11cf896c5da232627a9a2c9d2e3a1", size = 415660, upload-time = "2026-04-14T12:02:40.034Z" }, + { url = "https://files.pythonhosted.org/packages/2d/2d/821ae8878dc36b77e56bb7e5dbf9a8e73209c11d38c0ba6b38b5778668ae/bce_python_sdk-0.9.71-py3-none-any.whl", hash = "sha256:9f64a99267616456bac487983d92cc778720bf4f102c8931e8e38aea3cb63268", size = 417000, upload-time = 
"2026-04-28T06:23:19.078Z" }, ] [[package]] @@ -604,29 +607,29 @@ wheels = [ [[package]] name = "boto3" -version = "1.42.96" +version = "1.43.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore" }, { name = "jmespath" }, { name = "s3transfer" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a6/2d/69fb3acd50bab83fb295c167d33c4b653faeb5fb0f42bfca4d9b69d6fb68/boto3-1.42.96.tar.gz", hash = "sha256:b38a9e4a3fbbee9017252576f1379780d0a5814768676c08df2f539d31fcdd68", size = 113203, upload-time = "2026-04-24T19:47:18.677Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/37/78c630d1308964aa9abf44951d9c4df776546ff37251ec2434944e205c4e/boto3-1.43.6.tar.gz", hash = "sha256:e6315effaf12b890b99956e6f8e2c3000a3f64e4ee91943cec3895ce9a836afb", size = 113153, upload-time = "2026-05-07T20:49:59.694Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2b/9d/b3f617d011c42eb804d993103b8fa9acdce153e181a3042f58bfe33d7cb4/boto3-1.42.96-py3-none-any.whl", hash = "sha256:2f4566da2c209a98bdbfc874d813ef231c84ad24e4f815e9bc91de5f63351a24", size = 140557, upload-time = "2026-04-24T19:47:15.824Z" }, + { url = "https://files.pythonhosted.org/packages/c8/e2/3c2eef44f55eafab256836d1d9479bd6a74f70c26cbfdc0639a0e23e4327/boto3-1.43.6-py3-none-any.whl", hash = "sha256:179601ec2992726a718053bf41e43c223ceba397d31ceab11f64d9c910d9fc3a", size = 140502, upload-time = "2026-05-07T20:49:57.8Z" }, ] [[package]] name = "boto3-stubs" -version = "1.42.96" +version = "1.43.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore-stubs" }, { name = "types-s3transfer" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/77/86/65f45f84621cccc2471871088bab8fe515b4346ba9e48d9001484ec440d6/boto3_stubs-1.42.96.tar.gz", hash = "sha256:1e7819c34d1eae8e5e3cfaf9d144fdcad65aad184b380488871de1d0b2851879", size = 102691, upload-time = "2026-04-24T20:25:13.984Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/8a/7f/399bcdeaa60a89aafe5292c8364c313177d22b886dffc1bd7b56fe817900/boto3_stubs-1.43.2.tar.gz", hash = "sha256:0d46636f3e761a92070114b39a76b154c5da6c5794c890e1440a7f191bf1ff2e", size = 102658, upload-time = "2026-05-01T20:31:36.963Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a7/51/bdac1ff9fd4321091183776c5adffce5fc7b4d0fec7e38af9064e24a2497/boto3_stubs-1.42.96-py3-none-any.whl", hash = "sha256:2c112e257f40006147a53f6f62075804689154271973b2807f5656feaa804216", size = 70668, upload-time = "2026-04-24T20:25:09.736Z" }, + { url = "https://files.pythonhosted.org/packages/da/df/17647562444b2047ca325eaaf2fea738571822b7b4efdaa6bacf0fd4fff9/boto3_stubs-1.43.2-py3-none-any.whl", hash = "sha256:941f2907236223a1209704eaf708d3cdf1ecc8695618c558f9fb9e23e90c513b", size = 70653, upload-time = "2026-05-01T20:31:30.057Z" }, ] [package.optional-dependencies] @@ -636,16 +639,16 @@ bedrock-runtime = [ [[package]] name = "botocore" -version = "1.42.96" +version = "1.43.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jmespath" }, { name = "python-dateutil" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/61/77/2c333622a1d47cf5bf73cdcab0cb6c92addafbef2ec05f81b9f75687d9e5/botocore-1.42.96.tar.gz", hash = "sha256:75b3b841ffacaa944f645196655a21ca777591dd8911e732bfb6614545af0250", size = 15263344, upload-time = "2026-04-24T19:47:05.283Z" } +sdist = { url = "https://files.pythonhosted.org/packages/79/a7/23d0f5028011455096a1eeac0ddf3cbe147b3e855e127342f8202552194d/botocore-1.43.6.tar.gz", hash = "sha256:b1e395b347356860398da42e61c808cf1e34b6fa7180cf2b9d87d986e1a06ba0", size = 15336070, upload-time = "2026-05-07T20:49:48.14Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/45/56/152c3a859ca1b9d77ed16deac3cf81682013677c68cf5715698781fc81bd/botocore-1.42.96-py3-none-any.whl", hash = 
"sha256:db2c3e2006628be6fde81a24124a6563c363d6982fb92728837cf174bad9d98a", size = 14945920, upload-time = "2026-04-24T19:47:00.323Z" }, + { url = "https://files.pythonhosted.org/packages/e5/c8/6f47223840e8d8cfa8c9f7c0ec1b77970417f257fc885169ff4f6326ce09/botocore-1.43.6-py3-none-any.whl", hash = "sha256:b6d1fdbc6f65a5fe0b7e947823aa37535d3f39f3ba4d21110fab1f55bbbcc04b", size = 15017094, upload-time = "2026-05-07T20:49:44.964Z" }, ] [[package]] @@ -889,14 +892,14 @@ wheels = [ [[package]] name = "click" -version = "8.3.1" +version = "8.1.8" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" }, ] [[package]] @@ -1289,7 +1292,7 @@ wheels = [ [[package]] name = "dify-api" -version = "1.13.3" +version = "1.14.0" source = { virtual = "." 
} dependencies = [ { name = "aliyun-log-python-sdk" }, @@ -1578,7 +1581,7 @@ requires-dist = [ { name = "aliyun-log-python-sdk", specifier = ">=0.9.44,<1.0.0" }, { name = "azure-identity", specifier = ">=1.25.3,<2.0.0" }, { name = "bleach", specifier = ">=6.3.0" }, - { name = "boto3", specifier = ">=1.42.96" }, + { name = "boto3", specifier = ">=1.43.6" }, { name = "celery", specifier = ">=5.6.3" }, { name = "croniter", specifier = ">=6.2.2" }, { name = "fastopenapi", extras = ["flask"], specifier = "~=0.7.0" }, @@ -1592,10 +1595,10 @@ requires-dist = [ { name = "gevent", specifier = ">=26.4.0" }, { name = "gevent-websocket", specifier = ">=0.10.1" }, { name = "gmpy2", specifier = ">=2.3.0" }, - { name = "google-api-python-client", specifier = ">=2.194.0" }, - { name = "google-cloud-aiplatform", specifier = ">=1.148.1,<2.0.0" }, - { name = "graphon", specifier = "~=0.2.2" }, - { name = "gunicorn", specifier = ">=25.3.0" }, + { name = "google-api-python-client", specifier = ">=2.196.0" }, + { name = "google-cloud-aiplatform", specifier = ">=1.151.0,<2.0.0" }, + { name = "graphon", specifier = "~=0.3.1" }, + { name = "gunicorn", specifier = ">=26.0.0" }, { name = "httpx", extras = ["socks"], specifier = ">=0.28.1,<1.0.0" }, { name = "httpx-sse", specifier = "~=0.4.0" }, { name = "json-repair", specifier = "~=0.59.4" }, @@ -1619,17 +1622,17 @@ requires-dist = [ [package.metadata.requires-dev] dev = [ { name = "basedpyright", specifier = ">=1.39.3" }, - { name = "boto3-stubs", specifier = ">=1.42.96" }, + { name = "boto3-stubs", specifier = ">=1.43.2" }, { name = "celery-types", specifier = ">=0.23.0" }, { name = "coverage", specifier = ">=7.13.4" }, { name = "dotenv-linter", specifier = ">=0.7.0" }, { name = "faker", specifier = ">=40.15.0" }, - { name = "hypothesis", specifier = ">=6.152.3" }, + { name = "hypothesis", specifier = ">=6.152.4" }, { name = "import-linter", specifier = ">=2.3" }, { name = "lxml-stubs", specifier = ">=0.5.1" }, { name = "mypy", specifier 
= ">=1.20.2" }, { name = "pandas-stubs", specifier = ">=3.0.0" }, - { name = "pyrefly", specifier = ">=0.62.0" }, + { name = "pyrefly", specifier = ">=0.64.0" }, { name = "pytest", specifier = ">=9.0.3" }, { name = "pytest-benchmark", specifier = ">=5.2.3" }, { name = "pytest-cov", specifier = ">=7.1.0" }, @@ -1642,8 +1645,8 @@ dev = [ { name = "testcontainers", specifier = ">=4.14.2" }, { name = "types-aiofiles", specifier = ">=25.1.0" }, { name = "types-beautifulsoup4", specifier = ">=4.12.0" }, - { name = "types-cachetools", specifier = ">=6.2.0" }, - { name = "types-cffi", specifier = ">=2.0.0.20260408" }, + { name = "types-cachetools", specifier = ">=7.0.0.20260503" }, + { name = "types-cffi", specifier = ">=2.0.0.20260429" }, { name = "types-colorama", specifier = ">=0.4.15" }, { name = "types-defusedxml", specifier = ">=0.7.0" }, { name = "types-deprecated", specifier = ">=1.3.1" }, @@ -1651,7 +1654,7 @@ dev = [ { name = "types-flask-cors", specifier = ">=6.0.0" }, { name = "types-flask-migrate", specifier = ">=4.1.0" }, { name = "types-gevent", specifier = ">=26.4.0" }, - { name = "types-greenlet", specifier = ">=3.4.0" }, + { name = "types-greenlet", specifier = ">=3.5.0.20260428" }, { name = "types-html5lib", specifier = ">=1.1.11" }, { name = "types-jmespath", specifier = ">=1.1.0.20260408" }, { name = "types-markdown", specifier = ">=3.10.2" }, @@ -1660,7 +1663,7 @@ dev = [ { name = "types-olefile", specifier = ">=0.47.0" }, { name = "types-openpyxl", specifier = ">=3.1.5" }, { name = "types-pexpect", specifier = ">=4.9.0" }, - { name = "types-protobuf", specifier = ">=7.34.1" }, + { name = "types-protobuf", specifier = ">=7.34.1.20260503" }, { name = "types-psutil", specifier = ">=7.2.2" }, { name = "types-psycopg2", specifier = ">=2.9.21.20260422" }, { name = "types-pygments", specifier = ">=2.20.0" }, @@ -1683,13 +1686,13 @@ dev = [ ] storage = [ { name = "azure-storage-blob", specifier = ">=12.28.0" }, - { name = "bce-python-sdk", specifier = 
">=0.9.70" }, + { name = "bce-python-sdk", specifier = ">=0.9.71" }, { name = "cos-python-sdk-v5", specifier = ">=1.9.42" }, { name = "esdk-obs-python", specifier = ">=3.22.2" }, { name = "google-cloud-storage", specifier = ">=3.10.1" }, { name = "opendal", specifier = ">=0.46.0" }, { name = "oss2", specifier = ">=2.19.1" }, - { name = "supabase", specifier = ">=2.29.0" }, + { name = "supabase", specifier = ">=2.30.0" }, { name = "tos", specifier = ">=2.9.0" }, ] tools = [ @@ -2657,14 +2660,14 @@ wheels = [ [[package]] name = "gitpython" -version = "3.1.47" +version = "3.1.50" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "gitdb" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c1/bd/50db468e9b1310529a19fce651b3b0e753b5c07954d486cba31bbee9a5d5/gitpython-3.1.47.tar.gz", hash = "sha256:dba27f922bd2b42cb54c87a8ab3cb6beb6bf07f3d564e21ac848913a05a8a3cd", size = 216978, upload-time = "2026-04-22T02:44:44.059Z" } +sdist = { url = "https://files.pythonhosted.org/packages/33/f6/354ae6491228b5eb40e10d89c4d13c651fe1cf7556e35ebdded50cff57ce/gitpython-3.1.50.tar.gz", hash = "sha256:80da2d12504d52e1f998772dc5baf6e553f8d2fcfe1fcc226c9d9a2ee3372dcc", size = 219798, upload-time = "2026-05-06T04:01:26.571Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f2/c5/a1bc0996af85757903cf2bf444a7824e68e0035ce63fb41d6f76f9def68b/gitpython-3.1.47-py3-none-any.whl", hash = "sha256:489f590edfd6d20571b2c0e72c6a6ac6915ee8b8cd04572330e3842207a78905", size = 209547, upload-time = "2026-04-22T02:44:41.271Z" }, + { url = "https://files.pythonhosted.org/packages/20/7a/1c6e3562dfd8950adbb11ffbc65d21e7c89d01a6e4f137fa981056de25c5/gitpython-3.1.50-py3-none-any.whl", hash = "sha256:d352abe2908d07355014abdd21ddf798c2a961469239afec4962e9da884858f9", size = 212507, upload-time = "2026-05-06T04:01:23.799Z" }, ] [[package]] @@ -2719,7 +2722,7 @@ grpc = [ [[package]] name = "google-api-python-client" -version = "2.194.0" +version = "2.196.0" source = { 
registry = "https://pypi.org/simple" } dependencies = [ { name = "google-api-core" }, @@ -2728,9 +2731,9 @@ dependencies = [ { name = "httplib2" }, { name = "uritemplate" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/60/ab/e83af0eb043e4ccc49571ca7a6a49984e9d00f4e9e6e6f1238d60bc84dce/google_api_python_client-2.194.0.tar.gz", hash = "sha256:db92647bd1a90f40b79c9618461553c2b20b6a43ce7395fa6de07132dc14f023", size = 14443469, upload-time = "2026-04-08T23:07:35.757Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/f3/34ef8aca7909675fe327f96c1ed927f0520e7acf68af19157e96acc05e76/google_api_python_client-2.196.0.tar.gz", hash = "sha256:9f335d38f6caaa2747bcf64335ed1a9a19047d53e86538eda6a1b17d37f1743d", size = 14628129, upload-time = "2026-05-06T23:47:35.655Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b0/34/5a624e49f179aa5b0cb87b2ce8093960299030ff40423bfbde09360eb908/google_api_python_client-2.194.0-py3-none-any.whl", hash = "sha256:61eaaac3b8fc8fdf11c08af87abc3d1342d1b37319cc1b57405f86ef7697e717", size = 15016514, upload-time = "2026-04-08T23:07:33.093Z" }, + { url = "https://files.pythonhosted.org/packages/99/c7/1817b4edf966d5afcac1c0781ca36d621bc0cb58104c4e7c2a475ab185f7/google_api_python_client-2.196.0-py3-none-any.whl", hash = "sha256:2591e9b47dcb17e4e62a09370aaee3bcf323af8f28ccecdabcd0a42a23ca4db5", size = 15206663, upload-time = "2026-05-06T23:47:32.886Z" }, ] [[package]] @@ -2766,7 +2769,7 @@ wheels = [ [[package]] name = "google-cloud-aiplatform" -version = "1.148.1" +version = "1.151.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "docstring-parser" }, @@ -2782,9 +2785,9 @@ dependencies = [ { name = "pydantic" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c9/f3/b2a9417014c93858a2e3266134f931eefd972c2d410b25d7b8782fc6f143/google_cloud_aiplatform-1.148.1.tar.gz", hash = 
"sha256:75d605fba34e68714bd08e1e482755d0a6e3ae972805f809d088e686c30879e7", size = 10278758, upload-time = "2026-04-17T23:45:26.738Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/f6/e2fbe175a011f5080da8c1f7d9169a6875a00ea2c7bee4193d952b097400/google_cloud_aiplatform-1.151.0.tar.gz", hash = "sha256:2f29b1853f790a7371a746c747bf1f664380b534254682441acd4b5ee26fafd2", size = 10617421, upload-time = "2026-05-07T21:56:52.91Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/56/5b/e3515d7bbba602c2b0f6a0da5431785e897252443682e4735d0e6873dc8f/google_cloud_aiplatform-1.148.1-py2.py3-none-any.whl", hash = "sha256:035101e2d8e65c6a706cc3930b2452de7ddcbde50dd130320fcea0d8b03b0c5a", size = 8434481, upload-time = "2026-04-17T23:45:22.919Z" }, + { url = "https://files.pythonhosted.org/packages/f6/4a/cd35f8ba622d563b1335222284d2838aa789b953b40516b1b997e50fe5b6/google_cloud_aiplatform-1.151.0-py2.py3-none-any.whl", hash = "sha256:61372bb0923b14b8027f45b83393452df3a85bf4ea86fa48e08844fb5ec50049", size = 8732627, upload-time = "2026-05-07T21:56:49.014Z" }, ] [[package]] @@ -2937,7 +2940,7 @@ httpx = [ [[package]] name = "graphon" -version = "0.2.2" +version = "0.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "charset-normalizer" }, @@ -2958,9 +2961,9 @@ dependencies = [ { name = "unstructured", extra = ["docx", "epub", "md", "ppt", "pptx"] }, { name = "webvtt-py" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/08/50/e745a79c5f742f88f6011a1f7c9ba2c2f9cc1beedd982f0b192f1ab8c748/graphon-0.2.2.tar.gz", hash = "sha256:141f0de536171850f1af6f738dc66f0285aadd3c097f1dad2a038636789e0aa5", size = 236360, upload-time = "2026-04-17T08:52:28.047Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/ef/43217842e84160acca64a95858f1689389a50e04a53fc94f2aa836b4eaf7/graphon-0.3.1.tar.gz", hash = "sha256:49971baed1eb16c8e1983f755e659902e4f117a68dc62fad19e91472950b937d", size = 242210, upload-time = 
"2026-05-07T06:58:21.879Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/de/89/a6340afdaf5169d17a318e00fc685fb67ed99baa602c2cbbbf6af6a76096/graphon-0.2.2-py3-none-any.whl", hash = "sha256:754e544d08779138f99eac6547ab08559463680e2c76488b05e1c978210392b4", size = 340808, upload-time = "2026-04-17T08:52:26.5Z" }, + { url = "https://files.pythonhosted.org/packages/62/37/bef16ed3d6da7446b36769fa388f4dc79f95337ffa16d6dfc3177152507e/graphon-0.3.1-py3-none-any.whl", hash = "sha256:e6422c7e3f1ce7d2185979c17e08201816ca25d46d400ebdd035c95d501c04fe", size = 349368, upload-time = "2026-05-07T06:58:20.217Z" }, ] [[package]] @@ -3099,14 +3102,14 @@ wheels = [ [[package]] name = "gunicorn" -version = "25.3.0" +version = "26.0.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "packaging" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c4/f4/e78fa054248fab913e2eab0332c6c2cb07421fca1ce56d8fe43b6aef57a4/gunicorn-25.3.0.tar.gz", hash = "sha256:f74e1b2f9f76f6cd1ca01198968bd2dd65830edc24b6e8e4d78de8320e2fe889", size = 634883, upload-time = "2026-03-27T00:00:26.092Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/b7/a4a3f632f823e432ce6bc65f62961b7980c898c77f075a2f7118cb3846fe/gunicorn-26.0.0.tar.gz", hash = "sha256:ca9346f85e3a4aeeb64d491045c16b9a35647abd37ea15efe53080eb8b090baf", size = 727286, upload-time = "2026-05-05T06:38:25.529Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/43/c8/8aaf447698c4d59aa853fd318eed300b5c9e44459f242ab8ead6c9c09792/gunicorn-25.3.0-py3-none-any.whl", hash = "sha256:cacea387dab08cd6776501621c295a904fe8e3b7aae9a1a3cbb26f4e7ed54660", size = 208403, upload-time = "2026-03-27T00:00:27.386Z" }, + { url = "https://files.pythonhosted.org/packages/e6/40/9c2384fc2be4ad25dd4a49decd5ad9ea5a3639814c11bd40ab77cb9f0a14/gunicorn-26.0.0-py3-none-any.whl", hash = "sha256:40233d26a5f0d1872916188c276e21641155111c2853f0c2cd55260aec0d24fc", size = 212009, upload-time = 
"2026-05-05T06:38:23.007Z" }, ] [[package]] @@ -3319,14 +3322,14 @@ wheels = [ [[package]] name = "hypothesis" -version = "6.152.3" +version = "6.152.4" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "sortedcontainers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/70/90/fc0b263b6f2622e5f8d2aa93f2e95ba79718a5faa7d2a74bfab10d6b0905/hypothesis-6.152.3.tar.gz", hash = "sha256:c4e5300d3755b6c8a270a28fe5abff40153e927328e89d2bb0229c1384618998", size = 466478, upload-time = "2026-04-26T17:31:07.657Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fa/c7/3147bd903d6b18324a016d43a259cf5b4bb4545e1ead6773dc8a0374e70a/hypothesis-6.152.4.tar.gz", hash = "sha256:31c8f9ce619716f543e2710b489b1633c833586641d9e6c94cee03f109a5afc4", size = 466444, upload-time = "2026-04-27T20:18:37.594Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/90/38/15475b91a4c12721d2be3349e9d6cf8649c76ed9bc1287e2de7c8d06c261/hypothesis-6.152.3-py3-none-any.whl", hash = "sha256:4b47f00916c858ed49cf870a2f08b04e5fff5afae0bb78f3b4a6d9c74fd6c7bc", size = 532154, upload-time = "2026-04-26T17:31:04.42Z" }, + { url = "https://files.pythonhosted.org/packages/19/89/0f50dd0d92e8a7dffc24f69ab910ff81db89b2f082ba42682bd57695e4d2/hypothesis-6.152.4-py3-none-any.whl", hash = "sha256:e730fd93c7578182efadc7f90b3c5437ee4d55edf738930eb5043c81ac1d97e8", size = 532145, upload-time = "2026-04-27T20:18:35.043Z" }, ] [[package]] @@ -3355,14 +3358,14 @@ wheels = [ [[package]] name = "importlib-metadata" -version = "8.4.0" +version = "8.5.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "zipp" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c0/bd/fa8ce65b0a7d4b6d143ec23b0f5fd3f7ab80121078c465bc02baeaab22dc/importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5", size = 54320, upload-time = "2024-08-20T17:11:42.348Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/cd/12/33e59336dca5be0c398a7482335911a33aa0e20776128f038019f1a95f1b/importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7", size = 55304, upload-time = "2024-09-11T14:56:08.937Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/14/362d31bf1076b21e1bcdcb0dc61944822ff263937b804a79231df2774d28/importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1", size = 26269, upload-time = "2024-08-20T17:11:41.102Z" }, + { url = "https://files.pythonhosted.org/packages/a0/d9/a1e041c5e7caa9a05c925f4bdbdfb7f006d1f74996af53467bc394c97be7/importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b", size = 26514, upload-time = "2024-09-11T14:56:07.019Z" }, ] [[package]] @@ -3503,7 +3506,7 @@ wheels = [ [[package]] name = "jsonschema" -version = "4.25.1" +version = "4.23.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, @@ -3511,9 +3514,9 @@ dependencies = [ { name = "referencing" }, { name = "rpds-py" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +sdist = { url = "https://files.pythonhosted.org/packages/38/2e/03362ee4034a4c917f697890ccd4aec0800ccf9ded7f511971c75451deec/jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4", size = 325778, upload-time = "2024-07-08T18:40:05.546Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = 
"sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, + { url = "https://files.pythonhosted.org/packages/69/4a/4f9dbeb84e8850557c02365a0eee0649abe5eb1d84af92a25731c6c0f922/jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566", size = 88462, upload-time = "2024-07-08T18:40:00.165Z" }, ] [[package]] @@ -3654,7 +3657,7 @@ wheels = [ [[package]] name = "litellm" -version = "1.83.0" +version = "1.83.14" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -3670,9 +3673,9 @@ dependencies = [ { name = "tiktoken" }, { name = "tokenizers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/22/92/6ce9737554994ca8e536e5f4f6a87cc7c4774b656c9eb9add071caf7d54b/litellm-1.83.0.tar.gz", hash = "sha256:860bebc76c4bb27b4cf90b4a77acd66dba25aced37e3db98750de8a1766bfb7a", size = 17333062, upload-time = "2026-03-31T05:08:25.331Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8d/7c/c095649380adc96c8630273c1768c2ad1e74aa2ee1dd8dd05d218a60569f/litellm-1.83.14.tar.gz", hash = "sha256:24aef9b47cdc424c833e32f3727f411741c690832cd1fe4405e0077144fe09c9", size = 14836599, upload-time = "2026-04-26T03:16:10.176Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/19/2c/a670cc050fcd6f45c6199eb99e259c73aea92edba8d5c2fc1b3686d36217/litellm-1.83.0-py3-none-any.whl", hash = "sha256:88c536d339248f3987571493015784671ba3f193a328e1ea6780dbebaa2094a8", size = 15610306, upload-time = "2026-03-31T05:08:21.987Z" }, + { url = "https://files.pythonhosted.org/packages/7f/5c/1b5691575420135e90578543b2bf219497caa33cfd0af64cb38f30288450/litellm-1.83.14-py3-none-any.whl", hash = "sha256:92b11ba2a32cf80707ddf388d18526696c7999a21b418c5e3b6eda1243d2cfdb", size = 16457054, upload-time = "2026-04-26T03:16:05.72Z" }, ] [[package]] @@ -3740,14 +3743,14 @@ wheels = [ [[package]] name = "mako" -version = 
"1.3.11" +version = "1.3.12" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markupsafe" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/59/8a/805404d0c0b9f3d7a326475ca008db57aea9c5c9f2e1e39ed0faa335571c/mako-1.3.11.tar.gz", hash = "sha256:071eb4ab4c5010443152255d77db7faa6ce5916f35226eb02dc34479b6858069", size = 399811, upload-time = "2026-04-14T20:19:51.493Z" } +sdist = { url = "https://files.pythonhosted.org/packages/00/62/791b31e69ae182791ec67f04850f2f062716bbd205483d63a215f3e062d3/mako-1.3.12.tar.gz", hash = "sha256:9f778e93289bd410bb35daadeb4fc66d95a746f0b75777b942088b7fd7af550a", size = 400219, upload-time = "2026-04-28T19:01:08.512Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/68/a5/19d7aaa7e433713ffe881df33705925a196afb9532efc8475d26593921a6/mako-1.3.11-py3-none-any.whl", hash = "sha256:e372c6e333cf004aa736a15f425087ec977e1fcbd2966aae7f17c8dc1da27a77", size = 78503, upload-time = "2026-04-14T20:19:53.233Z" }, + { url = "https://files.pythonhosted.org/packages/bc/b1/a0ec7a5a9db730a08daef1fdfb8090435b82465abbf758a596f0ea88727e/mako-1.3.12-py3-none-any.whl", hash = "sha256:8f61569480282dbf557145ce441e4ba888be453c30989f879f0d652e39f53ea9", size = 78521, upload-time = "2026-04-28T19:01:10.393Z" }, ] [[package]] @@ -3969,11 +3972,11 @@ wheels = [ [[package]] name = "mypy-boto3-bedrock-runtime" -version = "1.42.42" +version = "1.43.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/46/bb/65dc1b2c5796a6ab5f60bdb57343bd6c3ecb82251c580eca415c8548333e/mypy_boto3_bedrock_runtime-1.42.42.tar.gz", hash = "sha256:3a4088218478b6fbbc26055c03c95bee4fc04624a801090b3cce3037e8275c8d", size = 29840, upload-time = "2026-02-04T20:53:05.999Z" } +sdist = { url = "https://files.pythonhosted.org/packages/21/f2/61519c0162307b1e4d47f63ed8b25390874640934f3d2d25c5d6c5078dd8/mypy_boto3_bedrock_runtime-1.43.0.tar.gz", hash = 
"sha256:19fc3167de6e66dd7a0ab293adc55c93e2fd67be35e8ab4fc3a7523a380752ce", size = 29903, upload-time = "2026-04-29T22:57:57.561Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/00/43/7ea062f2228f47b5779dcfa14dab48d6e29f979b35d1a5102b0ba80b9c1b/mypy_boto3_bedrock_runtime-1.42.42-py3-none-any.whl", hash = "sha256:b2d16eae22607d0685f90796b3a0afc78c0b09d45872e00eafd634a31dd9358f", size = 36077, upload-time = "2026-02-04T20:53:01.768Z" }, + { url = "https://files.pythonhosted.org/packages/40/4d/7e4c4d55af23b2b1304d6814db8c406beab7977056963200230417c1a2db/mypy_boto3_bedrock_runtime-1.43.0-py3-none-any.whl", hash = "sha256:a125296f992093d58bdcd95176002680fa81ca8a8b8bdf02afad7e5f2d8966aa", size = 36172, upload-time = "2026-04-29T22:57:54.777Z" }, ] [[package]] @@ -4135,7 +4138,7 @@ wheels = [ [[package]] name = "openai" -version = "2.8.1" +version = "2.24.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -4147,9 +4150,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d5/e4/42591e356f1d53c568418dc7e30dcda7be31dd5a4d570bca22acb0525862/openai-2.8.1.tar.gz", hash = "sha256:cb1b79eef6e809f6da326a7ef6038719e35aa944c42d081807bfa1be8060f15f", size = 602490, upload-time = "2025-11-17T22:39:59.549Z" } +sdist = { url = "https://files.pythonhosted.org/packages/55/13/17e87641b89b74552ed408a92b231283786523edddc95f3545809fab673c/openai-2.24.0.tar.gz", hash = "sha256:1e5769f540dbd01cb33bc4716a23e67b9d695161a734aff9c5f925e2bf99a673", size = 658717, upload-time = "2026-02-24T20:02:07.958Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/55/4f/dbc0c124c40cb390508a82770fb9f6e3ed162560181a85089191a851c59a/openai-2.8.1-py3-none-any.whl", hash = "sha256:c6c3b5a04994734386e8dad3c00a393f56d3b68a27cd2e8acae91a59e4122463", size = 1022688, upload-time = "2025-11-17T22:39:57.675Z" }, + { url = 
"https://files.pythonhosted.org/packages/c9/30/844dc675ee6902579b8eef01ed23917cc9319a1c9c0c14ec6e39340c96d0/openai-2.24.0-py3-none-any.whl", hash = "sha256:fed30480d7d6c884303287bde864980a4b137b60553ffbcf9ab4a233b7a73d94", size = 1120122, upload-time = "2026-02-24T20:02:05.669Z" }, ] [[package]] @@ -4262,32 +4265,32 @@ wheels = [ [[package]] name = "opentelemetry-exporter-otlp" -version = "1.41.0" +version = "1.41.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "opentelemetry-exporter-otlp-proto-grpc" }, { name = "opentelemetry-exporter-otlp-proto-http" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/65/b7/845565a2ab5d22c1486bc7729a06b05cd0964c61539d766e1f107c9eea0c/opentelemetry_exporter_otlp-1.41.0.tar.gz", hash = "sha256:97ff847321f8d4c919032a67d20d3137fb7b34eac0c47f13f71112858927fc5b", size = 6152, upload-time = "2026-04-09T14:38:35.895Z" } +sdist = { url = "https://files.pythonhosted.org/packages/42/84/d55baf8e1a222f40282956083e67de9fa92d5fa451108df4839505fa2a24/opentelemetry_exporter_otlp-1.41.1.tar.gz", hash = "sha256:299a2f0541ca175df186f5ac58fd5db177ba1e9b72b0826049062f750d55b47f", size = 6152, upload-time = "2026-04-24T13:15:40.006Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e0/f2/f1076fff152858773f22cda146713f9ae3661795af6bacd411a76f2151ac/opentelemetry_exporter_otlp-1.41.0-py3-none-any.whl", hash = "sha256:443b6a45c990ae4c55e147f97049a86c5f5b704f3d78b48b44a073a886ec4d6e", size = 7022, upload-time = "2026-04-09T14:38:13.934Z" }, + { url = "https://files.pythonhosted.org/packages/6d/d5/ea4aa7dfc458fd537bd9519ea0e7226eef2a6212dfe952694984167daaba/opentelemetry_exporter_otlp-1.41.1-py3-none-any.whl", hash = "sha256:db276c5a80c02b063994e80950d00ca1bfddcf6520f608335b7dc2db0c0eb9c6", size = 7025, upload-time = "2026-04-24T13:15:17.839Z" }, ] [[package]] name = "opentelemetry-exporter-otlp-proto-common" -version = "1.41.0" +version = "1.41.1" source = { registry = "https://pypi.org/simple" } 
dependencies = [ { name = "opentelemetry-proto" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/8c/28/e8eca94966fe9a1465f6094dc5ddc5398473682180279c94020bc23b4906/opentelemetry_exporter_otlp_proto_common-1.41.0.tar.gz", hash = "sha256:966bbce537e9edb166154779a7c4f8ab6b8654a03a28024aeaf1a3eacb07d6ee", size = 20411, upload-time = "2026-04-09T14:38:36.572Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/fa/f9e3bd3c4d692b3ce9a2880a167d1f79681a1bea11f00d5bf76adc03e6ea/opentelemetry_exporter_otlp_proto_common-1.41.1.tar.gz", hash = "sha256:0e253156ea9c36b0bd3d2440c5c9ba7dd1f3fb64ba7a08fc85fbac536b56e1fb", size = 20409, upload-time = "2026-04-24T13:15:40.924Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/26/c4/78b9bf2d9c1d5e494f44932988d9d91c51a66b9a7b48adf99b62f7c65318/opentelemetry_exporter_otlp_proto_common-1.41.0-py3-none-any.whl", hash = "sha256:7a99177bf61f85f4f9ed2072f54d676364719c066f6d11f515acc6c745c7acf0", size = 18366, upload-time = "2026-04-09T14:38:15.135Z" }, + { url = "https://files.pythonhosted.org/packages/29/48/bce76d3ea772b609757e9bc844e02ab408a6446609bf74fb562062ba6b71/opentelemetry_exporter_otlp_proto_common-1.41.1-py3-none-any.whl", hash = "sha256:10da74dad6a49344b9b7b21b6182e3060373a235fde1528616d5f01f92e66aa9", size = 18366, upload-time = "2026-04-24T13:15:18.917Z" }, ] [[package]] name = "opentelemetry-exporter-otlp-proto-grpc" -version = "1.41.0" +version = "1.41.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "googleapis-common-protos" }, @@ -4298,14 +4301,14 @@ dependencies = [ { name = "opentelemetry-sdk" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/42/46/d75a3f8c91915f2e58f61d0a2e4ada63891e7c7a37a20ff7949ba184a6b2/opentelemetry_exporter_otlp_proto_grpc-1.41.0.tar.gz", hash = "sha256:f704201251c6f65772b11bddea1c948000554459101bdbb0116e0a01b70592f6", size = 25754, upload-time = "2026-04-09T14:38:37.423Z" } +sdist = 
{ url = "https://files.pythonhosted.org/packages/1e/9b/e4503060b8695579dbaad187dc8cef4554188de68748c88060599b77489e/opentelemetry_exporter_otlp_proto_grpc-1.41.1.tar.gz", hash = "sha256:b05df8fa1333dc9a3fda36b676b96b5095ab6016d3f0c3296d430d629ba1443b", size = 25755, upload-time = "2026-04-24T13:15:41.93Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/81/f6/b09e2e0c9f0b5750cebc6eaf31527b910821453cef40a5a0fe93550422b2/opentelemetry_exporter_otlp_proto_grpc-1.41.0-py3-none-any.whl", hash = "sha256:3a1a86bd24806ccf136ec9737dbfa4c09b069f9130ff66b0acb014f9c5255fd1", size = 20299, upload-time = "2026-04-09T14:38:17.01Z" }, + { url = "https://files.pythonhosted.org/packages/ac/f2/c54f33c92443d087703e57e52e55f22f111373a5c4c4aa349ea60efe512e/opentelemetry_exporter_otlp_proto_grpc-1.41.1-py3-none-any.whl", hash = "sha256:537926dcef951136992479af1d9cd88f25e33d56c530e9f020ed57774dca2f94", size = 20297, upload-time = "2026-04-24T13:15:20.212Z" }, ] [[package]] name = "opentelemetry-exporter-otlp-proto-http" -version = "1.41.0" +version = "1.41.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "googleapis-common-protos" }, @@ -4316,9 +4319,9 @@ dependencies = [ { name = "requests" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/19/63/d9f43cd75f3fabb7e01148c89cfa9491fc18f6580a6764c554ff7c953c46/opentelemetry_exporter_otlp_proto_http-1.41.0.tar.gz", hash = "sha256:dcd6e0686f56277db4eecbadd5262124e8f2cc739cadbc3fae3d08a12c976cf5", size = 24139, upload-time = "2026-04-09T14:38:38.128Z" } +sdist = { url = "https://files.pythonhosted.org/packages/33/5b/9d3c7f70cca10136ba82a81e738dee626c8e7fc61c6887ea9a58bf34c606/opentelemetry_exporter_otlp_proto_http-1.41.1.tar.gz", hash = "sha256:4747a9604c8550ab38c6fd6180e2fcb80de3267060bef2c306bad3cb443302bc", size = 24139, upload-time = "2026-04-24T13:15:42.977Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/64/b5/a214cd907eedc17699d1c2d602288ae17cb775526df04db3a3b3585329d2/opentelemetry_exporter_otlp_proto_http-1.41.0-py3-none-any.whl", hash = "sha256:a9c4ee69cce9c3f4d7ee736ad1b44e3c9654002c0816900abbafd9f3cf289751", size = 22673, upload-time = "2026-04-09T14:38:18.349Z" }, + { url = "https://files.pythonhosted.org/packages/ba/4d/ef07ff2fc630849f2080ae0ae73a61f67257905b7ac79066640bfa0c5739/opentelemetry_exporter_otlp_proto_http-1.41.1-py3-none-any.whl", hash = "sha256:1a21e8f49c7a946d935551e90947d6c3eb39236723c6624401da0f33d68edcb4", size = 22673, upload-time = "2026-04-24T13:15:21.313Z" }, ] [[package]] @@ -4476,14 +4479,14 @@ wheels = [ [[package]] name = "opentelemetry-proto" -version = "1.41.0" +version = "1.41.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "protobuf" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e0/d9/08e3dc6156878713e8c811682bc76151f5fe1a3cb7f3abda3966fd56e71e/opentelemetry_proto-1.41.0.tar.gz", hash = "sha256:95d2e576f9fb1800473a3e4cfcca054295d06bdb869fda4dc9f4f779dc68f7b6", size = 45669, upload-time = "2026-04-09T14:38:45.978Z" } +sdist = { url = "https://files.pythonhosted.org/packages/99/e8/633c6d8a9c8840338b105907e55c32d3da1983abab5e52f899f72a82c3d1/opentelemetry_proto-1.41.1.tar.gz", hash = "sha256:4b9d2eb631237ea43b80e16c073af438554e32bc7e9e3f8ca4a9582f900020e5", size = 45670, upload-time = "2026-04-24T13:15:49.768Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/49/8c/65ef7a9383a363864772022e822b5d5c6988e6f9dabeebb9278f5b86ebc3/opentelemetry_proto-1.41.0-py3-none-any.whl", hash = "sha256:b970ab537309f9eed296be482c3e7cca05d8aca8165346e929f658dbe153b247", size = 72074, upload-time = "2026-04-09T14:38:29.38Z" }, + { url = "https://files.pythonhosted.org/packages/e4/1e/5cd77035e3e82070e2265a63a760f715aacd3cb16dddc7efee913f297fcc/opentelemetry_proto-1.41.1-py3-none-any.whl", hash = 
"sha256:0496713b804d127a4147e32849fbaf5683fac8ee98550e8e7679cd706c289720", size = 72076, upload-time = "2026-04-24T13:15:32.542Z" }, ] [[package]] @@ -4810,7 +4813,7 @@ wheels = [ [[package]] name = "postgrest" -version = "2.29.0" +version = "2.30.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "deprecation" }, @@ -4818,9 +4821,9 @@ dependencies = [ { name = "pydantic" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/52/98/f216b8b5c4d116ab6a2fb21339b5821da279ee773e163612418e1c56c012/postgrest-2.29.0.tar.gz", hash = "sha256:a87081858f627fcd57e8e7137004a1ef0adbdf0dbdfed1384e9ea1d7a9c525ec", size = 14217, upload-time = "2026-04-24T13:13:00.281Z" } +sdist = { url = "https://files.pythonhosted.org/packages/56/7c/54e7be05adc9fd6fd98dc572ddfc8982d45bec314a55711e37277d440698/postgrest-2.30.0.tar.gz", hash = "sha256:4f89eec56ce605ab6fbddd9b96d526a9bb44962796d44a5d85cb77640eb766c3", size = 14430, upload-time = "2026-05-06T17:35:21.559Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/2c/0b/08b670a93a90d625c557b9e64b8a5fdeec80c3542d2d0265f0b4d6b16646/postgrest-2.29.0-py3-none-any.whl", hash = "sha256:3ee48e146f726272733d20e2b12de354cdb6cb9dd9cc3a61ed97ce69047aeb96", size = 22735, upload-time = "2026-04-24T13:12:58.405Z" }, + { url = "https://files.pythonhosted.org/packages/22/aa/ff2e09f99f95ea96fddeb373646bf907dd89a24fc00b5d38e5674ca7c9ca/postgrest-2.30.0-py3-none-any.whl", hash = "sha256:30631e7993da542419f4217cf3b60aa641084731ea15e66a18526a3a52e40a7d", size = 23108, upload-time = "2026-05-06T17:35:20.531Z" }, ] [[package]] @@ -5359,19 +5362,19 @@ wheels = [ [[package]] name = "pyrefly" -version = "0.62.0" +version = "0.64.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/bb/ad/8874ed25781e7dd561c6d75fb4a7becf10a18d75b074f25b845cc334f781/pyrefly-0.62.0.tar.gz", hash = 
"sha256:da1fbe1075dc1e6c8e3134e9370b0a0e7a296061d782cca5bf83dbb8e4c10d7c", size = 5537672, upload-time = "2026-04-20T17:12:15.718Z" } +sdist = { url = "https://files.pythonhosted.org/packages/85/99/923622d7b52ef84e83f357b19bd08dff063ccc5f4472b003105e1f308d93/pyrefly-0.64.0.tar.gz", hash = "sha256:fbfcdb0031adadc340b6c64cb41c6094c95349ee952fe3d4c143866add829172", size = 5678516, upload-time = "2026-05-06T17:28:44.056Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1b/ea/09bd9da7d5df294db800312fb415be2fefbaa5594178e9e49f44fa071aea/pyrefly-0.62.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:9d78ec4f126dee1fa76215b193b964490ce10e62a32d2787a72c51623658b803", size = 13020414, upload-time = "2026-04-20T17:11:43.617Z" }, - { url = "https://files.pythonhosted.org/packages/4b/f0/f84afac4f220c4c8c801b779ee2ff28ad3f7731f4283c2e1b6ee9012e8c2/pyrefly-0.62.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2a41a34902d20756264486f9e309f22633d100261bd960feea6e858a098d985d", size = 12515659, upload-time = "2026-04-20T17:11:46.59Z" }, - { url = "https://files.pythonhosted.org/packages/40/0b/620c39cefa9ae1b25ee7a2da9d8d3c278b095649cb8435c5e01ea64f7c17/pyrefly-0.62.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4666c6b65aea662e5f77b64dc91c091b7ea5cede6aa66c0f4cbae26480403583", size = 36228332, upload-time = "2026-04-20T17:11:50.523Z" }, - { url = "https://files.pythonhosted.org/packages/2d/fb/47b8b76438c12761e509a3666cd5a99d4af7f21976ba8385feb475cbfe30/pyrefly-0.62.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1aefab798f47d37c13ded791192fee9b39a6d2b12e31f38ae06a1f80c4b26e22", size = 38995741, upload-time = "2026-04-20T17:11:54.702Z" }, - { url = "https://files.pythonhosted.org/packages/55/d2/03bd17673f61147cd5609cd7d6a1455eeccc17a07a7e141ed9931b0c42c0/pyrefly-0.62.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8fa986b50d56740da1d7ae7c660a505143cb9d286fa98cc7e5f4a759cc6eaa5d", size 
= 37205321, upload-time = "2026-04-20T17:11:58.9Z" }, - { url = "https://files.pythonhosted.org/packages/75/14/20ba7b7f2d182f9b7c1e24a3041dac9b5730ae28cfe1614a2c98706650f2/pyrefly-0.62.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32e9b175805c82ffb967e4708f4910bace7e1a12736907380cc9afdbaabb0efb", size = 41786834, upload-time = "2026-04-20T17:12:03.221Z" }, - { url = "https://files.pythonhosted.org/packages/fa/c8/5a7ba88c4fa1b5090d877f70fa1b742b921b9e7d8d3f4b6b9b1ba1820850/pyrefly-0.62.0-py3-none-win32.whl", hash = "sha256:1cd98edc20cab5bac8016c9220ee66080e39bd22e7f0e9bb3e2c4e2be1555eed", size = 12010170, upload-time = "2026-04-20T17:12:06.791Z" }, - { url = "https://files.pythonhosted.org/packages/2e/78/d8f810de010ff2ed594c630c724fd817ef430963249e9eb396ce8f785e9d/pyrefly-0.62.0-py3-none-win_amd64.whl", hash = "sha256:6994f8ee7d6720325ee52207fbdaca98a799a1efe462bb5ba90c47160f7f3e6e", size = 12861816, upload-time = "2026-04-20T17:12:09.689Z" }, - { url = "https://files.pythonhosted.org/packages/c7/a9/ac824ef6a3f50b7c0ec5974471f8f2cb205cd1edd53a5abbcf7ba37feb5d/pyrefly-0.62.0-py3-none-win_arm64.whl", hash = "sha256:362a5d47a5ac5aaa5258091e878a1759ff8b687d8cf462af1c516144f7b0108a", size = 12352977, upload-time = "2026-04-20T17:12:12.736Z" }, + { url = "https://files.pythonhosted.org/packages/b8/1c/b001b7e84a811dbb3c85e31bd4bfc3edfa3c94438140cd1d6e8c06b7c1df/pyrefly-0.64.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:683b317d8d0e815fb2ad75b7e0fa6c15eed5be4bcbc407dc13312984da3a9c47", size = 13287462, upload-time = "2026-05-06T17:28:19.169Z" }, + { url = "https://files.pythonhosted.org/packages/89/02/1e6fcd311bd7c24aaccc0afb998d584e1fa6c370e1428b4b091103760efe/pyrefly-0.64.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:96913cc4f066a7bd008b9dba8e3951234e92bb8a3a2cb1aea0e274fd2a444c55", size = 12777104, upload-time = "2026-05-06T17:28:22.047Z" }, + { url = 
"https://files.pythonhosted.org/packages/d6/2b/3f347b8d97c9065d6ace14a22591c8d91e64610e74e0d4f214b3025ebcf7/pyrefly-0.64.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2ae557e1b6a6a5bda844806cae10b212cf84ea786ece10d55083a0321ee1705", size = 37064924, upload-time = "2026-05-06T17:28:24.743Z" }, + { url = "https://files.pythonhosted.org/packages/73/dd/0b40175e930a96139a8e9f62a8e1db7f9a5e9df8e6cef08bf280affcb05e/pyrefly-0.64.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d062ac1744346efacd7df23c6bbff662ad29ed495923cb59ede656a306355655", size = 39719832, upload-time = "2026-05-06T17:28:28.042Z" }, + { url = "https://files.pythonhosted.org/packages/9a/4b/0afb4ad02eb67ddb299ff3f7108ceb307e520578b00e900d07f2371423ca/pyrefly-0.64.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6850b305d45121911fbe25ad56497d2e887b387ea50644ba15a8ad2a8cf855f4", size = 37861666, upload-time = "2026-05-06T17:28:31.234Z" }, + { url = "https://files.pythonhosted.org/packages/e5/1b/f5390f8678433708288afab13f043ddd021a55dba3f665360d2c9396ee04/pyrefly-0.64.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a259925620a84fe87cd30a82643ec524eeef631f0c4ec5af81a21e006c2f5b1", size = 42634235, upload-time = "2026-05-06T17:28:34.405Z" }, + { url = "https://files.pythonhosted.org/packages/47/f7/4b66934e375dde3e4d75373b1a94eb7e7c0c0c788e94267641a223930180/pyrefly-0.64.0-py3-none-win32.whl", hash = "sha256:20317f6dd97e22bc508b8dbc537e59b0ab58e384113ee61920c87ed1a6a12f62", size = 12213388, upload-time = "2026-05-06T17:28:37.146Z" }, + { url = "https://files.pythonhosted.org/packages/0a/15/653523d99795041a1be6dadf7a73225317cb2aae4b21e6df57edbce807f0/pyrefly-0.64.0-py3-none-win_amd64.whl", hash = "sha256:e88fc6a83add9b7c2224be0f74df1b0db10b3af856ae30e4e0a90ba3644c712f", size = 13136719, upload-time = "2026-05-06T17:28:39.767Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/bb/9ea1c26b511b38a3e1eefc1bd3de7d3f65b2bbfdb59295f3244f61564a81/pyrefly-0.64.0-py3-none-win_arm64.whl", hash = "sha256:73744bd95e836abda0d08e9cdcf008142090ae0124c8f8ff477c944b60c0343c", size = 12526050, upload-time = "2026-05-06T17:28:42.077Z" }, ] [[package]] @@ -5723,16 +5726,16 @@ wheels = [ [[package]] name = "realtime" -version = "2.29.0" +version = "2.30.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pydantic" }, { name = "typing-extensions" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6e/f1/08c42a42653942fadfbef495d5b0239356140e7186cc528704956c5f06d4/realtime-2.29.0.tar.gz", hash = "sha256:8efe4a1b3a548a5fda09de701bd041fa0970c5a2fe7d13db0b9861ce11828be2", size = 18715, upload-time = "2026-04-24T13:13:02.315Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/a2/0328d49d3b5fb427068e9200e7de5b0d708d021a1ad98d004bc685d2529e/realtime-2.30.0.tar.gz", hash = "sha256:7aa593da52ed5f92c34ec4e50e32043afa62f219c94f717ad64a66ab0ef9f1ba", size = 18718, upload-time = "2026-05-06T17:35:23.925Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/77/48/f6375c0a24923beb988f0c71c052604c96641cf43c2d22b91ec1df86afa0/realtime-2.29.0-py3-none-any.whl", hash = "sha256:1a4891e6c82e88ac9d96ac715e435e086f6f8c7665212a8717346de829cbb509", size = 22374, upload-time = "2026-04-24T13:13:01.103Z" }, + { url = "https://files.pythonhosted.org/packages/b4/75/1b2cfc949595e22d8c05a2aa2cfc222921f7f94177d7e8a90542f3f73b33/realtime-2.30.0-py3-none-any.whl", hash = "sha256:7c93b63d2cf99aa1da4fa8826b03b00cd32f7b38abb27ff47b19eb5dcb5707c6", size = 22376, upload-time = "2026-05-06T17:35:22.568Z" }, ] [[package]] @@ -5914,14 +5917,14 @@ wheels = [ [[package]] name = "s3transfer" -version = "0.16.0" +version = "0.17.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/05/04/74127fc843314818edfa81b5540e26dd537353b123a4edc563109d8f17dd/s3transfer-0.16.0.tar.gz", hash = "sha256:8e990f13268025792229cd52fa10cb7163744bf56e719e0b9cb925ab79abf920", size = 153827, upload-time = "2025-12-01T02:30:59.114Z" } +sdist = { url = "https://files.pythonhosted.org/packages/9b/ec/7c692cde9125b77e84b307354d4fb705f98b8ccad59a036d5957ca75bfc3/s3transfer-0.17.0.tar.gz", hash = "sha256:9edeb6d1c3c2f89d6050348548834ad8289610d886e5bf7b7207728bd43ce33a", size = 155337, upload-time = "2026-04-29T22:07:36.33Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fc/51/727abb13f44c1fcf6d145979e1535a35794db0f6e450a0cb46aa24732fe2/s3transfer-0.16.0-py3-none-any.whl", hash = "sha256:18e25d66fed509e3868dc1572b3f427ff947dd2c56f844a5bf09481ad3f3b2fe", size = 86830, upload-time = "2025-12-01T02:30:57.729Z" }, + { url = "https://files.pythonhosted.org/packages/87/72/c6c32d2b657fa3dad1de340254e14390b1e334ce38268b7ad51abda3c8c2/s3transfer-0.17.0-py3-none-any.whl", hash = "sha256:ce3801712acf4ad3e89fb9990df97b4972e93f4b3b0004d214be5bce12814c20", size = 86811, upload-time = "2026-04-29T22:07:34.966Z" }, ] [[package]] @@ -6214,7 +6217,7 @@ wheels = [ [[package]] name = "storage3" -version = "2.29.0" +version = "2.30.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "deprecation" }, @@ -6223,9 +6226,9 @@ dependencies = [ { name = "pyiceberg" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d7/be/771246434b5caf3c6187bfdc932eaede00bf5f2937b47475ab25209ede3e/storage3-2.29.0.tar.gz", hash = "sha256:b0cc2f6714655d725c998d2c5ae8c6fb4f56a513bd31e4f85770df557fe021e3", size = 20160, upload-time = "2026-04-24T13:13:04.626Z" } +sdist = { url = "https://files.pythonhosted.org/packages/9b/b2/6df208d64630744704d00f2c07197170390d6b4d0098617740f6a7a4fa98/storage3-2.30.0.tar.gz", hash = "sha256:b74e3cac149f2c0553dcb5f4d55d8c35d420d88183a1a2df77727d482665972b", size = 20162, 
upload-time = "2026-05-06T17:35:25.71Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fc/c3/790c31866f52c13b26f108b45759bf50dafae3a0bafb4511fadc98ba7c33/storage3-2.29.0-py3-none-any.whl", hash = "sha256:043ef7ff27cc8b9da12be403cf78ee4586180edfcf62b227ff61e1bd79594b06", size = 28284, upload-time = "2026-04-24T13:13:03.338Z" }, + { url = "https://files.pythonhosted.org/packages/91/5c/bb8c8cc448cfae671c4ffee67f3651892ea59b341f27bed54666190eb8ef/storage3-2.30.0-py3-none-any.whl", hash = "sha256:2bd23a34011c018bd9c130d8a70a09ebd060ae80d946c6204a6fc08161ad728d", size = 28284, upload-time = "2026-05-06T17:35:24.659Z" }, ] [[package]] @@ -6251,7 +6254,7 @@ wheels = [ [[package]] name = "supabase" -version = "2.29.0" +version = "2.30.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx" }, @@ -6262,37 +6265,37 @@ dependencies = [ { name = "supabase-functions" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/51/a0/2407d616fdf68e8632bbbfb063d1685c38377ac0199e8ca11deaea1f3bf0/supabase-2.29.0.tar.gz", hash = "sha256:a88c4a4eb50fbb903e2e962fbc7c27733b00589140139f9e837bc9fe30dd3615", size = 9689, upload-time = "2026-04-24T13:13:06.728Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5c/a6/d2b17021c2db1a9d219c383e0762ac03a62b25468e61ab126b6b561c2f21/supabase-2.30.0.tar.gz", hash = "sha256:efdba41d474038ed220736ba4e64946df56043057ad785c4c3499d27e459975c", size = 9689, upload-time = "2026-05-06T17:35:27.781Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/22/52/232f6bbf5326e04ae12e2ef04a24f011a0d7cab379a8b9698652bc8ff78f/supabase-2.29.0-py3-none-any.whl", hash = "sha256:16c3ec4b7094f6b92efc5cd3bb3f96826d3b6dd5d24fe15c89c81166efce88fe", size = 16633, upload-time = "2026-04-24T13:13:05.722Z" }, + { url = "https://files.pythonhosted.org/packages/f0/82/d213be7d0ce0bb18018744c0ee38ba0d6648d41dbc46ac8558cffe80541f/supabase-2.30.0-py3-none-any.whl", hash = 
"sha256:f9b259194554f7bfd2dca6c23261f2df588016ca18b18e774f4d85bc941edb03", size = 16634, upload-time = "2026-05-06T17:35:26.696Z" }, ] [[package]] name = "supabase-auth" -version = "2.29.0" +version = "2.30.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx", extra = ["http2"] }, { name = "pydantic" }, { name = "pyjwt", extra = ["crypto"] }, ] -sdist = { url = "https://files.pythonhosted.org/packages/51/7f/7ceeb4c7a2caa188062e934897f0e08e1af0a0e47e376c7645c26b4c39d8/supabase_auth-2.29.0.tar.gz", hash = "sha256:46efc6a3455a23957b846dc974303a844ba0413718cfa899425477ac977f95b3", size = 39154, upload-time = "2026-04-24T13:13:08.509Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/8a/48bbbe0b6703d0670b67e45b90d6a791fd01aace67443d286f760bf48895/supabase_auth-2.30.0.tar.gz", hash = "sha256:6138a53a306a95ed59c03d4e4975469dfc3343a0ade33cc4b37e4ef967ad83f8", size = 39135, upload-time = "2026-05-06T17:35:30.371Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f1/ac/3c35cf52281f940b9497cf17abfc5c2050ca49f342d60cfafe22dac3482b/supabase_auth-2.29.0-py3-none-any.whl", hash = "sha256:64de6ef8cae80f97d3aa8d5ca507d5427dda5c89885c0bcfe9f8b0263b6fb9a4", size = 48379, upload-time = "2026-04-24T13:13:07.417Z" }, + { url = "https://files.pythonhosted.org/packages/db/40/a99cb4373353bcbf302d962e51da9eac78b3b0f257eb0362c0852b1667f4/supabase_auth-2.30.0-py3-none-any.whl", hash = "sha256:e85e1f51ec0de2172c3a2a8514205f71731a9914f9a770ed199ac0cf054bc82c", size = 48352, upload-time = "2026-05-06T17:35:28.936Z" }, ] [[package]] name = "supabase-functions" -version = "2.29.0" +version = "2.30.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx", extra = ["http2"] }, { name = "strenum" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e5/19/1a1d22749f38f2a6cbca93a6f5a35c9f816c2c3c06bfaa077fa336e90537/supabase_functions-2.29.0.tar.gz", hash = 
"sha256:0f8a14a2ea9f12b1c208f61dc6f55e2f4b1121f81bf01c08f9b487d22888744d", size = 4683, upload-time = "2026-04-24T13:13:10.432Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f0/e6/5cd8559ec2bb332e6027840c1be292f9989c2fc7b47bf40800aec5586791/supabase_functions-2.30.0.tar.gz", hash = "sha256:025acfd25f1c000ba43d0f7b8e366b0d2e9dfc784b842528e21973eb33006113", size = 4683, upload-time = "2026-05-06T17:35:32.246Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e0/10/6f8ef0b408ade76b5a439afab588ce5849e9604a23040ca73cfe0b90cb9e/supabase_functions-2.29.0-py3-none-any.whl", hash = "sha256:6f08de52eec5820eae53616868b85e849e181beffaa5d05b8ea1708ceae5e48e", size = 8799, upload-time = "2026-04-24T13:13:09.214Z" }, + { url = "https://files.pythonhosted.org/packages/53/da/9dedab32775df04cc22ca72f194b78e895d940f195bed3e02882a65daa9b/supabase_functions-2.30.0-py3-none-any.whl", hash = "sha256:92419459f102767b954cd034856e4ded8e34c78660b32442d66c8b2899c68011", size = 8803, upload-time = "2026-05-06T17:35:31.342Z" }, ] [[package]] @@ -6447,27 +6450,28 @@ wheels = [ [[package]] name = "tokenizers" -version = "0.22.1" +version = "0.22.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "huggingface-hub" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1c/46/fb6854cec3278fbfa4a75b50232c77622bc517ac886156e6afbfa4d8fc6e/tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9", size = 363123, upload-time = "2025-09-19T09:49:23.424Z" } +sdist = { url = "https://files.pythonhosted.org/packages/73/6f/f80cfef4a312e1fb34baf7d85c72d4411afde10978d4657f8cdd811d3ccc/tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917", size = 372115, upload-time = "2026-01-05T10:45:15.988Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/bf/33/f4b2d94ada7ab297328fc671fed209368ddb82f965ec2224eb1892674c3a/tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73", size = 3069318, upload-time = "2025-09-19T09:49:11.848Z" }, - { url = "https://files.pythonhosted.org/packages/1c/58/2aa8c874d02b974990e89ff95826a4852a8b2a273c7d1b4411cdd45a4565/tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc", size = 2926478, upload-time = "2025-09-19T09:49:09.759Z" }, - { url = "https://files.pythonhosted.org/packages/1e/3b/55e64befa1e7bfea963cf4b787b2cea1011362c4193f5477047532ce127e/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a", size = 3256994, upload-time = "2025-09-19T09:48:56.701Z" }, - { url = "https://files.pythonhosted.org/packages/71/0b/fbfecf42f67d9b7b80fde4aabb2b3110a97fac6585c9470b5bff103a80cb/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7", size = 3153141, upload-time = "2025-09-19T09:48:59.749Z" }, - { url = "https://files.pythonhosted.org/packages/17/a9/b38f4e74e0817af8f8ef925507c63c6ae8171e3c4cb2d5d4624bf58fca69/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21", size = 3508049, upload-time = "2025-09-19T09:49:05.868Z" }, - { url = "https://files.pythonhosted.org/packages/d2/48/dd2b3dac46bb9134a88e35d72e1aa4869579eacc1a27238f1577270773ff/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214", size = 3710730, upload-time = "2025-09-19T09:49:01.832Z" }, - { url = 
"https://files.pythonhosted.org/packages/93/0e/ccabc8d16ae4ba84a55d41345207c1e2ea88784651a5a487547d80851398/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f", size = 3412560, upload-time = "2025-09-19T09:49:03.867Z" }, - { url = "https://files.pythonhosted.org/packages/d0/c6/dc3a0db5a6766416c32c034286d7c2d406da1f498e4de04ab1b8959edd00/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4", size = 3250221, upload-time = "2025-09-19T09:49:07.664Z" }, - { url = "https://files.pythonhosted.org/packages/d7/a6/2c8486eef79671601ff57b093889a345dd3d576713ef047776015dc66de7/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879", size = 9345569, upload-time = "2025-09-19T09:49:14.214Z" }, - { url = "https://files.pythonhosted.org/packages/6b/16/32ce667f14c35537f5f605fe9bea3e415ea1b0a646389d2295ec348d5657/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446", size = 9271599, upload-time = "2025-09-19T09:49:16.639Z" }, - { url = "https://files.pythonhosted.org/packages/51/7c/a5f7898a3f6baa3fc2685c705e04c98c1094c523051c805cdd9306b8f87e/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a", size = 9533862, upload-time = "2025-09-19T09:49:19.146Z" }, - { url = "https://files.pythonhosted.org/packages/36/65/7e75caea90bc73c1dd8d40438adf1a7bc26af3b8d0a6705ea190462506e1/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390", size = 9681250, upload-time = "2025-09-19T09:49:21.501Z" }, - { url = 
"https://files.pythonhosted.org/packages/30/2c/959dddef581b46e6209da82df3b78471e96260e2bc463f89d23b1bf0e52a/tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82", size = 2472003, upload-time = "2025-09-19T09:49:27.089Z" }, - { url = "https://files.pythonhosted.org/packages/b3/46/e33a8c93907b631a99377ef4c5f817ab453d0b34f93529421f42ff559671/tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138", size = 2674684, upload-time = "2025-09-19T09:49:24.953Z" }, + { url = "https://files.pythonhosted.org/packages/92/97/5dbfabf04c7e348e655e907ed27913e03db0923abb5dfdd120d7b25630e1/tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c", size = 3100275, upload-time = "2026-01-05T10:41:02.158Z" }, + { url = "https://files.pythonhosted.org/packages/2e/47/174dca0502ef88b28f1c9e06b73ce33500eedfac7a7692108aec220464e7/tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001", size = 2981472, upload-time = "2026-01-05T10:41:00.276Z" }, + { url = "https://files.pythonhosted.org/packages/d6/84/7990e799f1309a8b87af6b948f31edaa12a3ed22d11b352eaf4f4b2e5753/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7", size = 3290736, upload-time = "2026-01-05T10:40:32.165Z" }, + { url = "https://files.pythonhosted.org/packages/78/59/09d0d9ba94dcd5f4f1368d4858d24546b4bdc0231c2354aa31d6199f0399/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd", size = 3168835, upload-time = "2026-01-05T10:40:38.847Z" }, + { url = 
"https://files.pythonhosted.org/packages/47/50/b3ebb4243e7160bda8d34b731e54dd8ab8b133e50775872e7a434e524c28/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5", size = 3521673, upload-time = "2026-01-05T10:40:56.614Z" }, + { url = "https://files.pythonhosted.org/packages/e0/fa/89f4cb9e08df770b57adb96f8cbb7e22695a4cb6c2bd5f0c4f0ebcf33b66/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e", size = 3724818, upload-time = "2026-01-05T10:40:44.507Z" }, + { url = "https://files.pythonhosted.org/packages/64/04/ca2363f0bfbe3b3d36e95bf67e56a4c88c8e3362b658e616d1ac185d47f2/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b", size = 3379195, upload-time = "2026-01-05T10:40:51.139Z" }, + { url = "https://files.pythonhosted.org/packages/2e/76/932be4b50ef6ccedf9d3c6639b056a967a86258c6d9200643f01269211ca/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67", size = 3274982, upload-time = "2026-01-05T10:40:58.331Z" }, + { url = "https://files.pythonhosted.org/packages/1d/28/5f9f5a4cc211b69e89420980e483831bcc29dade307955cc9dc858a40f01/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4", size = 9478245, upload-time = "2026-01-05T10:41:04.053Z" }, + { url = "https://files.pythonhosted.org/packages/6c/fb/66e2da4704d6aadebf8cb39f1d6d1957df667ab24cff2326b77cda0dcb85/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a", size = 9560069, upload-time = "2026-01-05T10:45:10.673Z" }, + { url = 
"https://files.pythonhosted.org/packages/16/04/fed398b05caa87ce9b1a1bb5166645e38196081b225059a6edaff6440fac/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a", size = 9899263, upload-time = "2026-01-05T10:45:12.559Z" }, + { url = "https://files.pythonhosted.org/packages/05/a1/d62dfe7376beaaf1394917e0f8e93ee5f67fea8fcf4107501db35996586b/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5", size = 10033429, upload-time = "2026-01-05T10:45:14.333Z" }, + { url = "https://files.pythonhosted.org/packages/fd/18/a545c4ea42af3df6effd7d13d250ba77a0a86fb20393143bbb9a92e434d4/tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92", size = 2502363, upload-time = "2026-01-05T10:45:20.593Z" }, + { url = "https://files.pythonhosted.org/packages/65/71/0670843133a43d43070abeb1949abfdef12a86d490bea9cd9e18e37c5ff7/tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48", size = 2747786, upload-time = "2026-01-05T10:45:18.411Z" }, + { url = "https://files.pythonhosted.org/packages/72/f4/0de46cfa12cdcbcd464cc59fde36912af405696f687e53a091fb432f694c/tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc", size = 2612133, upload-time = "2026-01-05T10:45:17.232Z" }, ] [[package]] @@ -6585,23 +6589,23 @@ wheels = [ [[package]] name = "types-cachetools" -version = "6.2.0.20260408" +version = "7.0.0.20260503" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/ec/61/475b0e8f4a92e5e33affcc6f4e6344c6dee540824021d22f695ea170da63/types_cachetools-6.2.0.20260408.tar.gz", hash = "sha256:0d8ae2dd5ba0b4cfe6a55c34396dd0415f1be07d0033d84781cdc4ed9c2ebc6b", size = 9854, upload-time = 
"2026-04-08T04:31:49.665Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/57/5d3b8b3e66b002911ec1274e87f904eeee1d843c8713d95476c25c29cf31/types_cachetools-7.0.0.20260503.tar.gz", hash = "sha256:dfa4dcdf453f397dfc6d69fc0a57423ac1f248393f70aa56b5d05fac2df7a96c", size = 10033, upload-time = "2026-05-03T05:19:54.128Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bb/7d/579f50f4f004ee93c7d1baa95339591cac1fe02f4e3fb8fc0f900ee4a80f/types_cachetools-6.2.0.20260408-py3-none-any.whl", hash = "sha256:470e0b274737feae74beed3d764885bf4664002ecc393fba3778846b13ce92cb", size = 9350, upload-time = "2026-04-08T04:31:48.826Z" }, + { url = "https://files.pythonhosted.org/packages/3d/a8/84562723d9a3572e0851d82bdea6bed5a7dc033c6bd648f492c76b8c4ac8/types_cachetools-7.0.0.20260503-py3-none-any.whl", hash = "sha256:011b4fe0e85ef05c4a2471a4fda40254a78746b501cc1727359233872bb3a4e9", size = 9493, upload-time = "2026-05-03T05:19:53.124Z" }, ] [[package]] name = "types-cffi" -version = "2.0.0.20260408" +version = "2.0.0.20260429" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "types-setuptools" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/64/67/eb4ef3408fdc0b4e5af38b30c0e6ad4663b41bdae9fb85a9f09a8db61a99/types_cffi-2.0.0.20260408.tar.gz", hash = "sha256:aa8b9c456ab715c079fc655929811f21f331bfb940f4a821987c581bf4e36230", size = 17541, upload-time = "2026-04-08T04:36:03.918Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0c/7d/56b9be8b0f9dfbffb7c73e248aacf178693ff3c6cf765b77c43a1e886e04/types_cffi-2.0.0.20260429.tar.gz", hash = "sha256:afe7d9777a2921139623af0b94647637a5bd0b938b77ec125e5e5e068a1727bd", size = 17562, upload-time = "2026-04-29T05:16:43.29Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c3/a3/7fbd93ededcc7c77e9e5948b9794161733ebdbf618a27965b1bea0e728a4/types_cffi-2.0.0.20260408-py3-none-any.whl", hash = 
"sha256:68bd296742b4ff7c0afe3547f50bd0acc55416ecf322ffefd2b7344ef6388a42", size = 20101, upload-time = "2026-04-08T04:36:02.995Z" }, + { url = "https://files.pythonhosted.org/packages/b8/2c/79fa47a70d534f63a54b6d22e28cc842f8c6d9ebec93048355b0020bc7a9/types_cffi-2.0.0.20260429-py3-none-any.whl", hash = "sha256:6a4237bfdbd50e4d0726929070d8b9983bde541726a5a6fe0e8e24e78c1b3826", size = 20103, upload-time = "2026-04-29T05:16:42.155Z" }, ] [[package]] @@ -6680,11 +6684,11 @@ wheels = [ [[package]] name = "types-greenlet" -version = "3.4.0.20260409" +version = "3.5.0.20260428" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/27/a6/668751bc864efe820e1eb12c2a77f9e62537f433cc002e483ad01badb04b/types_greenlet-3.4.0.20260409.tar.gz", hash = "sha256:81d2cf628934a16856bb9e54136def8de5356e934f0ad5d5474f219a0c5cb205", size = 8976, upload-time = "2026-04-09T04:22:31.693Z" } +sdist = { url = "https://files.pythonhosted.org/packages/79/50/d255c0e068679d7b9441d9408424ddf9e1f35620548e121003b3660af526/types_greenlet-3.5.0.20260428.tar.gz", hash = "sha256:6c188f5e9c5775d50bd00780a3eb1fb3cde17c396cf9703e3d417936e9e7a082", size = 9003, upload-time = "2026-04-28T05:19:43.062Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/4f/3f/c8a4d8782f78fccb4b5fe91c5eae2efce6648072754bc7096b1e3b5407ad/types_greenlet-3.4.0.20260409-py3-none-any.whl", hash = "sha256:cbceadb4594eccd95b57b3f7fa8a9b851488f5e6c05026f4a3db9aac02ec8333", size = 8812, upload-time = "2026-04-09T04:22:30.734Z" }, + { url = "https://files.pythonhosted.org/packages/30/e5/5ff280f02392ced53cb5e866b660b492b4245b1395a61e57d2a6dc02977b/types_greenlet-3.5.0.20260428-py3-none-any.whl", hash = "sha256:7b0f23ce84ee93474d4aa8058920f0578181e11431be92ce9a4ad4123de2c41b", size = 8809, upload-time = "2026-04-28T05:19:41.976Z" }, ] [[package]] @@ -6764,11 +6768,11 @@ wheels = [ [[package]] name = "types-protobuf" -version = "7.34.1.20260408" +version = "7.34.1.20260503" 
source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5b/b1/4521e68c2cc17703d80eb42796751345376dd4c706f84007ef5e7c707774/types_protobuf-7.34.1.20260408.tar.gz", hash = "sha256:e2c0a0430e08c75b52671a6f0035abfdcc791aad12af16274282de1b721758ab", size = 68835, upload-time = "2026-04-08T04:26:43.613Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a0/31/87969cb3e62287bde7598b78b3c098d2873d54f5fb5a7cfbcaa73b8c965e/types_protobuf-7.34.1.20260503.tar.gz", hash = "sha256:effbc819aa17e02448dde99f089c6794662d66f4b2797e922f185ffe0b24e766", size = 68830, upload-time = "2026-05-03T05:19:50.739Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/b5/0bc9874d89c58fb0ce851e150055ce732d254dbb10b06becbc7635d0d635/types_protobuf-7.34.1.20260408-py3-none-any.whl", hash = "sha256:ebbcd4e27b145aef6a59bc0cb6c013b3528151c1ba5e7f7337aeee355d276a5e", size = 86012, upload-time = "2026-04-08T04:26:42.566Z" }, + { url = "https://files.pythonhosted.org/packages/f9/67/a33fb18090a927794a5ee4b1a30730b528ace0dad6b18932540d21258184/types_protobuf-7.34.1.20260503-py3-none-any.whl", hash = "sha256:75fd66121d56785c91828b8bf7b511f39ba847f11e682573e41847f01e9cd1de", size = 86019, upload-time = "2026-05-03T05:19:49.486Z" }, ] [[package]] @@ -7143,11 +7147,11 @@ wheels = [ [[package]] name = "urllib3" -version = "2.6.3" +version = "2.7.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" } +sdist = { url = "https://files.pythonhosted.org/packages/53/0c/06f8b233b8fd13b9e5ee11424ef85419ba0d8ba0b3138bf360be2ff56953/urllib3-2.7.0.tar.gz", hash = "sha256:231e0ec3b63ceb14667c67be60f2f2c40a518cb38b03af60abc813da26505f4c", size = 433602, upload-time = 
"2026-05-07T16:13:18.596Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" }, + { url = "https://files.pythonhosted.org/packages/7f/3e/5db95bcf282c52709639744ca2a8b149baccf648e39c8cc87553df9eae0c/urllib3-2.7.0-py3-none-any.whl", hash = "sha256:9fb4c81ebbb1ce9531cce37674bbc6f1360472bc18ca9a553ede278ef7276897", size = 131087, upload-time = "2026-05-07T16:13:17.151Z" }, ] [[package]] diff --git a/dev/pytest/pytest_config_tests.py b/dev/pytest/pytest_config_tests.py index d56cceff5e..b136f09c61 100644 --- a/dev/pytest/pytest_config_tests.py +++ b/dev/pytest/pytest_config_tests.py @@ -93,10 +93,16 @@ BASE_API_AND_DOCKER_COMPOSE_CONFIG_SET_DIFF: frozenset[str] = frozenset( API_CONFIG_SET = set(dotenv_values(Path("api") / Path(".env.example")).keys()) DOCKER_CONFIG_SET = set(dotenv_values(Path("docker") / Path(".env.example")).keys()) -DOCKER_COMPOSE_CONFIG_SET = set() +DOCKER_COMPOSE_CONFIG_SET = set(DOCKER_CONFIG_SET) -with open(Path("docker") / Path("docker-compose.yaml")) as f: - DOCKER_COMPOSE_CONFIG_SET = set(yaml.safe_load(f.read())["x-shared-env"].keys()) +# Read environment variables from the split env files used by docker-compose +# Walk through all .env.example files in subdirectories (per-module structure) +envs_dir = Path("docker") / Path("envs") +if envs_dir.exists(): + for env_file_path in envs_dir.rglob("*.env.example"): + env_keys = set(dotenv_values(env_file_path).keys()) + DOCKER_CONFIG_SET.update(env_keys) + DOCKER_COMPOSE_CONFIG_SET.update(env_keys) def test_yaml_config(): diff --git a/dev/pytest/pytest_full.sh b/dev/pytest/pytest_full.sh index 2989a74ad8..ca09aeb729 100755 --- a/dev/pytest/pytest_full.sh +++ b/dev/pytest/pytest_full.sh @@ -15,7 +15,7 @@ mkdir -p "${OPENDAL_FS_ROOT}" # Prepare env 
files like CI cp -n docker/.env.example docker/.env || true -cp -n docker/middleware.env.example docker/middleware.env || true +cp -n docker/envs/middleware.env.example docker/middleware.env || true cp -n api/tests/integration_tests/.env.example api/tests/integration_tests/.env || true # Expose service ports (same as CI) without leaving the repo dirty diff --git a/dev/setup b/dev/setup index 4236ff7fa7..1d2501a48b 100755 --- a/dev/setup +++ b/dev/setup @@ -8,7 +8,7 @@ API_ENV_EXAMPLE="$ROOT/api/.env.example" API_ENV="$ROOT/api/.env" WEB_ENV_EXAMPLE="$ROOT/web/.env.example" WEB_ENV="$ROOT/web/.env.local" -MIDDLEWARE_ENV_EXAMPLE="$ROOT/docker/middleware.env.example" +MIDDLEWARE_ENV_EXAMPLE="$ROOT/docker/envs/middleware.env.example" MIDDLEWARE_ENV="$ROOT/docker/middleware.env" # 1) Copy api/.env.example -> api/.env @@ -17,7 +17,7 @@ cp "$API_ENV_EXAMPLE" "$API_ENV" # 2) Copy web/.env.example -> web/.env.local cp "$WEB_ENV_EXAMPLE" "$WEB_ENV" -# 3) Copy docker/middleware.env.example -> docker/middleware.env +# 3) Copy docker/envs/middleware.env.example -> docker/middleware.env cp "$MIDDLEWARE_ENV_EXAMPLE" "$MIDDLEWARE_ENV" # 4) Install deps diff --git a/docker/.env.example b/docker/.env.example index 29741474fa..5a012973c0 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -1,1249 +1,162 @@ -# ------------------------------ -# Environment Variables for API service & worker -# ------------------------------ +# ------------------------------------------------------------------ +# Essential defaults for Docker Compose deployments. +# Only include variables required for services to start. +# +# For a default deployment, copy this file to .env and run: +# docker compose up -d +# +# Optional and provider-specific variables live under docker/envs/. +# Copy an optional *.env.example file beside itself without the +# .example suffix when you need those advanced settings. +# Values in docker/.env take precedence over docker/envs/*.env files. 
+# ------------------------------------------------------------------ -# ------------------------------ -# Common Variables -# ------------------------------ - -# The backend URL of the console API, -# used to concatenate the authorization callback. -# If empty, it is the same domain. -# Example: https://api.console.dify.ai +# Core service URLs CONSOLE_API_URL= - -# The front-end URL of the console web, -# used to concatenate some front-end addresses and for CORS configuration use. -# If empty, it is the same domain. -# Example: https://console.dify.ai CONSOLE_WEB_URL= - -# Service API Url, -# used to display Service API Base Url to the front-end. -# If empty, it is the same domain. -# Example: https://api.dify.ai SERVICE_API_URL= - -# Trigger external URL -# used to display trigger endpoint API Base URL to the front-end. -# Example: https://api.dify.ai TRIGGER_URL=http://localhost - -# WebApp API backend Url, -# used to declare the back-end URL for the front-end API. -# If empty, it is the same domain. -# Example: https://api.app.dify.ai APP_API_URL= - -# WebApp Url, -# used to display WebAPP API Base Url to the front-end. -# If empty, it is the same domain. -# Example: https://app.dify.ai APP_WEB_URL= - -# File preview or download Url prefix. -# used to display File preview or download Url to the front-end or as Multi-model inputs; -# Url is signed and has expiration time. -# Setting FILES_URL is required for file processing plugins. -# - For https://example.com, use FILES_URL=https://example.com -# - For http://example.com, use FILES_URL=http://example.com -# Recommendation: use a dedicated domain (e.g., https://upload.example.com). -# Alternatively, use http://:5001 or http://api:5001, -# ensuring port 5001 is externally accessible (see docker-compose.yaml). FILES_URL= - -# INTERNAL_FILES_URL is used for plugin daemon communication within Docker network. -# Set this to the internal Docker service URL for proper plugin file access. 
-# Example: INTERNAL_FILES_URL=http://api:5001 INTERNAL_FILES_URL= +ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} +NEXT_PUBLIC_SOCKET_URL=ws://localhost -# Ensure UTF-8 encoding +# Runtime and security LANG=C.UTF-8 LC_ALL=C.UTF-8 PYTHONIOENCODING=utf-8 - -# Set UV cache directory to avoid permission issues with non-existent home directory UV_CACHE_DIR=/tmp/.uv-cache - -# ------------------------------ -# Server Configuration -# ------------------------------ - -# The log level for the application. -# Supported values are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL` -LOG_LEVEL=INFO -# Log output format: text or json -LOG_OUTPUT_FORMAT=text -# Log file path -LOG_FILE=/app/logs/server.log -# Log file max size, the unit is MB -LOG_FILE_MAX_SIZE=20 -# Log file max backup count -LOG_FILE_BACKUP_COUNT=5 -# Log dateformat -LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S -# Log Timezone -LOG_TZ=UTC - -# Debug mode, default is false. -# It is recommended to turn on this configuration for local development -# to prevent some problems caused by monkey patch. -DEBUG=false - -# Flask debug mode, it can output trace information at the interface when turned on, -# which is convenient for debugging. -FLASK_DEBUG=false - -# Enable request logging, which will log the request and response information. -# And the log level is DEBUG -ENABLE_REQUEST_LOGGING=False - -# A secret key that is used for securely signing the session cookie -# and encrypting sensitive information on the database. -# You can generate a strong key using `openssl rand -base64 42`. SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U - -# Password for admin user initialization. -# If left unset, admin user will not be prompted for a password -# when creating the initial admin account. -# The length of the password cannot exceed 30 characters. INIT_PASSWORD= - -# Deployment environment. -# Supported values are `PRODUCTION`, `TESTING`. Default is `PRODUCTION`. -# Testing environment. 
There will be a distinct color label on the front-end page, -# indicating that this environment is a testing environment. DEPLOY_ENV=PRODUCTION - -# Whether to enable the version check policy. -# If set to empty, https://updates.dify.ai will be called for version check. CHECK_UPDATE_URL=https://updates.dify.ai - -# Used to change the OpenAI base address, default is https://api.openai.com/v1. -# When OpenAI cannot be accessed in China, replace it with a domestic mirror address, -# or when a local model provides OpenAI compatible API, it can be replaced. OPENAI_API_BASE=https://api.openai.com/v1 - -# When enabled, migrations will be executed prior to application startup -# and the application will start after the migrations have completed. MIGRATION_ENABLED=true - -# File Access Time specifies a time interval in seconds for the file to be accessed. -# The default value is 300 seconds. FILES_ACCESS_TIMEOUT=300 +# Remove `collaboration` from COMPOSE_PROFILES to stop the dedicated websocket service. +ENABLE_COLLABORATION_MODE=true -# Collaboration mode toggle -# To open collaboration features, you also need to set SERVER_WORKER_CLASS=geventwebsocket.gunicorn.workers.GeventWebSocketWorker -ENABLE_COLLABORATION_MODE=false - -# Access token expiration time in minutes -ACCESS_TOKEN_EXPIRE_MINUTES=60 - -# Refresh token expiration time in days -REFRESH_TOKEN_EXPIRE_DAYS=30 - -# The default number of active requests for the application, where 0 means unlimited, should be a non-negative integer. -APP_DEFAULT_ACTIVE_REQUESTS=0 -# The maximum number of active requests for the application, where 0 means unlimited, should be a non-negative integer. -APP_MAX_ACTIVE_REQUESTS=0 -APP_MAX_EXECUTION_TIME=1200 - -# ------------------------------ -# Container Startup Related Configuration -# Only effective when starting with docker image or docker-compose. -# ------------------------------ - -# API service binding address, default: 0.0.0.0, i.e., all addresses can be accessed. 
+# Logging and server workers +LOG_LEVEL=INFO +LOG_OUTPUT_FORMAT=text +LOG_FILE=/app/logs/server.log +LOG_FILE_MAX_SIZE=20 +LOG_FILE_BACKUP_COUNT=5 +LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S +LOG_TZ=UTC +DEBUG=false +FLASK_DEBUG=false +ENABLE_REQUEST_LOGGING=False DIFY_BIND_ADDRESS=0.0.0.0 - -# API service binding port number, default 5001. DIFY_PORT=5001 - -# The number of API server workers, i.e., the number of workers. -# Formula: number of cpu cores x 2 + 1 for sync, 1 for Gevent -# Reference: https://docs.gunicorn.org/en/stable/design.html#how-many-workers SERVER_WORKER_AMOUNT=1 - -# Defaults to gevent. If using windows, it can be switched to sync or solo. -# -# Warning: Changing this parameter requires disabling patching for -# psycopg2 and gRPC (see `gunicorn.conf.py` and `celery_entrypoint.py`). -# Modifying it may also decrease throughput. -# -# It is strongly discouraged to change this parameter. -# If enable collaboration mode, it must be set to geventwebsocket.gunicorn.workers.GeventWebSocketWorker SERVER_WORKER_CLASS=gevent - -# Default number of worker connections, the default is 10. SERVER_WORKER_CONNECTIONS=10 - -# Similar to SERVER_WORKER_CLASS. -# If using windows, it can be switched to sync or solo. -# -# Warning: Changing this parameter requires disabling patching for -# psycopg2 and gRPC (see `gunicorn_conf.py` and `celery_entrypoint.py`). -# Modifying it may also decrease throughput. -# -# It is strongly discouraged to change this parameter. -CELERY_WORKER_CLASS= - -# Request handling timeout. The default is 200, -# it is recommended to set it to 360 to support a longer sse connection time. +API_WEBSOCKET_WORKER_CLASS=geventwebsocket.gunicorn.workers.GeventWebSocketWorker +API_WEBSOCKET_WORKER_CONNECTIONS=1000 +API_WEBSOCKET_GUNICORN_TIMEOUT=360 GUNICORN_TIMEOUT=360 - -# The number of Celery workers. The default is 4 for development environments -# to allow parallel processing of workflows, document indexing, and other async tasks. 
-# Adjust based on your system resources and workload requirements. +CELERY_WORKER_CLASS= CELERY_WORKER_AMOUNT=4 - -# Flag indicating whether to enable autoscaling of Celery workers. -# -# Autoscaling is useful when tasks are CPU intensive and can be dynamically -# allocated and deallocated based on the workload. -# -# When autoscaling is enabled, the maximum and minimum number of workers can -# be specified. The autoscaling algorithm will dynamically adjust the number -# of workers within the specified range. -# -# Default is false (i.e., autoscaling is disabled). -# -# Example: -# CELERY_AUTO_SCALE=true CELERY_AUTO_SCALE=false - -# The maximum number of Celery workers that can be autoscaled. -# This is optional and only used when autoscaling is enabled. -# Default is not set. CELERY_MAX_WORKERS= - -# The minimum number of Celery workers that can be autoscaled. -# This is optional and only used when autoscaling is enabled. -# Default is not set. CELERY_MIN_WORKERS= +COMPOSE_WORKER_HEALTHCHECK_DISABLED=true +COMPOSE_WORKER_HEALTHCHECK_INTERVAL=30s +COMPOSE_WORKER_HEALTHCHECK_TIMEOUT=30s -# API Tool configuration -API_TOOL_DEFAULT_CONNECT_TIMEOUT=10 -API_TOOL_DEFAULT_READ_TIMEOUT=60 - -# ------------------------------- -# Datasource Configuration -# -------------------------------- -ENABLE_WEBSITE_JINAREADER=true -ENABLE_WEBSITE_FIRECRAWL=true -ENABLE_WEBSITE_WATERCRAWL=true - -# Enable inline LaTeX rendering with single dollar signs ($...$) in the web frontend -# Default is false for security reasons to prevent conflicts with regular text -NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX=false - -# ------------------------------ -# Database Configuration -# The database uses PostgreSQL or MySQL. OceanBase and seekdb are also supported. Please use the public schema. -# It is consistent with the configuration in the database service below. -# You can adjust the database configuration according to your needs. 
-# ------------------------------ - -# Database type, supported values are `postgresql`, `mysql`, `oceanbase`, `seekdb` +# Database DB_TYPE=postgresql -# For MySQL, only `root` user is supported for now DB_USERNAME=postgres DB_PASSWORD=difyai123456 DB_HOST=db_postgres DB_PORT=5432 DB_DATABASE=dify - -# The size of the database connection pool. -# The default is 30 connections, which can be appropriately increased. SQLALCHEMY_POOL_SIZE=30 -# The default is 10 connections, which allows temporary overflow beyond the pool size. SQLALCHEMY_MAX_OVERFLOW=10 -# Database connection pool recycling time, the default is 3600 seconds. SQLALCHEMY_POOL_RECYCLE=3600 -# Whether to print SQL, default is false. SQLALCHEMY_ECHO=false -# If True, will test connections for liveness upon each checkout SQLALCHEMY_POOL_PRE_PING=false -# Whether to enable the Last in first out option or use default FIFO queue if is false SQLALCHEMY_POOL_USE_LIFO=false -# Number of seconds to wait for a connection from the pool before raising a timeout error. -# Default is 30 SQLALCHEMY_POOL_TIMEOUT=30 - -# Maximum number of connections to the database -# Default is 100 -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-connection.html#GUC-MAX-CONNECTIONS +SQLALCHEMY_POOL_RESET_ON_RETURN=rollback +PGDATA=/var/lib/postgresql/data/pgdata POSTGRES_MAX_CONNECTIONS=200 - -# Sets the amount of shared memory used for postgres's shared buffers. -# Default is 128MB -# Recommended value: 25% of available memory -# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-SHARED-BUFFERS POSTGRES_SHARED_BUFFERS=128MB - -# Sets the amount of memory used by each database worker for working space. -# Default is 4MB -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-WORK-MEM POSTGRES_WORK_MEM=4MB - -# Sets the amount of memory reserved for maintenance activities. 
-# Default is 64MB -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-resource.html#GUC-MAINTENANCE-WORK-MEM POSTGRES_MAINTENANCE_WORK_MEM=64MB - -# Sets the planner's assumption about the effective cache size. -# Default is 4096MB -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-EFFECTIVE-CACHE-SIZE POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB - -# Sets the maximum allowed duration of any statement before termination. -# Default is 0 (no timeout). -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-STATEMENT-TIMEOUT -# A value of 0 prevents the server from timing out statements. POSTGRES_STATEMENT_TIMEOUT=0 - -# Sets the maximum allowed duration of any idle in-transaction session before termination. -# Default is 0 (no timeout). -# -# Reference: https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-IDLE-IN-TRANSACTION-SESSION-TIMEOUT -# A value of 0 prevents the server from terminating idle sessions. 
POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=0 -# MySQL Performance Configuration -# Maximum number of connections to MySQL -# -# Default is 1000 -MYSQL_MAX_CONNECTIONS=1000 - -# InnoDB buffer pool size -# Default is 512M -# Recommended value: 70-80% of available memory for dedicated MySQL server -# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_buffer_pool_size -MYSQL_INNODB_BUFFER_POOL_SIZE=512M - -# InnoDB log file size -# Default is 128M -# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_log_file_size -MYSQL_INNODB_LOG_FILE_SIZE=128M - -# InnoDB flush log at transaction commit -# Default is 2 (flush to OS cache, sync every second) -# Options: 0 (no flush), 1 (flush and sync), 2 (flush to OS cache) -# Reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html#sysvar_innodb_flush_log_at_trx_commit -MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT=2 - -# ------------------------------ -# Redis Configuration -# This Redis configuration is used for caching and for pub/sub during conversation. -# ------------------------------ - +# Redis and Celery REDIS_HOST=redis REDIS_PORT=6379 REDIS_USERNAME= REDIS_PASSWORD=difyai123456 REDIS_USE_SSL=false -# SSL configuration for Redis (when REDIS_USE_SSL=true) REDIS_SSL_CERT_REQS=CERT_NONE -# Options: CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED REDIS_SSL_CA_CERTS= -# Path to CA certificate file for SSL verification REDIS_SSL_CERTFILE= -# Path to client certificate file for SSL authentication REDIS_SSL_KEYFILE= -# Path to client private key file for SSL authentication REDIS_DB=0 -# Optional global prefix for Redis keys, topics, streams, and Celery Redis transport artifacts. -# Leave empty to preserve current unprefixed behavior. REDIS_KEY_PREFIX= -# Optional: limit total Redis connections used by API/Worker (unset for default) -# Align with API's REDIS_MAX_CONNECTIONS in configs REDIS_MAX_CONNECTIONS= - -# Whether to use Redis Sentinel mode. 
-# If set to true, the application will automatically discover and connect to the master node through Sentinel. -REDIS_USE_SENTINEL=false - -# List of Redis Sentinel nodes. If Sentinel mode is enabled, provide at least one Sentinel IP and port. -# Format: `:,:,:` -REDIS_SENTINELS= -REDIS_SENTINEL_SERVICE_NAME= -REDIS_SENTINEL_USERNAME= -REDIS_SENTINEL_PASSWORD= -REDIS_SENTINEL_SOCKET_TIMEOUT=0.1 - -# List of Redis Cluster nodes. If Cluster mode is enabled, provide at least one Cluster IP and port. -# Format: `:,:,:` -REDIS_USE_CLUSTERS=false -REDIS_CLUSTERS= -REDIS_CLUSTERS_PASSWORD= - -# Redis connection and retry configuration -# max redis retry REDIS_RETRY_RETRIES=3 -# Base delay (in seconds) for exponential backoff on retries REDIS_RETRY_BACKOFF_BASE=1.0 -# Cap (in seconds) for exponential backoff on retries REDIS_RETRY_BACKOFF_CAP=10.0 -# Timeout (in seconds) for Redis socket operations REDIS_SOCKET_TIMEOUT=5.0 -# Timeout (in seconds) for establishing a Redis connection REDIS_SOCKET_CONNECT_TIMEOUT=5.0 -# Interval (in seconds) for Redis health checks REDIS_HEALTH_CHECK_INTERVAL=30 - -# ------------------------------ -# Celery Configuration -# ------------------------------ - -# Use standalone redis as the broker, and redis db 1 for celery broker. (redis_username is usually set by default as empty) -# Format as follows: `redis://:@:/`. -# Example: redis://:difyai123456@redis:6379/1 -# If use Redis Sentinel, format as follows: `sentinel://:@:/` -# For high availability, you can configure multiple Sentinel nodes (if provided) separated by semicolons like below example: -# Example: sentinel://:difyai123456@localhost:26379/1;sentinel://:difyai12345@localhost:26379/1;sentinel://:difyai12345@localhost:26379/1 CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1 CELERY_BACKEND=redis BROKER_USE_SSL=false - -# If you are using Redis Sentinel for high availability, configure the following settings. 
-CELERY_USE_SENTINEL=false -CELERY_SENTINEL_MASTER_NAME= -CELERY_SENTINEL_PASSWORD= -CELERY_SENTINEL_SOCKET_TIMEOUT=0.1 -# e.g. {"tasks.add": {"rate_limit": "10/s"}} CELERY_TASK_ANNOTATIONS=null +EVENT_BUS_REDIS_URL= +EVENT_BUS_REDIS_CHANNEL_TYPE=pubsub +EVENT_BUS_REDIS_USE_CLUSTERS=false -# ------------------------------ -# CORS Configuration -# Used to set the front-end cross-domain access policy. -# ------------------------------ - -# Specifies the allowed origins for cross-origin requests to the Web API, -# e.g. https://dify.app or * for all origins. +# Web and app limits WEB_API_CORS_ALLOW_ORIGINS=* - -# Specifies the allowed origins for cross-origin requests to the console API, -# e.g. https://cloud.dify.ai or * for all origins. CONSOLE_CORS_ALLOW_ORIGINS=* -# When the frontend and backend run on different subdomains, set COOKIE_DOMAIN to the site's top-level domain (e.g., `example.com`). Leading dots are optional. COOKIE_DOMAIN= -# When the frontend and backend run on different subdomains, set NEXT_PUBLIC_COOKIE_DOMAIN=1. NEXT_PUBLIC_COOKIE_DOMAIN= -# WebSocket server URL. -NEXT_PUBLIC_SOCKET_URL=ws://localhost NEXT_PUBLIC_BATCH_CONCURRENCY=5 +API_SENTRY_DSN= +API_SENTRY_TRACES_SAMPLE_RATE=1.0 +API_SENTRY_PROFILES_SAMPLE_RATE=1.0 +WEB_SENTRY_DSN= +AMPLITUDE_API_KEY= +TEXT_GENERATION_TIMEOUT_MS=60000 +CSP_WHITELIST= +ALLOW_EMBED=false +ALLOW_INLINE_STYLES=false +ALLOW_UNSAFE_DATA_SCHEME=false +TOP_K_MAX_VALUE=10 +INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000 +LOOP_NODE_MAX_COUNT=100 +MAX_TOOLS_NUM=10 +MAX_PARALLEL_LIMIT=10 +MAX_ITERATIONS_NUM=99 +MAX_TREE_DEPTH=50 +ENABLE_WEBSITE_JINAREADER=true +ENABLE_WEBSITE_FIRECRAWL=true +ENABLE_WEBSITE_WATERCRAWL=true +NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX=false +EXPERIMENTAL_ENABLE_VINEXT=false -# ------------------------------ -# File Storage Configuration -# ------------------------------ - -# The type of storage to use for storing user files. 
+# Storage and default vector store STORAGE_TYPE=opendal - -# Apache OpenDAL Configuration -# The configuration for OpenDAL consists of the following format: OPENDAL__. -# You can find all the service configurations (CONFIG_NAME) in the repository at: https://github.com/apache/opendal/tree/main/core/src/services. -# Dify will scan configurations starting with OPENDAL_ and automatically apply them. -# The scheme name for the OpenDAL storage. OPENDAL_SCHEME=fs -# Configurations for OpenDAL Local File System. OPENDAL_FS_ROOT=storage - -# ClickZetta Volume Configuration (for storage backend) -# To use ClickZetta Volume as storage backend, set STORAGE_TYPE=clickzetta-volume -# Note: ClickZetta Volume will reuse the existing CLICKZETTA_* connection parameters - -# Volume type selection (three types available): -# - user: Personal/small team use, simple config, user-level permissions -# - table: Enterprise multi-tenant, smart routing, table-level + user-level permissions -# - external: Data lake integration, external storage connection, volume-level + storage-level permissions -CLICKZETTA_VOLUME_TYPE=user - -# External Volume name (required only when TYPE=external) -CLICKZETTA_VOLUME_NAME= - -# Table Volume table prefix (used only when TYPE=table) -CLICKZETTA_VOLUME_TABLE_PREFIX=dataset_ - -# Dify file directory prefix (isolates from other apps, recommended to keep default) -CLICKZETTA_VOLUME_DIFY_PREFIX=dify_km - -# S3 Configuration -# -S3_ENDPOINT= -S3_REGION=us-east-1 -S3_BUCKET_NAME=difyai -S3_ACCESS_KEY= -S3_SECRET_KEY= -S3_ADDRESS_STYLE=auto -# Whether to use AWS managed IAM roles for authenticating with the S3 service. -# If set to false, the access key and secret key must be provided. 
-S3_USE_AWS_MANAGED_IAM=false - -# Workflow run and Conversation archive storage (S3-compatible) -ARCHIVE_STORAGE_ENABLED=false -ARCHIVE_STORAGE_ENDPOINT= -ARCHIVE_STORAGE_ARCHIVE_BUCKET= -ARCHIVE_STORAGE_EXPORT_BUCKET= -ARCHIVE_STORAGE_ACCESS_KEY= -ARCHIVE_STORAGE_SECRET_KEY= -ARCHIVE_STORAGE_REGION=auto - -# Azure Blob Configuration -# -AZURE_BLOB_ACCOUNT_NAME=difyai -AZURE_BLOB_ACCOUNT_KEY=difyai -AZURE_BLOB_CONTAINER_NAME=difyai-container -AZURE_BLOB_ACCOUNT_URL=https://.blob.core.windows.net - -# Google Storage Configuration -# -GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name -GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64= - -# The Alibaba Cloud OSS configurations, -# -ALIYUN_OSS_BUCKET_NAME=your-bucket-name -ALIYUN_OSS_ACCESS_KEY=your-access-key -ALIYUN_OSS_SECRET_KEY=your-secret-key -ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com -ALIYUN_OSS_REGION=ap-southeast-1 -ALIYUN_OSS_AUTH_VERSION=v4 -# Don't start with '/'. OSS doesn't support leading slash in object names. -ALIYUN_OSS_PATH=your-path -# Optional CloudBox ID for Aliyun OSS, DO NOT enable it if you are not using CloudBox. 
-#ALIYUN_CLOUDBOX_ID=your-cloudbox-id - -# Tencent COS Configuration -# -TENCENT_COS_BUCKET_NAME=your-bucket-name -TENCENT_COS_SECRET_KEY=your-secret-key -TENCENT_COS_SECRET_ID=your-secret-id -TENCENT_COS_REGION=your-region -TENCENT_COS_SCHEME=your-scheme -TENCENT_COS_CUSTOM_DOMAIN=your-custom-domain - -# Oracle Storage Configuration -# -OCI_ENDPOINT=https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com -OCI_BUCKET_NAME=your-bucket-name -OCI_ACCESS_KEY=your-access-key -OCI_SECRET_KEY=your-secret-key -OCI_REGION=us-ashburn-1 - -# Huawei OBS Configuration -# -HUAWEI_OBS_BUCKET_NAME=your-bucket-name -HUAWEI_OBS_SECRET_KEY=your-secret-key -HUAWEI_OBS_ACCESS_KEY=your-access-key -HUAWEI_OBS_SERVER=your-server-url -HUAWEI_OBS_PATH_STYLE=false - -# Volcengine TOS Configuration -# -VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name -VOLCENGINE_TOS_SECRET_KEY=your-secret-key -VOLCENGINE_TOS_ACCESS_KEY=your-access-key -VOLCENGINE_TOS_ENDPOINT=your-server-url -VOLCENGINE_TOS_REGION=your-region - -# Baidu OBS Storage Configuration -# -BAIDU_OBS_BUCKET_NAME=your-bucket-name -BAIDU_OBS_SECRET_KEY=your-secret-key -BAIDU_OBS_ACCESS_KEY=your-access-key -BAIDU_OBS_ENDPOINT=your-server-url - -# Supabase Storage Configuration -# -SUPABASE_BUCKET_NAME=your-bucket-name -SUPABASE_API_KEY=your-access-key -SUPABASE_URL=your-server-url - -# ------------------------------ -# Vector Database Configuration -# ------------------------------ - -# The type of vector store to use. -# Supported values are `weaviate`, `oceanbase`, `seekdb`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `opengauss`, `tablestore`, `vastbase`, `tidb`, `tidb_on_qdrant`, `baidu`, `lindorm`, `huawei_cloud`, `upstash`, `matrixone`, `clickzetta`, `alibabacloud_mysql`, `iris`, `hologres`. 
VECTOR_STORE=weaviate -# Prefix used to create collection name in vector database VECTOR_INDEX_NAME_PREFIX=Vector_index - -# The Weaviate endpoint URL. Only available when VECTOR_STORE is `weaviate`. WEAVIATE_ENDPOINT=http://weaviate:8080 WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih WEAVIATE_GRPC_ENDPOINT=grpc://weaviate:50051 WEAVIATE_TOKENIZATION=word - -# For OceanBase metadata database configuration, available when `DB_TYPE` is `oceanbase`. -# For OceanBase vector database configuration, available when `VECTOR_STORE` is `oceanbase` -# If you want to use OceanBase as both vector database and metadata database, you need to set both `DB_TYPE` and `VECTOR_STORE` to `oceanbase`, and set Database Configuration is the same as the vector database. -# seekdb is the lite version of OceanBase and shares the connection configuration with OceanBase. -OCEANBASE_VECTOR_HOST=oceanbase -OCEANBASE_VECTOR_PORT=2881 -OCEANBASE_VECTOR_USER=root@test -OCEANBASE_VECTOR_PASSWORD=difyai123456 -OCEANBASE_VECTOR_DATABASE=test -OCEANBASE_CLUSTER_NAME=difyai -OCEANBASE_MEMORY_LIMIT=6G -OCEANBASE_ENABLE_HYBRID_SEARCH=false -# For OceanBase vector database, built-in fulltext parsers are `ngram`, `beng`, `space`, `ngram2`, `ik` -# For OceanBase vector database, external fulltext parsers (require plugin installation) are `japanese_ftparser`, `thai_ftparser` -OCEANBASE_FULLTEXT_PARSER=ik -SEEKDB_MEMORY_LIMIT=2G - -# The Qdrant endpoint URL. Only available when VECTOR_STORE is `qdrant`. -QDRANT_URL=http://qdrant:6333 -QDRANT_API_KEY=difyai123456 -QDRANT_CLIENT_TIMEOUT=20 -QDRANT_GRPC_ENABLED=false -QDRANT_GRPC_PORT=6334 -QDRANT_REPLICATION_FACTOR=1 - -# Milvus configuration. Only available when VECTOR_STORE is `milvus`. -# The milvus uri. 
-MILVUS_URI=http://host.docker.internal:19530 -MILVUS_DATABASE= -MILVUS_TOKEN= -MILVUS_USER= -MILVUS_PASSWORD= -MILVUS_ENABLE_HYBRID_SEARCH=False -MILVUS_ANALYZER_PARAMS= - -# MyScale configuration, only available when VECTOR_STORE is `myscale` -# For multi-language support, please set MYSCALE_FTS_PARAMS with referring to: -# https://myscale.com/docs/en/text-search/#understanding-fts-index-parameters -MYSCALE_HOST=myscale -MYSCALE_PORT=8123 -MYSCALE_USER=default -MYSCALE_PASSWORD= -MYSCALE_DATABASE=dify -MYSCALE_FTS_PARAMS= - -# Couchbase configurations, only available when VECTOR_STORE is `couchbase` -# The connection string must include hostname defined in the docker-compose file (couchbase-server in this case) -COUCHBASE_CONNECTION_STRING=couchbase://couchbase-server -COUCHBASE_USER=Administrator -COUCHBASE_PASSWORD=password -COUCHBASE_BUCKET_NAME=Embeddings -COUCHBASE_SCOPE_NAME=_default - -# Hologres configurations, only available when VECTOR_STORE is `hologres` -# access_key_id is used as the PG username, access_key_secret is used as the PG password -HOLOGRES_HOST= -HOLOGRES_PORT=80 -HOLOGRES_DATABASE= -HOLOGRES_ACCESS_KEY_ID= -HOLOGRES_ACCESS_KEY_SECRET= -HOLOGRES_SCHEMA=public -HOLOGRES_TOKENIZER=jieba -HOLOGRES_DISTANCE_METHOD=Cosine -HOLOGRES_BASE_QUANTIZATION_TYPE=rabitq -HOLOGRES_MAX_DEGREE=64 -HOLOGRES_EF_CONSTRUCTION=400 - -# pgvector configurations, only available when VECTOR_STORE is `pgvector` -PGVECTOR_HOST=pgvector -PGVECTOR_PORT=5432 -PGVECTOR_USER=postgres -PGVECTOR_PASSWORD=difyai123456 -PGVECTOR_DATABASE=dify -PGVECTOR_MIN_CONNECTION=1 -PGVECTOR_MAX_CONNECTION=5 -PGVECTOR_PG_BIGM=false -PGVECTOR_PG_BIGM_VERSION=1.2-20240606 - -# vastbase configurations, only available when VECTOR_STORE is `vastbase` -VASTBASE_HOST=vastbase -VASTBASE_PORT=5432 -VASTBASE_USER=dify -VASTBASE_PASSWORD=Difyai123456 -VASTBASE_DATABASE=dify -VASTBASE_MIN_CONNECTION=1 -VASTBASE_MAX_CONNECTION=5 - -# pgvecto-rs configurations, only available when VECTOR_STORE is 
`pgvecto-rs` -PGVECTO_RS_HOST=pgvecto-rs -PGVECTO_RS_PORT=5432 -PGVECTO_RS_USER=postgres -PGVECTO_RS_PASSWORD=difyai123456 -PGVECTO_RS_DATABASE=dify - -# analyticdb configurations, only available when VECTOR_STORE is `analyticdb` -ANALYTICDB_KEY_ID=your-ak -ANALYTICDB_KEY_SECRET=your-sk -ANALYTICDB_REGION_ID=cn-hangzhou -ANALYTICDB_INSTANCE_ID=gp-ab123456 -ANALYTICDB_ACCOUNT=testaccount -ANALYTICDB_PASSWORD=testpassword -ANALYTICDB_NAMESPACE=dify -ANALYTICDB_NAMESPACE_PASSWORD=difypassword -ANALYTICDB_HOST=gp-test.aliyuncs.com -ANALYTICDB_PORT=5432 -ANALYTICDB_MIN_CONNECTION=1 -ANALYTICDB_MAX_CONNECTION=5 - -# TiDB vector configurations, only available when VECTOR_STORE is `tidb_vector` -TIDB_VECTOR_HOST=tidb -TIDB_VECTOR_PORT=4000 -TIDB_VECTOR_USER= -TIDB_VECTOR_PASSWORD= -TIDB_VECTOR_DATABASE=dify - -# Matrixone vector configurations. -MATRIXONE_HOST=matrixone -MATRIXONE_PORT=6001 -MATRIXONE_USER=dump -MATRIXONE_PASSWORD=111 -MATRIXONE_DATABASE=dify - -# Tidb on qdrant configuration, only available when VECTOR_STORE is `tidb_on_qdrant` -TIDB_ON_QDRANT_URL=http://127.0.0.1 -TIDB_ON_QDRANT_API_KEY=dify -TIDB_ON_QDRANT_CLIENT_TIMEOUT=20 -TIDB_ON_QDRANT_GRPC_ENABLED=false -TIDB_ON_QDRANT_GRPC_PORT=6334 -TIDB_PUBLIC_KEY=dify -TIDB_PRIVATE_KEY=dify -TIDB_API_URL=http://127.0.0.1 -TIDB_IAM_API_URL=http://127.0.0.1 -TIDB_REGION=regions/aws-us-east-1 -TIDB_PROJECT_ID=dify -TIDB_SPEND_LIMIT=100 - -# Chroma configuration, only available when VECTOR_STORE is `chroma` -CHROMA_HOST=127.0.0.1 -CHROMA_PORT=8000 -CHROMA_TENANT=default_tenant -CHROMA_DATABASE=default_database -CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider -CHROMA_AUTH_CREDENTIALS= - -# Oracle configuration, only available when VECTOR_STORE is `oracle` -ORACLE_USER=dify -ORACLE_PASSWORD=dify -ORACLE_DSN=oracle:1521/FREEPDB1 -ORACLE_CONFIG_DIR=/app/api/storage/wallet -ORACLE_WALLET_LOCATION=/app/api/storage/wallet -ORACLE_WALLET_PASSWORD=dify -ORACLE_IS_AUTONOMOUS=false - -# AlibabaCloud 
MySQL configuration, only available when VECTOR_STORE is `alibabcloud_mysql` -ALIBABACLOUD_MYSQL_HOST=127.0.0.1 -ALIBABACLOUD_MYSQL_PORT=3306 -ALIBABACLOUD_MYSQL_USER=root -ALIBABACLOUD_MYSQL_PASSWORD=difyai123456 -ALIBABACLOUD_MYSQL_DATABASE=dify -ALIBABACLOUD_MYSQL_MAX_CONNECTION=5 -ALIBABACLOUD_MYSQL_HNSW_M=6 - -# relyt configurations, only available when VECTOR_STORE is `relyt` -RELYT_HOST=db -RELYT_PORT=5432 -RELYT_USER=postgres -RELYT_PASSWORD=difyai123456 -RELYT_DATABASE=postgres - -# open search configuration, only available when VECTOR_STORE is `opensearch` -OPENSEARCH_HOST=opensearch -OPENSEARCH_PORT=9200 -OPENSEARCH_SECURE=true -OPENSEARCH_VERIFY_CERTS=true -OPENSEARCH_AUTH_METHOD=basic -OPENSEARCH_USER=admin -OPENSEARCH_PASSWORD=admin -# If using AWS managed IAM, e.g. Managed Cluster or OpenSearch Serverless -OPENSEARCH_AWS_REGION=ap-southeast-1 -OPENSEARCH_AWS_SERVICE=aoss - -# tencent vector configurations, only available when VECTOR_STORE is `tencent` -TENCENT_VECTOR_DB_URL=http://127.0.0.1 -TENCENT_VECTOR_DB_API_KEY=dify -TENCENT_VECTOR_DB_TIMEOUT=30 -TENCENT_VECTOR_DB_USERNAME=dify -TENCENT_VECTOR_DB_DATABASE=dify -TENCENT_VECTOR_DB_SHARD=1 -TENCENT_VECTOR_DB_REPLICAS=2 -TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH=false - -# ElasticSearch configuration, only available when VECTOR_STORE is `elasticsearch` -ELASTICSEARCH_HOST=0.0.0.0 -ELASTICSEARCH_PORT=9200 -ELASTICSEARCH_USERNAME=elastic -ELASTICSEARCH_PASSWORD=elastic -KIBANA_PORT=5601 - -# Using ElasticSearch Cloud Serverless, or not. 
-ELASTICSEARCH_USE_CLOUD=false -ELASTICSEARCH_CLOUD_URL=YOUR-ELASTICSEARCH_CLOUD_URL -ELASTICSEARCH_API_KEY=YOUR-ELASTICSEARCH_API_KEY - -ELASTICSEARCH_VERIFY_CERTS=False -ELASTICSEARCH_CA_CERTS= -ELASTICSEARCH_REQUEST_TIMEOUT=100000 -ELASTICSEARCH_RETRY_ON_TIMEOUT=True -ELASTICSEARCH_MAX_RETRIES=10 - -# baidu vector configurations, only available when VECTOR_STORE is `baidu` -BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287 -BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000 -BAIDU_VECTOR_DB_ACCOUNT=root -BAIDU_VECTOR_DB_API_KEY=dify -BAIDU_VECTOR_DB_DATABASE=dify -BAIDU_VECTOR_DB_SHARD=1 -BAIDU_VECTOR_DB_REPLICAS=3 -BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER=DEFAULT_ANALYZER -BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE=COARSE_MODE -BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT=500 -BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO=0.05 -BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS=300 - -# VikingDB configurations, only available when VECTOR_STORE is `vikingdb` -VIKINGDB_ACCESS_KEY=your-ak -VIKINGDB_SECRET_KEY=your-sk -VIKINGDB_REGION=cn-shanghai -VIKINGDB_HOST=api-vikingdb.xxx.volces.com -VIKINGDB_SCHEMA=http -VIKINGDB_CONNECTION_TIMEOUT=30 -VIKINGDB_SOCKET_TIMEOUT=30 - -# Lindorm configuration, only available when VECTOR_STORE is `lindorm` -LINDORM_URL=http://localhost:30070 -LINDORM_USERNAME=admin -LINDORM_PASSWORD=admin -LINDORM_USING_UGC=True -LINDORM_QUERY_TIMEOUT=1 - -# opengauss configurations, only available when VECTOR_STORE is `opengauss` -OPENGAUSS_HOST=opengauss -OPENGAUSS_PORT=6600 -OPENGAUSS_USER=postgres -OPENGAUSS_PASSWORD=Dify@123 -OPENGAUSS_DATABASE=dify -OPENGAUSS_MIN_CONNECTION=1 -OPENGAUSS_MAX_CONNECTION=5 -OPENGAUSS_ENABLE_PQ=false - -# huawei cloud search service vector configurations, only available when VECTOR_STORE is `huawei_cloud` -HUAWEI_CLOUD_HOSTS=https://127.0.0.1:9200 -HUAWEI_CLOUD_USER=admin -HUAWEI_CLOUD_PASSWORD=admin - -# Upstash Vector configuration, only available when VECTOR_STORE is `upstash` 
-UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io -UPSTASH_VECTOR_TOKEN=dify - -# TableStore Vector configuration -# (only used when VECTOR_STORE is tablestore) -TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com -TABLESTORE_INSTANCE_NAME=instance-name -TABLESTORE_ACCESS_KEY_ID=xxx -TABLESTORE_ACCESS_KEY_SECRET=xxx -TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE=false - -# Clickzetta configuration, only available when VECTOR_STORE is `clickzetta` -CLICKZETTA_USERNAME= -CLICKZETTA_PASSWORD= -CLICKZETTA_INSTANCE= -CLICKZETTA_SERVICE=api.clickzetta.com -CLICKZETTA_WORKSPACE=quick_start -CLICKZETTA_VCLUSTER=default_ap -CLICKZETTA_SCHEMA=dify -CLICKZETTA_BATCH_SIZE=100 -CLICKZETTA_ENABLE_INVERTED_INDEX=true -CLICKZETTA_ANALYZER_TYPE=chinese -CLICKZETTA_ANALYZER_MODE=smart -CLICKZETTA_VECTOR_DISTANCE_FUNCTION=cosine_distance - -# InterSystems IRIS configuration, only available when VECTOR_STORE is `iris` -IRIS_HOST=iris -IRIS_SUPER_SERVER_PORT=1972 -IRIS_WEB_SERVER_PORT=52773 -IRIS_USER=_SYSTEM -IRIS_PASSWORD=Dify@1234 -IRIS_DATABASE=USER -IRIS_SCHEMA=dify -IRIS_CONNECTION_URL= -IRIS_MIN_CONNECTION=1 -IRIS_MAX_CONNECTION=3 -IRIS_TEXT_INDEX=true -IRIS_TEXT_INDEX_LANGUAGE=en -IRIS_TIMEZONE=UTC - -# ------------------------------ -# Knowledge Configuration -# ------------------------------ - -# Upload file size limit, default 15M. -UPLOAD_FILE_SIZE_LIMIT=15 - -# The maximum number of files that can be uploaded at a time, default 5. -UPLOAD_FILE_BATCH_LIMIT=5 - -# Comma-separated list of file extensions blocked from upload for security reasons. -# Extensions should be lowercase without dots (e.g., exe,bat,sh,dll). -# Empty by default to allow all file types. -# Recommended: exe,bat,cmd,com,scr,vbs,ps1,msi,dll -UPLOAD_FILE_EXTENSION_BLACKLIST= - -# Maximum number of files allowed in a single chunk attachment, default 10. 
-SINGLE_CHUNK_ATTACHMENT_LIMIT=10 - -# Maximum number of files allowed in a image batch upload operation -IMAGE_FILE_BATCH_LIMIT=10 - -# Maximum allowed image file size for attachments in megabytes, default 2. -ATTACHMENT_IMAGE_FILE_SIZE_LIMIT=2 - -# Timeout for downloading image attachments in seconds, default 60. -ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT=60 - - -# ETL type, support: `dify`, `Unstructured` -# `dify` Dify's proprietary file extraction scheme -# `Unstructured` Unstructured.io file extraction scheme -ETL_TYPE=dify - -# Unstructured API path and API key, needs to be configured when ETL_TYPE is Unstructured -# Or using Unstructured for document extractor node for pptx. -# For example: http://unstructured:8000/general/v0/general -UNSTRUCTURED_API_URL= -UNSTRUCTURED_API_KEY= -SCARF_NO_ANALYTICS=true - -# ------------------------------ -# Model Configuration -# ------------------------------ - -# The maximum number of tokens allowed for prompt generation. -# This setting controls the upper limit of tokens that can be used by the LLM -# when generating a prompt in the prompt generation tool. -# Default: 512 tokens. -PROMPT_GENERATION_MAX_TOKENS=512 - -# The maximum number of tokens allowed for code generation. -# This setting controls the upper limit of tokens that can be used by the LLM -# when generating code in the code generation tool. -# Default: 1024 tokens. -CODE_GENERATION_MAX_TOKENS=1024 - -# Enable or disable plugin based token counting. If disabled, token counting will return 0. -# This can improve performance by skipping token counting operations. -# Default: false (disabled). -PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false - -# ------------------------------ -# Multi-modal Configuration -# ------------------------------ - -# The format of the image/video/audio/document sent when the multi-modal model is input, -# the default is base64, optional url. -# The delay of the call in url mode will be lower than that in base64 mode. 
-# It is generally recommended to use the more compatible base64 mode. -# If configured as url, you need to configure FILES_URL as an externally accessible address so that the multi-modal model can access the image/video/audio/document. -MULTIMODAL_SEND_FORMAT=base64 -# Upload image file size limit, default 10M. -UPLOAD_IMAGE_FILE_SIZE_LIMIT=10 -# Upload video file size limit, default 100M. -UPLOAD_VIDEO_FILE_SIZE_LIMIT=100 -# Upload audio file size limit, default 50M. -UPLOAD_AUDIO_FILE_SIZE_LIMIT=50 - -# ------------------------------ -# Sentry Configuration -# Used for application monitoring and error log tracking. -# ------------------------------ -SENTRY_DSN= - -# API Service Sentry DSN address, default is empty, when empty, -# all monitoring information is not reported to Sentry. -# If not set, Sentry error reporting will be disabled. -API_SENTRY_DSN= -# API Service The reporting ratio of Sentry events, if it is 0.01, it is 1%. -API_SENTRY_TRACES_SAMPLE_RATE=1.0 -# API Service The reporting ratio of Sentry profiles, if it is 0.01, it is 1%. -API_SENTRY_PROFILES_SAMPLE_RATE=1.0 - -# Web Service Sentry DSN address, default is empty, when empty, -# all monitoring information is not reported to Sentry. -# If not set, Sentry error reporting will be disabled. -WEB_SENTRY_DSN= - -# Plugin_daemon Service Sentry DSN address, default is empty, when empty, -# all monitoring information is not reported to Sentry. -# If not set, Sentry error reporting will be disabled. -PLUGIN_SENTRY_ENABLED=false -PLUGIN_SENTRY_DSN= - -# ------------------------------ -# Notion Integration Configuration -# Variables can be obtained by applying for Notion integration: https://www.notion.so/my-integrations -# ------------------------------ - -# Configure as "public" or "internal". -# Since Notion's OAuth redirect URL only supports HTTPS, -# if deploying locally, please use Notion's internal integration. 
-NOTION_INTEGRATION_TYPE=public -# Notion OAuth client secret (used for public integration type) -NOTION_CLIENT_SECRET= -# Notion OAuth client id (used for public integration type) -NOTION_CLIENT_ID= -# Notion internal integration secret. -# If the value of NOTION_INTEGRATION_TYPE is "internal", -# you need to configure this variable. -NOTION_INTERNAL_SECRET= - -# ------------------------------ -# Mail related configuration -# ------------------------------ - -# Mail type, support: resend, smtp, sendgrid -MAIL_TYPE=resend - -# Default send from email address, if not specified -# If using SendGrid, use the 'from' field for authentication if necessary. -MAIL_DEFAULT_SEND_FROM= - -# API-Key for the Resend email provider, used when MAIL_TYPE is `resend`. -RESEND_API_URL=https://api.resend.com -RESEND_API_KEY=your-resend-api-key - - -# SMTP server configuration, used when MAIL_TYPE is `smtp` -SMTP_SERVER= -SMTP_PORT=465 -SMTP_USERNAME= -SMTP_PASSWORD= -SMTP_USE_TLS=true -SMTP_OPPORTUNISTIC_TLS=false -# Optional: override the local hostname used for SMTP HELO/EHLO -SMTP_LOCAL_HOSTNAME= - -# Sendgid configuration -SENDGRID_API_KEY= - -# ------------------------------ -# Others Configuration -# ------------------------------ - -# Maximum length of segmentation tokens for indexing -INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000 - -# Member invitation link valid time (hours), -# Default: 72. -INVITE_EXPIRY_HOURS=72 - -# Reset password token valid time (minutes), -RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 -EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES=5 -CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5 -OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5 - -# The sandbox service endpoint. 
-CODE_EXECUTION_ENDPOINT=http://sandbox:8194 -CODE_EXECUTION_API_KEY=dify-sandbox -CODE_EXECUTION_SSL_VERIFY=True -CODE_EXECUTION_POOL_MAX_CONNECTIONS=100 -CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS=20 -CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY=5.0 -CODE_MAX_NUMBER=9223372036854775807 -CODE_MIN_NUMBER=-9223372036854775808 -CODE_MAX_DEPTH=5 -CODE_MAX_PRECISION=20 -CODE_MAX_STRING_LENGTH=400000 -CODE_MAX_STRING_ARRAY_LENGTH=30 -CODE_MAX_OBJECT_ARRAY_LENGTH=30 -CODE_MAX_NUMBER_ARRAY_LENGTH=1000 -CODE_EXECUTION_CONNECT_TIMEOUT=10 -CODE_EXECUTION_READ_TIMEOUT=60 -CODE_EXECUTION_WRITE_TIMEOUT=10 -TEMPLATE_TRANSFORM_MAX_LENGTH=400000 - -# Workflow runtime configuration -WORKFLOW_MAX_EXECUTION_STEPS=500 -WORKFLOW_MAX_EXECUTION_TIME=1200 -WORKFLOW_CALL_MAX_DEPTH=5 -MAX_VARIABLE_SIZE=204800 -WORKFLOW_FILE_UPLOAD_LIMIT=10 - -# GraphEngine Worker Pool Configuration -# Minimum number of workers per GraphEngine instance (default: 1) -GRAPH_ENGINE_MIN_WORKERS=1 -# Maximum number of workers per GraphEngine instance (default: 10) -GRAPH_ENGINE_MAX_WORKERS=10 -# Queue depth threshold that triggers worker scale up (default: 3) -GRAPH_ENGINE_SCALE_UP_THRESHOLD=3 -# Seconds of idle time before scaling down workers (default: 5.0) -GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME=5.0 - -# Workflow storage configuration -# Options: rdbms, hybrid -# rdbms: Use only the relational database (default) -# hybrid: Save new data to object storage, read from both object storage and RDBMS -WORKFLOW_NODE_EXECUTION_STORAGE=rdbms - -# Repository configuration -# Core workflow execution repository implementation -# Options: -# - core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository (default) -# - core.repositories.celery_workflow_execution_repository.CeleryWorkflowExecutionRepository -# - extensions.logstore.repositories.logstore_workflow_execution_repository.LogstoreWorkflowExecutionRepository 
-CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository - -# Core workflow node execution repository implementation -# Options: -# - core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository (default) -# - core.repositories.celery_workflow_node_execution_repository.CeleryWorkflowNodeExecutionRepository -# - extensions.logstore.repositories.logstore_workflow_node_execution_repository.LogstoreWorkflowNodeExecutionRepository -CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository - -# API workflow run repository implementation -# Options: -# - repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository (default) -# - extensions.logstore.repositories.logstore_api_workflow_run_repository.LogstoreAPIWorkflowRunRepository -API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository - -# API workflow node execution repository implementation -# Options: -# - repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository (default) -# - extensions.logstore.repositories.logstore_api_workflow_node_execution_repository.LogstoreAPIWorkflowNodeExecutionRepository -API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository - -# Workflow log cleanup configuration -# Enable automatic cleanup of workflow run logs to manage database size -WORKFLOW_LOG_CLEANUP_ENABLED=false -# Number of days to retain workflow run logs (default: 30 days) -WORKFLOW_LOG_RETENTION_DAYS=30 -# Batch size for workflow log cleanup operations (default: 100) -WORKFLOW_LOG_CLEANUP_BATCH_SIZE=100 -# Comma-separated list of workflow IDs to clean logs for 
-WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS= - -# Aliyun SLS Logstore Configuration -# Aliyun Access Key ID -ALIYUN_SLS_ACCESS_KEY_ID= -# Aliyun Access Key Secret -ALIYUN_SLS_ACCESS_KEY_SECRET= -# Aliyun SLS Endpoint (e.g., cn-hangzhou.log.aliyuncs.com) -ALIYUN_SLS_ENDPOINT= -# Aliyun SLS Region (e.g., cn-hangzhou) -ALIYUN_SLS_REGION= -# Aliyun SLS Project Name -ALIYUN_SLS_PROJECT_NAME= -# Number of days to retain workflow run logs (default: 365 days, 3650 for permanent storage) -ALIYUN_SLS_LOGSTORE_TTL=365 -# Enable dual-write to both SLS LogStore and SQL database (default: false) -LOGSTORE_DUAL_WRITE_ENABLED=false -# Enable dual-read fallback to SQL database when LogStore returns no results (default: true) -# Useful for migration scenarios where historical data exists only in SQL database -LOGSTORE_DUAL_READ_ENABLED=true -# Control flag for whether to write the `graph` field to LogStore. -# If LOGSTORE_ENABLE_PUT_GRAPH_FIELD is "true", write the full `graph` field; -# otherwise write an empty {} instead. Defaults to writing the `graph` field. -LOGSTORE_ENABLE_PUT_GRAPH_FIELD=true - -# HTTP request node in workflow configuration -HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 -HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 -HTTP_REQUEST_NODE_SSL_VERIFY=True - -# HTTP request node timeout configuration -# Maximum timeout values (in seconds) that users can set in HTTP request nodes -# - Connect timeout: Time to wait for establishing connection (default: 10s) -# - Read timeout: Time to wait for receiving response data (default: 600s, 10 minutes) -# - Write timeout: Time to wait for sending request data (default: 600s, 10 minutes) -HTTP_REQUEST_MAX_CONNECT_TIMEOUT=10 -HTTP_REQUEST_MAX_READ_TIMEOUT=600 -HTTP_REQUEST_MAX_WRITE_TIMEOUT=600 - -# Base64 encoded CA certificate data for custom certificate verification (PEM format, optional) -# HTTP_REQUEST_NODE_SSL_CERT_DATA=LS0tLS1CRUdJTi... 
-# Base64 encoded client certificate data for mutual TLS authentication (PEM format, optional) -# HTTP_REQUEST_NODE_SSL_CLIENT_CERT_DATA=LS0tLS1CRUdJTi... -# Base64 encoded client private key data for mutual TLS authentication (PEM format, optional) -# HTTP_REQUEST_NODE_SSL_CLIENT_KEY_DATA=LS0tLS1CRUdJTi... - -# Webhook request configuration -WEBHOOK_REQUEST_BODY_MAX_SIZE=10485760 - -# Respect X-* headers to redirect clients -RESPECT_XFORWARD_HEADERS_ENABLED=false - -# SSRF Proxy server HTTP URL -SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 -# SSRF Proxy server HTTPS URL -SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 - -# Maximum loop count in the workflow -LOOP_NODE_MAX_COUNT=100 - -# The maximum number of tools that can be used in the agent. -MAX_TOOLS_NUM=10 - -# Maximum number of Parallelism branches in the workflow -MAX_PARALLEL_LIMIT=10 - -# The maximum number of iterations for agent setting -MAX_ITERATIONS_NUM=99 - -# ------------------------------ -# Environment Variables for web Service -# ------------------------------ - -# The timeout for the text generation in millisecond -TEXT_GENERATION_TIMEOUT_MS=60000 - -# Enable the experimental vinext runtime shipped in the image. -EXPERIMENTAL_ENABLE_VINEXT=false - -# Allow inline style attributes in Markdown rendering. -# Enable this if your workflows use Jinja2 templates with styled HTML. -# Only recommended for self-hosted deployments with trusted content. -ALLOW_INLINE_STYLES=false - -# Allow rendering unsafe URLs which have "data:" scheme. 
-ALLOW_UNSAFE_DATA_SCHEME=false - -# Maximum number of tree depth in the workflow -MAX_TREE_DEPTH=50 - -# ------------------------------ -# Environment Variables for database Service -# ------------------------------ -# Postgres data directory -PGDATA=/var/lib/postgresql/data/pgdata - -# MySQL Default Configuration -MYSQL_HOST_VOLUME=./volumes/mysql/data - -# ------------------------------ -# Environment Variables for sandbox Service -# ------------------------------ - -# The API key for the sandbox service -SANDBOX_API_KEY=dify-sandbox -# The mode in which the Gin framework runs -SANDBOX_GIN_MODE=release -# The timeout for the worker in seconds -SANDBOX_WORKER_TIMEOUT=15 -# Enable network for the sandbox service -SANDBOX_ENABLE_NETWORK=true -# HTTP proxy URL for SSRF protection -SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 -# HTTPS proxy URL for SSRF protection -SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 -# The port on which the sandbox service runs -SANDBOX_PORT=8194 - -# ------------------------------ -# Environment Variables for weaviate Service -# (only used when VECTOR_STORE is weaviate) -# ------------------------------ WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate WEAVIATE_QUERY_DEFAULTS_LIMIT=25 WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true @@ -1259,118 +172,26 @@ WEAVIATE_ENABLE_TOKENIZER_GSE=false WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA=false WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR=false -# ------------------------------ -# Environment Variables for Chroma -# (only used when VECTOR_STORE is chroma) -# ------------------------------ - -# Authentication credentials for Chroma server -CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456 -# Authentication provider for Chroma server -CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider -# Persistence setting for Chroma server -CHROMA_IS_PERSISTENT=TRUE - -# ------------------------------ -# Environment Variables for Oracle Service -# (only used when VECTOR_STORE is oracle) -# 
------------------------------ -ORACLE_PWD=Dify123456 -ORACLE_CHARACTERSET=AL32UTF8 - -# ------------------------------ -# Environment Variables for milvus Service -# (only used when VECTOR_STORE is milvus) -# ------------------------------ -# ETCD configuration for auto compaction mode -ETCD_AUTO_COMPACTION_MODE=revision -# ETCD configuration for auto compaction retention in terms of number of revisions -ETCD_AUTO_COMPACTION_RETENTION=1000 -# ETCD configuration for backend quota in bytes -ETCD_QUOTA_BACKEND_BYTES=4294967296 -# ETCD configuration for the number of changes before triggering a snapshot -ETCD_SNAPSHOT_COUNT=50000 -# MinIO access key for authentication -MINIO_ACCESS_KEY=minioadmin -# MinIO secret key for authentication -MINIO_SECRET_KEY=minioadmin -# ETCD service endpoints -ETCD_ENDPOINTS=etcd:2379 -# MinIO service address -MINIO_ADDRESS=minio:9000 -# Enable or disable security authorization -MILVUS_AUTHORIZATION_ENABLED=true - -# ------------------------------ -# Environment Variables for pgvector / pgvector-rs Service -# (only used when VECTOR_STORE is pgvector / pgvector-rs) -# ------------------------------ -PGVECTOR_PGUSER=postgres -# The password for the default postgres user. -PGVECTOR_POSTGRES_PASSWORD=difyai123456 -# The name of the default postgres database. 
-PGVECTOR_POSTGRES_DB=dify -# postgres data directory -PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata - -# ------------------------------ -# Environment Variables for opensearch -# (only used when VECTOR_STORE is opensearch) -# ------------------------------ -OPENSEARCH_DISCOVERY_TYPE=single-node -OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true -OPENSEARCH_JAVA_OPTS_MIN=512m -OPENSEARCH_JAVA_OPTS_MAX=1024m -OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123 -OPENSEARCH_MEMLOCK_SOFT=-1 -OPENSEARCH_MEMLOCK_HARD=-1 -OPENSEARCH_NOFILE_SOFT=65536 -OPENSEARCH_NOFILE_HARD=65536 - -# ------------------------------ -# Environment Variables for Nginx reverse proxy -# ------------------------------ -NGINX_SERVER_NAME=_ -NGINX_HTTPS_ENABLED=false -# HTTP port -NGINX_PORT=80 -# SSL settings are only applied when HTTPS_ENABLED is true -NGINX_SSL_PORT=443 -# if HTTPS_ENABLED is true, you're required to add your own SSL certificates/keys to the `./nginx/ssl` directory -# and modify the env vars below accordingly. 
-NGINX_SSL_CERT_FILENAME=dify.crt -NGINX_SSL_CERT_KEY_FILENAME=dify.key -NGINX_SSL_PROTOCOLS=TLSv1.2 TLSv1.3 - -# Nginx performance tuning -NGINX_WORKER_PROCESSES=auto -NGINX_CLIENT_MAX_BODY_SIZE=100M -NGINX_KEEPALIVE_TIMEOUT=65 - -# Proxy settings -NGINX_PROXY_READ_TIMEOUT=3600s -NGINX_PROXY_SEND_TIMEOUT=3600s - -# Set true to accept requests for /.well-known/acme-challenge/ -NGINX_ENABLE_CERTBOT_CHALLENGE=false - -# ------------------------------ -# Certbot Configuration -# ------------------------------ - -# Email address (required to get certificates from Let's Encrypt) -CERTBOT_EMAIL=your_email@example.com - -# Domain name -CERTBOT_DOMAIN=your_domain.com - -# certbot command options -# i.e: --force-renewal --dry-run --test-cert --debug -CERTBOT_OPTIONS= - -# ------------------------------ -# Environment Variables for SSRF Proxy -# ------------------------------ +# Sandbox and SSRF proxy +CODE_EXECUTION_ENDPOINT=http://sandbox:8194 +CODE_EXECUTION_API_KEY=dify-sandbox +CODE_EXECUTION_SSL_VERIFY=True +CODE_EXECUTION_POOL_MAX_CONNECTIONS=100 +CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS=20 +CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY=5.0 +CODE_EXECUTION_CONNECT_TIMEOUT=10 +CODE_EXECUTION_READ_TIMEOUT=60 +CODE_EXECUTION_WRITE_TIMEOUT=10 +SANDBOX_API_KEY=dify-sandbox +SANDBOX_GIN_MODE=release +SANDBOX_WORKER_TIMEOUT=15 +SANDBOX_ENABLE_NETWORK=true +SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 +SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 +SANDBOX_PORT=8194 +PIP_MIRROR_URL= +SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 +SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 SSRF_HTTP_PORT=3128 SSRF_COREDUMP_DIR=/var/spool/squid SSRF_REVERSE_PROXY_PORT=8194 @@ -1383,67 +204,7 @@ SSRF_POOL_MAX_CONNECTIONS=100 SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS=20 SSRF_POOL_KEEPALIVE_EXPIRY=5.0 -# ------------------------------ -# docker env var for specifying vector db and metadata db type at startup -# (based on the vector db and metadata db type, the corresponding docker -# compose profile will be used) 
-# if you want to use unstructured, add ',unstructured' to the end -# ------------------------------ -COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql} - -# ------------------------------ -# Worker health check configuration for worker and worker_beat services. -# Set to false to enable the health check. -# Note: enabling the health check may cause periodic CPU spikes and increased load, -# as it establishes a broker connection and sends a Celery ping on every check interval. -# ------------------------------ -COMPOSE_WORKER_HEALTHCHECK_DISABLED=true -# Interval between health checks (e.g. 30s, 1m) -COMPOSE_WORKER_HEALTHCHECK_INTERVAL=30s -# Timeout for each health check (e.g. 30s, 1m) -COMPOSE_WORKER_HEALTHCHECK_TIMEOUT=30s - -# ------------------------------ -# Docker Compose Service Expose Host Port Configurations -# ------------------------------ -EXPOSE_NGINX_PORT=80 -EXPOSE_NGINX_SSL_PORT=443 - -# ---------------------------------------------------------------------------- -# ModelProvider & Tool Position Configuration -# Used to specify the model providers and tools that can be used in the app. -# ---------------------------------------------------------------------------- - -# Pin, include, and exclude tools -# Use comma-separated values with no spaces between items. -# Example: POSITION_TOOL_PINS=bing,google -POSITION_TOOL_PINS= -POSITION_TOOL_INCLUDES= -POSITION_TOOL_EXCLUDES= - -# Pin, include, and exclude model providers -# Use comma-separated values with no spaces between items. -# Example: POSITION_PROVIDER_PINS=openai,openllm -POSITION_PROVIDER_PINS= -POSITION_PROVIDER_INCLUDES= -POSITION_PROVIDER_EXCLUDES= - -# CSP https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP -CSP_WHITELIST= - -# Enable or disable create tidb service job -CREATE_TIDB_SERVICE_JOB_ENABLED=false - -# Maximum number of submitted thread count in a ThreadPool for parallel node execution -MAX_SUBMIT_COUNT=100 - -# The maximum number of top-k value for RAG. 
-TOP_K_MAX_VALUE=10 - -# ------------------------------ -# Plugin Daemon Configuration -# ------------------------------ - +# Plugin daemon DB_PLUGIN_DATABASE=dify_plugin EXPOSE_PLUGIN_DAEMON_PORT=5002 PLUGIN_DAEMON_PORT=5002 @@ -1452,180 +213,45 @@ PLUGIN_DAEMON_URL=http://plugin_daemon:5002 PLUGIN_MAX_PACKAGE_SIZE=52428800 PLUGIN_MODEL_SCHEMA_CACHE_TTL=3600 PLUGIN_PPROF_ENABLED=false - PLUGIN_DEBUGGING_HOST=0.0.0.0 PLUGIN_DEBUGGING_PORT=5003 EXPOSE_PLUGIN_DEBUGGING_HOST=localhost EXPOSE_PLUGIN_DEBUGGING_PORT=5003 - -# If this key is changed, DIFY_INNER_API_KEY in plugin_daemon service must also be updated or agent node will fail. PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 PLUGIN_DIFY_INNER_API_URL=http://api:5001 - -ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} - -MARKETPLACE_ENABLED=true -MARKETPLACE_API_URL=https://marketplace.dify.ai - -# Creators Platform configuration -CREATORS_PLATFORM_FEATURES_ENABLED=true -CREATORS_PLATFORM_API_URL=https://creators.dify.ai -CREATORS_PLATFORM_OAUTH_CLIENT_ID= - FORCE_VERIFYING_SIGNATURE=true -ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES=true - PLUGIN_STDIO_BUFFER_SIZE=1024 PLUGIN_STDIO_MAX_BUFFER_SIZE=5242880 - PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120 -# Plugin Daemon side timeout (configure to match the API side below) PLUGIN_MAX_EXECUTION_TIMEOUT=600 -# API side timeout (configure to match the Plugin Daemon side above) -PLUGIN_DAEMON_TIMEOUT=600.0 -# PIP_MIRROR_URL=https://pypi.tuna.tsinghua.edu.cn/simple -PIP_MIRROR_URL= - -# https://github.com/langgenius/dify-plugin-daemon/blob/main/.env.example -# Plugin storage type, local aws_s3 tencent_cos azure_blob aliyun_oss volcengine_tos PLUGIN_STORAGE_TYPE=local PLUGIN_STORAGE_LOCAL_ROOT=/app/storage PLUGIN_WORKING_PATH=/app/storage/cwd PLUGIN_INSTALLED_PATH=plugin PLUGIN_PACKAGE_CACHE_PATH=plugin_packages PLUGIN_MEDIA_CACHE_PATH=assets -# Plugin oss bucket PLUGIN_STORAGE_OSS_BUCKET= -# Plugin oss s3 credentials -PLUGIN_S3_USE_AWS=false 
-PLUGIN_S3_USE_AWS_MANAGED_IAM=false -PLUGIN_S3_ENDPOINT= -PLUGIN_S3_USE_PATH_STYLE=false -PLUGIN_AWS_ACCESS_KEY= -PLUGIN_AWS_SECRET_KEY= -PLUGIN_AWS_REGION= -# Plugin oss azure blob -PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME= -PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING= -# Plugin oss tencent cos -PLUGIN_TENCENT_COS_SECRET_KEY= -PLUGIN_TENCENT_COS_SECRET_ID= -PLUGIN_TENCENT_COS_REGION= -# Plugin oss aliyun oss -PLUGIN_ALIYUN_OSS_REGION= -PLUGIN_ALIYUN_OSS_ENDPOINT= -PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID= -PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET= -PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4 -PLUGIN_ALIYUN_OSS_PATH= -# Plugin oss volcengine tos -PLUGIN_VOLCENGINE_TOS_ENDPOINT= -PLUGIN_VOLCENGINE_TOS_ACCESS_KEY= -PLUGIN_VOLCENGINE_TOS_SECRET_KEY= -PLUGIN_VOLCENGINE_TOS_REGION= +PLUGIN_SENTRY_ENABLED=false +PLUGIN_SENTRY_DSN= +MARKETPLACE_ENABLED=true +MARKETPLACE_API_URL=https://marketplace.dify.ai +MARKETPLACE_URL= -# ------------------------------ -# OTLP Collector Configuration -# ------------------------------ -ENABLE_OTEL=false -OTLP_TRACE_ENDPOINT= -OTLP_METRIC_ENDPOINT= -OTLP_BASE_ENDPOINT=http://localhost:4318 -OTLP_API_KEY= -OTEL_EXPORTER_OTLP_PROTOCOL= -OTEL_EXPORTER_TYPE=otlp -OTEL_SAMPLING_RATE=0.1 -OTEL_BATCH_EXPORT_SCHEDULE_DELAY=5000 -OTEL_MAX_QUEUE_SIZE=2048 -OTEL_MAX_EXPORT_BATCH_SIZE=512 -OTEL_METRIC_EXPORT_INTERVAL=60000 -OTEL_BATCH_EXPORT_TIMEOUT=10000 -OTEL_METRIC_EXPORT_TIMEOUT=30000 - -# Prevent Clickjacking -ALLOW_EMBED=false - -# Dataset queue monitor configuration -QUEUE_MONITOR_THRESHOLD=200 -# You can configure multiple ones, separated by commas. 
eg: test1@dify.ai,test2@dify.ai -QUEUE_MONITOR_ALERT_EMAILS= -# Monitor interval in minutes, default is 30 minutes -QUEUE_MONITOR_INTERVAL=30 - -# Swagger UI configuration -SWAGGER_UI_ENABLED=false -SWAGGER_UI_PATH=/swagger-ui.html - -# Whether to encrypt dataset IDs when exporting DSL files (default: true) -# Set to false to export dataset IDs as plain text for easier cross-environment import -DSL_EXPORT_ENCRYPT_DATASET_ID=true - -# Maximum number of segments for dataset segments API (0 for unlimited) -DATASET_MAX_SEGMENTS_PER_REQUEST=0 - -# Celery schedule tasks configuration -ENABLE_CLEAN_EMBEDDING_CACHE_TASK=false -ENABLE_CLEAN_UNUSED_DATASETS_TASK=false -ENABLE_CREATE_TIDB_SERVERLESS_TASK=false -ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK=false -ENABLE_CLEAN_MESSAGES=false -ENABLE_WORKFLOW_RUN_CLEANUP_TASK=false -ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK=false -ENABLE_DATASETS_QUEUE_MONITOR=false -ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK=true -ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK=true -WORKFLOW_SCHEDULE_POLLER_INTERVAL=1 -WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE=100 -WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK=0 - -# Tenant isolated task queue configuration -TENANT_ISOLATED_TASK_CONCURRENCY=1 - -# Maximum allowed CSV file size for annotation import in megabytes -ANNOTATION_IMPORT_FILE_SIZE_LIMIT=2 -#Maximum number of annotation records allowed in a single import -ANNOTATION_IMPORT_MAX_RECORDS=10000 -# Minimum number of annotation records required in a single import -ANNOTATION_IMPORT_MIN_RECORDS=1 -ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE=5 -ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR=20 -# Maximum number of concurrent annotation import tasks per tenant -ANNOTATION_IMPORT_MAX_CONCURRENT=5 - -# The API key of amplitude -AMPLITUDE_API_KEY= - -# Sandbox expired records clean configuration -SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD=21 -SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE=1000 -SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL=200 -SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS=30 - - -# 
Redis URL used for event bus between API and -# celery worker -# defaults to url constructed from `REDIS_*` -# configurations -EVENT_BUS_REDIS_URL= -# Event transport type. Options are: -# -# - pubsub: normal Pub/Sub (at-most-once) -# - sharded: sharded Pub/Sub (at-most-once) -# - streams: Redis Streams (at-least-once, recommended to avoid subscriber races) -# -# Note: Before enabling 'streams' in production, estimate your expected event volume and retention needs. -# Configure Redis memory limits and stream trimming appropriately (e.g., MAXLEN and key expiry) to reduce -# the risk of data loss from Redis auto-eviction under memory pressure. -# Also accepts ENV: EVENT_BUS_REDIS_CHANNEL_TYPE. -EVENT_BUS_REDIS_CHANNEL_TYPE=pubsub -# Whether to use Redis cluster mode while use redis as event bus. -# It's highly recommended to enable this for large deployments. -EVENT_BUS_REDIS_USE_CLUSTERS=false - -# Whether to Enable human input timeout check task -ENABLE_HUMAN_INPUT_TIMEOUT_TASK=true -# Human input timeout check interval in minutes -HUMAN_INPUT_TIMEOUT_TASK_INTERVAL=1 - - -SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL=90000 +# Nginx and Docker Compose +NGINX_SERVER_NAME=_ +NGINX_HTTPS_ENABLED=false +NGINX_PORT=80 +NGINX_SSL_PORT=443 +NGINX_SSL_CERT_FILENAME=dify.crt +NGINX_SSL_CERT_KEY_FILENAME=dify.key +NGINX_SSL_PROTOCOLS=TLSv1.2 TLSv1.3 +NGINX_WORKER_PROCESSES=auto +NGINX_CLIENT_MAX_BODY_SIZE=100M +NGINX_KEEPALIVE_TIMEOUT=65 +NGINX_PROXY_READ_TIMEOUT=3600s +NGINX_PROXY_SEND_TIMEOUT=3600s +NGINX_ENABLE_CERTBOT_CHALLENGE=false +NGINX_SOCKET_IO_UPSTREAM=api_websocket:5001 +EXPOSE_NGINX_PORT=80 +EXPOSE_NGINX_SSL_PORT=443 +COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql},collaboration diff --git a/docker/.gitignore b/docker/.gitignore new file mode 100644 index 0000000000..c3a47ad592 --- /dev/null +++ b/docker/.gitignore @@ -0,0 +1,3 @@ +# Ignore actual .env files (keep only .env.example files in git) +*.env +!*.env.example diff --git a/docker/README.md 
b/docker/README.md index 3130fa9886..a2d9b2eeba 100644 --- a/docker/README.md +++ b/docker/README.md @@ -7,29 +7,31 @@ Welcome to the new `docker` directory for deploying Dify using Docker Compose. T - **Certbot Container**: `docker-compose.yaml` now contains `certbot` for managing SSL certificates. This container automatically renews certificates and ensures secure HTTPS connections.\ For more information, refer `docker/certbot/README.md`. -- **Persistent Environment Variables**: Environment variables are now managed through a `.env` file, ensuring that your configurations persist across deployments. +- **Persistent Environment Variables**: Essential startup defaults are provided in `.env.example`, while local values are stored in `.env`, ensuring that your configurations persist across deployments. > What is `.env`?

- > The `.env` file is a crucial component in Docker and Docker Compose environments, serving as a centralized configuration file where you can define environment variables that are accessible to the containers at runtime. This file simplifies the management of environment settings across different stages of development, testing, and production, providing consistency and ease of configuration to deployments. + > The `.env` file is the local startup file. Copy it from `.env.example` for a default deployment. Optional advanced settings live in `envs/*.env.example` files. - **Unified Vector Database Services**: All vector database services are now managed from a single Docker Compose file `docker-compose.yaml`. You can switch between different vector databases by setting the `VECTOR_STORE` environment variable in your `.env` file. -- **Mandatory .env File**: A `.env` file is now required to run `docker compose up`. This file is crucial for configuring your deployment and for any custom settings to persist through upgrades. - ### How to Deploy Dify with `docker-compose.yaml` 1. **Prerequisites**: Ensure Docker and Docker Compose are installed on your system. 1. **Environment Setup**: - Navigate to the `docker` directory. - - Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`. - - Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options. - - **Optional (Recommended for upgrades)**: - You may use the environment synchronization tool to help keep your `.env` file aligned with the latest `.env.example` updates, while preserving your custom settings. - This is especially useful when upgrading Dify or managing a large, customized `.env` file. + - Copy `.env.example` to `.env`. + - Customize `.env` when you need to change essential startup defaults. Copy optional files from `envs/` without the `.example` suffix when you need advanced settings. 
+ - **Optional (for advanced deployments)**: + If you maintain a full `.env` file copied from `.env.example`, you may use the environment synchronization tool to keep it aligned with the latest `.env.example` updates while preserving your custom settings. See the [Environment Variables Synchronization](#environment-variables-synchronization) section below. 1. **Running the Services**: - - Execute `docker compose up` from the `docker` directory to start the services. + - Execute `docker compose up -d` from the `docker` directory to start the services. - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`. + ```bash + cp .env.example .env + docker compose up -d + ``` + 1. **SSL Certificate Setup**: - Refer `docker/certbot/README.md` to set up SSL certificates using Certbot. 1. **OpenTelemetry Collector Setup**: @@ -41,7 +43,7 @@ Welcome to the new `docker` directory for deploying Dify using Docker Compose. T 1. **Middleware Setup**: - Use the `docker-compose.middleware.yaml` for setting up essential middleware services like databases and caches. - Navigate to the `docker` directory. - - Ensure the `middleware.env` file is created by running `cp middleware.env.example middleware.env` (refer to the `middleware.env.example` file). + - Ensure the `middleware.env` file is created by running `cp envs/middleware.env.example middleware.env` (refer to the `envs/middleware.env.example` file). 1. **Running Middleware Services**: - Navigate to the `docker` directory. - Execute `docker compose --env-file middleware.env -f docker-compose.middleware.yaml -p dify up -d` to start PostgreSQL/MySQL (per `DB_TYPE`) plus the bundled Weaviate instance. @@ -58,7 +60,13 @@ For users migrating from the `docker-legacy` setup: 1. 
**Data Migration**: - Ensure that data from services like databases and caches is backed up and migrated appropriately to the new structure if necessary. -### Overview of `.env` +### Overview of `.env`, `.env.example`, and `envs/` + +- `.env.example` contains the essential default configuration for Docker Compose deployments. +- `.env` contains local startup values copied from `.env.example` and any local changes. +- `envs/*.env.example` files contain optional advanced configuration grouped by theme. + +Docker Compose reads `envs/*.env` files when present, then reads `.env` last so values in `.env` take precedence. #### Key Modules and Customization @@ -68,7 +76,7 @@ For users migrating from the `docker-legacy` setup: #### Other notable variables -The `.env.example` file provided in the Docker setup is extensive and covers a wide range of configuration options. It is structured into several sections, each pertaining to different aspects of the application and its services. Here are some of the key sections and variables: +The root `.env.example` file contains the essential startup settings. Optional and provider-specific settings are grouped in `envs/*.env.example` files. Here are some of the key sections and variables: 1. **Common Variables**: @@ -96,7 +104,7 @@ The `.env.example` file provided in the Docker setup is extensive and covers a w 1. **Storage Configuration**: - - `STORAGE_TYPE`, `S3_BUCKET_NAME`, `AZURE_BLOB_ACCOUNT_NAME`: Settings for file storage options like local, S3, Azure Blob, etc. + - `STORAGE_TYPE`, `OPENDAL_SCHEME`, `OPENDAL_FS_ROOT`: Default local file storage settings. Optional storage backends are configured from the files under `envs/`. 1. **Vector Database Configuration**: @@ -118,9 +126,11 @@ The `.env.example` file provided in the Docker setup is extensive and covers a w ### Environment Variables Synchronization -When upgrading Dify or pulling the latest changes, new environment variables may be introduced in `.env.example`. 
+When upgrading Dify or pulling the latest changes, new environment variables may be introduced in `.env.example` or the optional files under `envs/`. -To help keep your existing `.env` file up to date **without losing your custom values**, an optional environment variables synchronization tool is provided. +If you use the default workflow, review `.env.example` and keep your `.env` aligned with essential startup values. + +If you maintain a customized `.env` file copied from `.env.example`, an optional environment variables synchronization tool is provided. > This tool performs a **one-way synchronization** from `.env.example` to `.env`. > Existing values in `.env` are never overwritten automatically. @@ -143,9 +153,9 @@ Before synchronization, the current `.env` file is saved to the `env-backup/` di **When to use** -- After upgrading Dify to a newer version +- After upgrading Dify to a newer version with a full `.env` file - When `.env.example` has been updated with new environment variables -- When managing a large or heavily customized `.env` file +- When managing a large or heavily customized `.env` file copied from `.env.example` **Usage** diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml index 888f96332c..72c9d4fd90 100644 --- a/docker/docker-compose-template.yaml +++ b/docker/docker-compose-template.yaml @@ -1,4 +1,202 @@ -x-shared-env: &shared-api-worker-env +# Shared configuration using YAML anchors and env_file +x-shared-api-worker-config: &shared-api-worker-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/api.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + 
required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + +x-shared-worker-config: &shared-worker-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/worker.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: 
./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + +x-shared-worker-beat-config: &shared-worker-beat-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/worker-beat.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env 
+ required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + services: # Init container to fix permissions init_permissions: @@ -21,12 +219,9 @@ services: # API service api: - image: langgenius/dify-api:1.13.3 - restart: always + <<: *shared-api-worker-config + image: langgenius/dify-api:1.14.0 environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'api' starts the API server. MODE: api SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} @@ -66,15 +261,37 @@ services: - ssrf_proxy_network - default + # WebSocket service for workflow collaboration. 
+ api_websocket: + <<: *shared-api-worker-config + image: langgenius/dify-api:1.14.0 + profiles: + - collaboration + environment: + MODE: api + SERVER_WORKER_AMOUNT: 1 + SERVER_WORKER_CLASS: ${API_WEBSOCKET_WORKER_CLASS:-geventwebsocket.gunicorn.workers.GeventWebSocketWorker} + SERVER_WORKER_CONNECTIONS: ${API_WEBSOCKET_WORKER_CONNECTIONS:-1000} + GUNICORN_TIMEOUT: ${API_WEBSOCKET_GUNICORN_TIMEOUT:-360} + depends_on: + db_postgres: + condition: service_healthy + required: false + db_mysql: + condition: service_healthy + required: false + redis: + condition: service_started + networks: + - ssrf_proxy_network + - default + # worker service # The Celery worker for processing all queues (dataset, workflow, mail, etc.) worker: - image: langgenius/dify-api:1.13.3 - restart: always + <<: *shared-worker-config + image: langgenius/dify-api:1.14.0 environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'worker' starts the Celery worker for processing all queues. MODE: worker SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} @@ -115,12 +332,9 @@ services: # worker_beat service # Celery beat for scheduling periodic tasks. worker_beat: - image: langgenius/dify-api:1.13.3 - restart: always + <<: *shared-worker-beat-config + image: langgenius/dify-api:1.14.0 environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks. MODE: beat depends_on: init_permissions: @@ -152,8 +366,14 @@ services: # Frontend web application. 
web: - image: langgenius/dify-web:1.13.3 + image: langgenius/dify-web:1.14.0 restart: always + env_file: + - path: ./envs/core-services/web.env + required: false + - path: ./envs/security.env + required: false + - ./.env environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} APP_API_URL: ${APP_API_URL:-} @@ -170,8 +390,8 @@ services: ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false} MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai} - TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} - INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000} LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} @@ -228,7 +448,7 @@ services: MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456} MYSQL_DATABASE: ${DB_DATABASE:-dify} command: > - --max_connections=1000 + --max_connections=${MYSQL_MAX_CONNECTIONS:-1000} --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M} --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M} --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2} @@ -268,8 +488,14 @@ services: # The DifySandbox sandbox: - image: langgenius/dify-sandbox:0.2.14 + image: langgenius/dify-sandbox:0.2.15 restart: always + env_file: + - path: ./envs/core-services/sandbox.env + required: false + - path: ./envs/security.env + required: false + - ./.env environment: # The DifySandbox configurations # Make sure you are changing this key for your deployment with a strong key. 
@@ -292,11 +518,26 @@ services: # plugin daemon plugin_daemon: - image: langgenius/dify-plugin-daemon:0.5.3-local + image: langgenius/dify-plugin-daemon:0.6.0-local restart: always + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/plugin-daemon.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default environment: - # Use the shared environment variables. - <<: *shared-api-worker-env DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} DB_SSL_MODE: ${DB_SSL_MODE:-disable} SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002} @@ -402,8 +643,8 @@ services: - ./certbot/update-cert.template.txt:/update-cert.template.txt - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh environment: - - CERTBOT_EMAIL=${CERTBOT_EMAIL} - - CERTBOT_DOMAIN=${CERTBOT_DOMAIN} + - CERTBOT_EMAIL=${CERTBOT_EMAIL:-} + - CERTBOT_DOMAIN=${CERTBOT_DOMAIN:-} - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-} entrypoint: ["/docker-entrypoint.sh"] command: ["tail", "-f", "/dev/null"] @@ -445,6 +686,7 @@ services: NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false} + NGINX_SOCKET_IO_UPSTREAM: ${NGINX_SOCKET_IO_UPSTREAM:-api_websocket:5001} CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-} depends_on: - api diff --git a/docker/docker-compose.middleware.yaml b/docker/docker-compose.middleware.yaml index af3d54dfb3..0ad406a63b 100644 --- a/docker/docker-compose.middleware.yaml +++ b/docker/docker-compose.middleware.yaml @@ -51,7 +51,7 @@ services: MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456} MYSQL_DATABASE: ${DB_DATABASE:-dify} command: > - --max_connections=1000 + 
--max_connections=${MYSQL_MAX_CONNECTIONS:-1000} --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M} --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M} --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2} @@ -103,7 +103,7 @@ services: # The DifySandbox sandbox: - image: langgenius/dify-sandbox:0.2.14 + image: langgenius/dify-sandbox:0.2.15 restart: always env_file: - ./middleware.env @@ -129,7 +129,7 @@ services: # plugin daemon plugin_daemon: - image: langgenius/dify-plugin-daemon:0.5.3-local + image: langgenius/dify-plugin-daemon:0.6.0-local restart: always env_file: - ./middleware.env diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 60ba510f44..c1d75e01f4 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -4,724 +4,204 @@ # or docker-compose-template.yaml and regenerate this file. # ================================================================== -x-shared-env: &shared-api-worker-env - CONSOLE_API_URL: ${CONSOLE_API_URL:-} - CONSOLE_WEB_URL: ${CONSOLE_WEB_URL:-} - SERVICE_API_URL: ${SERVICE_API_URL:-} - TRIGGER_URL: ${TRIGGER_URL:-http://localhost} - APP_API_URL: ${APP_API_URL:-} - APP_WEB_URL: ${APP_WEB_URL:-} - FILES_URL: ${FILES_URL:-} - INTERNAL_FILES_URL: ${INTERNAL_FILES_URL:-} - LANG: ${LANG:-C.UTF-8} - LC_ALL: ${LC_ALL:-C.UTF-8} - PYTHONIOENCODING: ${PYTHONIOENCODING:-utf-8} - UV_CACHE_DIR: ${UV_CACHE_DIR:-/tmp/.uv-cache} - LOG_LEVEL: ${LOG_LEVEL:-INFO} - LOG_OUTPUT_FORMAT: ${LOG_OUTPUT_FORMAT:-text} - LOG_FILE: ${LOG_FILE:-/app/logs/server.log} - LOG_FILE_MAX_SIZE: ${LOG_FILE_MAX_SIZE:-20} - LOG_FILE_BACKUP_COUNT: ${LOG_FILE_BACKUP_COUNT:-5} - LOG_DATEFORMAT: ${LOG_DATEFORMAT:-%Y-%m-%d %H:%M:%S} - LOG_TZ: ${LOG_TZ:-UTC} - DEBUG: ${DEBUG:-false} - FLASK_DEBUG: ${FLASK_DEBUG:-false} - ENABLE_REQUEST_LOGGING: ${ENABLE_REQUEST_LOGGING:-False} - SECRET_KEY: ${SECRET_KEY:-sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U} - INIT_PASSWORD: ${INIT_PASSWORD:-} - 
DEPLOY_ENV: ${DEPLOY_ENV:-PRODUCTION} - CHECK_UPDATE_URL: ${CHECK_UPDATE_URL:-https://updates.dify.ai} - OPENAI_API_BASE: ${OPENAI_API_BASE:-https://api.openai.com/v1} - MIGRATION_ENABLED: ${MIGRATION_ENABLED:-true} - FILES_ACCESS_TIMEOUT: ${FILES_ACCESS_TIMEOUT:-300} - ENABLE_COLLABORATION_MODE: ${ENABLE_COLLABORATION_MODE:-false} - ACCESS_TOKEN_EXPIRE_MINUTES: ${ACCESS_TOKEN_EXPIRE_MINUTES:-60} - REFRESH_TOKEN_EXPIRE_DAYS: ${REFRESH_TOKEN_EXPIRE_DAYS:-30} - APP_DEFAULT_ACTIVE_REQUESTS: ${APP_DEFAULT_ACTIVE_REQUESTS:-0} - APP_MAX_ACTIVE_REQUESTS: ${APP_MAX_ACTIVE_REQUESTS:-0} - APP_MAX_EXECUTION_TIME: ${APP_MAX_EXECUTION_TIME:-1200} - DIFY_BIND_ADDRESS: ${DIFY_BIND_ADDRESS:-0.0.0.0} - DIFY_PORT: ${DIFY_PORT:-5001} - SERVER_WORKER_AMOUNT: ${SERVER_WORKER_AMOUNT:-1} - SERVER_WORKER_CLASS: ${SERVER_WORKER_CLASS:-gevent} - SERVER_WORKER_CONNECTIONS: ${SERVER_WORKER_CONNECTIONS:-10} - CELERY_WORKER_CLASS: ${CELERY_WORKER_CLASS:-} - GUNICORN_TIMEOUT: ${GUNICORN_TIMEOUT:-360} - CELERY_WORKER_AMOUNT: ${CELERY_WORKER_AMOUNT:-4} - CELERY_AUTO_SCALE: ${CELERY_AUTO_SCALE:-false} - CELERY_MAX_WORKERS: ${CELERY_MAX_WORKERS:-} - CELERY_MIN_WORKERS: ${CELERY_MIN_WORKERS:-} - API_TOOL_DEFAULT_CONNECT_TIMEOUT: ${API_TOOL_DEFAULT_CONNECT_TIMEOUT:-10} - API_TOOL_DEFAULT_READ_TIMEOUT: ${API_TOOL_DEFAULT_READ_TIMEOUT:-60} - ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true} - ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true} - ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true} - NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX: ${NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX:-false} - DB_TYPE: ${DB_TYPE:-postgresql} - DB_USERNAME: ${DB_USERNAME:-postgres} - DB_PASSWORD: ${DB_PASSWORD:-difyai123456} - DB_HOST: ${DB_HOST:-db_postgres} - DB_PORT: ${DB_PORT:-5432} - DB_DATABASE: ${DB_DATABASE:-dify} - SQLALCHEMY_POOL_SIZE: ${SQLALCHEMY_POOL_SIZE:-30} - SQLALCHEMY_MAX_OVERFLOW: ${SQLALCHEMY_MAX_OVERFLOW:-10} - SQLALCHEMY_POOL_RECYCLE: ${SQLALCHEMY_POOL_RECYCLE:-3600} - 
SQLALCHEMY_ECHO: ${SQLALCHEMY_ECHO:-false} - SQLALCHEMY_POOL_PRE_PING: ${SQLALCHEMY_POOL_PRE_PING:-false} - SQLALCHEMY_POOL_USE_LIFO: ${SQLALCHEMY_POOL_USE_LIFO:-false} - SQLALCHEMY_POOL_TIMEOUT: ${SQLALCHEMY_POOL_TIMEOUT:-30} - POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-200} - POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-128MB} - POSTGRES_WORK_MEM: ${POSTGRES_WORK_MEM:-4MB} - POSTGRES_MAINTENANCE_WORK_MEM: ${POSTGRES_MAINTENANCE_WORK_MEM:-64MB} - POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-4096MB} - POSTGRES_STATEMENT_TIMEOUT: ${POSTGRES_STATEMENT_TIMEOUT:-0} - POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT: ${POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT:-0} - MYSQL_MAX_CONNECTIONS: ${MYSQL_MAX_CONNECTIONS:-1000} - MYSQL_INNODB_BUFFER_POOL_SIZE: ${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M} - MYSQL_INNODB_LOG_FILE_SIZE: ${MYSQL_INNODB_LOG_FILE_SIZE:-128M} - MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT: ${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2} - REDIS_HOST: ${REDIS_HOST:-redis} - REDIS_PORT: ${REDIS_PORT:-6379} - REDIS_USERNAME: ${REDIS_USERNAME:-} - REDIS_PASSWORD: ${REDIS_PASSWORD:-difyai123456} - REDIS_USE_SSL: ${REDIS_USE_SSL:-false} - REDIS_SSL_CERT_REQS: ${REDIS_SSL_CERT_REQS:-CERT_NONE} - REDIS_SSL_CA_CERTS: ${REDIS_SSL_CA_CERTS:-} - REDIS_SSL_CERTFILE: ${REDIS_SSL_CERTFILE:-} - REDIS_SSL_KEYFILE: ${REDIS_SSL_KEYFILE:-} - REDIS_DB: ${REDIS_DB:-0} - REDIS_KEY_PREFIX: ${REDIS_KEY_PREFIX:-} - REDIS_MAX_CONNECTIONS: ${REDIS_MAX_CONNECTIONS:-} - REDIS_USE_SENTINEL: ${REDIS_USE_SENTINEL:-false} - REDIS_SENTINELS: ${REDIS_SENTINELS:-} - REDIS_SENTINEL_SERVICE_NAME: ${REDIS_SENTINEL_SERVICE_NAME:-} - REDIS_SENTINEL_USERNAME: ${REDIS_SENTINEL_USERNAME:-} - REDIS_SENTINEL_PASSWORD: ${REDIS_SENTINEL_PASSWORD:-} - REDIS_SENTINEL_SOCKET_TIMEOUT: ${REDIS_SENTINEL_SOCKET_TIMEOUT:-0.1} - REDIS_USE_CLUSTERS: ${REDIS_USE_CLUSTERS:-false} - REDIS_CLUSTERS: ${REDIS_CLUSTERS:-} - REDIS_CLUSTERS_PASSWORD: ${REDIS_CLUSTERS_PASSWORD:-} - REDIS_RETRY_RETRIES: 
${REDIS_RETRY_RETRIES:-3} - REDIS_RETRY_BACKOFF_BASE: ${REDIS_RETRY_BACKOFF_BASE:-1.0} - REDIS_RETRY_BACKOFF_CAP: ${REDIS_RETRY_BACKOFF_CAP:-10.0} - REDIS_SOCKET_TIMEOUT: ${REDIS_SOCKET_TIMEOUT:-5.0} - REDIS_SOCKET_CONNECT_TIMEOUT: ${REDIS_SOCKET_CONNECT_TIMEOUT:-5.0} - REDIS_HEALTH_CHECK_INTERVAL: ${REDIS_HEALTH_CHECK_INTERVAL:-30} - CELERY_BROKER_URL: ${CELERY_BROKER_URL:-redis://:difyai123456@redis:6379/1} - CELERY_BACKEND: ${CELERY_BACKEND:-redis} - BROKER_USE_SSL: ${BROKER_USE_SSL:-false} - CELERY_USE_SENTINEL: ${CELERY_USE_SENTINEL:-false} - CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-} - CELERY_SENTINEL_PASSWORD: ${CELERY_SENTINEL_PASSWORD:-} - CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1} - CELERY_TASK_ANNOTATIONS: ${CELERY_TASK_ANNOTATIONS:-null} - WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*} - CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*} - COOKIE_DOMAIN: ${COOKIE_DOMAIN:-} - NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-} - NEXT_PUBLIC_SOCKET_URL: ${NEXT_PUBLIC_SOCKET_URL:-ws://localhost} - NEXT_PUBLIC_BATCH_CONCURRENCY: ${NEXT_PUBLIC_BATCH_CONCURRENCY:-5} - STORAGE_TYPE: ${STORAGE_TYPE:-opendal} - OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs} - OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage} - CLICKZETTA_VOLUME_TYPE: ${CLICKZETTA_VOLUME_TYPE:-user} - CLICKZETTA_VOLUME_NAME: ${CLICKZETTA_VOLUME_NAME:-} - CLICKZETTA_VOLUME_TABLE_PREFIX: ${CLICKZETTA_VOLUME_TABLE_PREFIX:-dataset_} - CLICKZETTA_VOLUME_DIFY_PREFIX: ${CLICKZETTA_VOLUME_DIFY_PREFIX:-dify_km} - S3_ENDPOINT: ${S3_ENDPOINT:-} - S3_REGION: ${S3_REGION:-us-east-1} - S3_BUCKET_NAME: ${S3_BUCKET_NAME:-difyai} - S3_ACCESS_KEY: ${S3_ACCESS_KEY:-} - S3_SECRET_KEY: ${S3_SECRET_KEY:-} - S3_ADDRESS_STYLE: ${S3_ADDRESS_STYLE:-auto} - S3_USE_AWS_MANAGED_IAM: ${S3_USE_AWS_MANAGED_IAM:-false} - ARCHIVE_STORAGE_ENABLED: ${ARCHIVE_STORAGE_ENABLED:-false} - ARCHIVE_STORAGE_ENDPOINT: ${ARCHIVE_STORAGE_ENDPOINT:-} - 
ARCHIVE_STORAGE_ARCHIVE_BUCKET: ${ARCHIVE_STORAGE_ARCHIVE_BUCKET:-} - ARCHIVE_STORAGE_EXPORT_BUCKET: ${ARCHIVE_STORAGE_EXPORT_BUCKET:-} - ARCHIVE_STORAGE_ACCESS_KEY: ${ARCHIVE_STORAGE_ACCESS_KEY:-} - ARCHIVE_STORAGE_SECRET_KEY: ${ARCHIVE_STORAGE_SECRET_KEY:-} - ARCHIVE_STORAGE_REGION: ${ARCHIVE_STORAGE_REGION:-auto} - AZURE_BLOB_ACCOUNT_NAME: ${AZURE_BLOB_ACCOUNT_NAME:-difyai} - AZURE_BLOB_ACCOUNT_KEY: ${AZURE_BLOB_ACCOUNT_KEY:-difyai} - AZURE_BLOB_CONTAINER_NAME: ${AZURE_BLOB_CONTAINER_NAME:-difyai-container} - AZURE_BLOB_ACCOUNT_URL: ${AZURE_BLOB_ACCOUNT_URL:-https://.blob.core.windows.net} - GOOGLE_STORAGE_BUCKET_NAME: ${GOOGLE_STORAGE_BUCKET_NAME:-your-bucket-name} - GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64: ${GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64:-} - ALIYUN_OSS_BUCKET_NAME: ${ALIYUN_OSS_BUCKET_NAME:-your-bucket-name} - ALIYUN_OSS_ACCESS_KEY: ${ALIYUN_OSS_ACCESS_KEY:-your-access-key} - ALIYUN_OSS_SECRET_KEY: ${ALIYUN_OSS_SECRET_KEY:-your-secret-key} - ALIYUN_OSS_ENDPOINT: ${ALIYUN_OSS_ENDPOINT:-https://oss-ap-southeast-1-internal.aliyuncs.com} - ALIYUN_OSS_REGION: ${ALIYUN_OSS_REGION:-ap-southeast-1} - ALIYUN_OSS_AUTH_VERSION: ${ALIYUN_OSS_AUTH_VERSION:-v4} - ALIYUN_OSS_PATH: ${ALIYUN_OSS_PATH:-your-path} - TENCENT_COS_BUCKET_NAME: ${TENCENT_COS_BUCKET_NAME:-your-bucket-name} - TENCENT_COS_SECRET_KEY: ${TENCENT_COS_SECRET_KEY:-your-secret-key} - TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id} - TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region} - TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme} - TENCENT_COS_CUSTOM_DOMAIN: ${TENCENT_COS_CUSTOM_DOMAIN:-your-custom-domain} - OCI_ENDPOINT: ${OCI_ENDPOINT:-https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com} - OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name} - OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key} - OCI_SECRET_KEY: ${OCI_SECRET_KEY:-your-secret-key} - OCI_REGION: ${OCI_REGION:-us-ashburn-1} - HUAWEI_OBS_BUCKET_NAME: 
${HUAWEI_OBS_BUCKET_NAME:-your-bucket-name} - HUAWEI_OBS_SECRET_KEY: ${HUAWEI_OBS_SECRET_KEY:-your-secret-key} - HUAWEI_OBS_ACCESS_KEY: ${HUAWEI_OBS_ACCESS_KEY:-your-access-key} - HUAWEI_OBS_SERVER: ${HUAWEI_OBS_SERVER:-your-server-url} - HUAWEI_OBS_PATH_STYLE: ${HUAWEI_OBS_PATH_STYLE:-false} - VOLCENGINE_TOS_BUCKET_NAME: ${VOLCENGINE_TOS_BUCKET_NAME:-your-bucket-name} - VOLCENGINE_TOS_SECRET_KEY: ${VOLCENGINE_TOS_SECRET_KEY:-your-secret-key} - VOLCENGINE_TOS_ACCESS_KEY: ${VOLCENGINE_TOS_ACCESS_KEY:-your-access-key} - VOLCENGINE_TOS_ENDPOINT: ${VOLCENGINE_TOS_ENDPOINT:-your-server-url} - VOLCENGINE_TOS_REGION: ${VOLCENGINE_TOS_REGION:-your-region} - BAIDU_OBS_BUCKET_NAME: ${BAIDU_OBS_BUCKET_NAME:-your-bucket-name} - BAIDU_OBS_SECRET_KEY: ${BAIDU_OBS_SECRET_KEY:-your-secret-key} - BAIDU_OBS_ACCESS_KEY: ${BAIDU_OBS_ACCESS_KEY:-your-access-key} - BAIDU_OBS_ENDPOINT: ${BAIDU_OBS_ENDPOINT:-your-server-url} - SUPABASE_BUCKET_NAME: ${SUPABASE_BUCKET_NAME:-your-bucket-name} - SUPABASE_API_KEY: ${SUPABASE_API_KEY:-your-access-key} - SUPABASE_URL: ${SUPABASE_URL:-your-server-url} - VECTOR_STORE: ${VECTOR_STORE:-weaviate} - VECTOR_INDEX_NAME_PREFIX: ${VECTOR_INDEX_NAME_PREFIX:-Vector_index} - WEAVIATE_ENDPOINT: ${WEAVIATE_ENDPOINT:-http://weaviate:8080} - WEAVIATE_API_KEY: ${WEAVIATE_API_KEY:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} - WEAVIATE_GRPC_ENDPOINT: ${WEAVIATE_GRPC_ENDPOINT:-grpc://weaviate:50051} - WEAVIATE_TOKENIZATION: ${WEAVIATE_TOKENIZATION:-word} - OCEANBASE_VECTOR_HOST: ${OCEANBASE_VECTOR_HOST:-oceanbase} - OCEANBASE_VECTOR_PORT: ${OCEANBASE_VECTOR_PORT:-2881} - OCEANBASE_VECTOR_USER: ${OCEANBASE_VECTOR_USER:-root@test} - OCEANBASE_VECTOR_PASSWORD: ${OCEANBASE_VECTOR_PASSWORD:-difyai123456} - OCEANBASE_VECTOR_DATABASE: ${OCEANBASE_VECTOR_DATABASE:-test} - OCEANBASE_CLUSTER_NAME: ${OCEANBASE_CLUSTER_NAME:-difyai} - OCEANBASE_MEMORY_LIMIT: ${OCEANBASE_MEMORY_LIMIT:-6G} - OCEANBASE_ENABLE_HYBRID_SEARCH: ${OCEANBASE_ENABLE_HYBRID_SEARCH:-false} - 
OCEANBASE_FULLTEXT_PARSER: ${OCEANBASE_FULLTEXT_PARSER:-ik} - SEEKDB_MEMORY_LIMIT: ${SEEKDB_MEMORY_LIMIT:-2G} - QDRANT_URL: ${QDRANT_URL:-http://qdrant:6333} - QDRANT_API_KEY: ${QDRANT_API_KEY:-difyai123456} - QDRANT_CLIENT_TIMEOUT: ${QDRANT_CLIENT_TIMEOUT:-20} - QDRANT_GRPC_ENABLED: ${QDRANT_GRPC_ENABLED:-false} - QDRANT_GRPC_PORT: ${QDRANT_GRPC_PORT:-6334} - QDRANT_REPLICATION_FACTOR: ${QDRANT_REPLICATION_FACTOR:-1} - MILVUS_URI: ${MILVUS_URI:-http://host.docker.internal:19530} - MILVUS_DATABASE: ${MILVUS_DATABASE:-} - MILVUS_TOKEN: ${MILVUS_TOKEN:-} - MILVUS_USER: ${MILVUS_USER:-} - MILVUS_PASSWORD: ${MILVUS_PASSWORD:-} - MILVUS_ENABLE_HYBRID_SEARCH: ${MILVUS_ENABLE_HYBRID_SEARCH:-False} - MILVUS_ANALYZER_PARAMS: ${MILVUS_ANALYZER_PARAMS:-} - MYSCALE_HOST: ${MYSCALE_HOST:-myscale} - MYSCALE_PORT: ${MYSCALE_PORT:-8123} - MYSCALE_USER: ${MYSCALE_USER:-default} - MYSCALE_PASSWORD: ${MYSCALE_PASSWORD:-} - MYSCALE_DATABASE: ${MYSCALE_DATABASE:-dify} - MYSCALE_FTS_PARAMS: ${MYSCALE_FTS_PARAMS:-} - COUCHBASE_CONNECTION_STRING: ${COUCHBASE_CONNECTION_STRING:-couchbase://couchbase-server} - COUCHBASE_USER: ${COUCHBASE_USER:-Administrator} - COUCHBASE_PASSWORD: ${COUCHBASE_PASSWORD:-password} - COUCHBASE_BUCKET_NAME: ${COUCHBASE_BUCKET_NAME:-Embeddings} - COUCHBASE_SCOPE_NAME: ${COUCHBASE_SCOPE_NAME:-_default} - HOLOGRES_HOST: ${HOLOGRES_HOST:-} - HOLOGRES_PORT: ${HOLOGRES_PORT:-80} - HOLOGRES_DATABASE: ${HOLOGRES_DATABASE:-} - HOLOGRES_ACCESS_KEY_ID: ${HOLOGRES_ACCESS_KEY_ID:-} - HOLOGRES_ACCESS_KEY_SECRET: ${HOLOGRES_ACCESS_KEY_SECRET:-} - HOLOGRES_SCHEMA: ${HOLOGRES_SCHEMA:-public} - HOLOGRES_TOKENIZER: ${HOLOGRES_TOKENIZER:-jieba} - HOLOGRES_DISTANCE_METHOD: ${HOLOGRES_DISTANCE_METHOD:-Cosine} - HOLOGRES_BASE_QUANTIZATION_TYPE: ${HOLOGRES_BASE_QUANTIZATION_TYPE:-rabitq} - HOLOGRES_MAX_DEGREE: ${HOLOGRES_MAX_DEGREE:-64} - HOLOGRES_EF_CONSTRUCTION: ${HOLOGRES_EF_CONSTRUCTION:-400} - PGVECTOR_HOST: ${PGVECTOR_HOST:-pgvector} - PGVECTOR_PORT: ${PGVECTOR_PORT:-5432} - 
PGVECTOR_USER: ${PGVECTOR_USER:-postgres} - PGVECTOR_PASSWORD: ${PGVECTOR_PASSWORD:-difyai123456} - PGVECTOR_DATABASE: ${PGVECTOR_DATABASE:-dify} - PGVECTOR_MIN_CONNECTION: ${PGVECTOR_MIN_CONNECTION:-1} - PGVECTOR_MAX_CONNECTION: ${PGVECTOR_MAX_CONNECTION:-5} - PGVECTOR_PG_BIGM: ${PGVECTOR_PG_BIGM:-false} - PGVECTOR_PG_BIGM_VERSION: ${PGVECTOR_PG_BIGM_VERSION:-1.2-20240606} - VASTBASE_HOST: ${VASTBASE_HOST:-vastbase} - VASTBASE_PORT: ${VASTBASE_PORT:-5432} - VASTBASE_USER: ${VASTBASE_USER:-dify} - VASTBASE_PASSWORD: ${VASTBASE_PASSWORD:-Difyai123456} - VASTBASE_DATABASE: ${VASTBASE_DATABASE:-dify} - VASTBASE_MIN_CONNECTION: ${VASTBASE_MIN_CONNECTION:-1} - VASTBASE_MAX_CONNECTION: ${VASTBASE_MAX_CONNECTION:-5} - PGVECTO_RS_HOST: ${PGVECTO_RS_HOST:-pgvecto-rs} - PGVECTO_RS_PORT: ${PGVECTO_RS_PORT:-5432} - PGVECTO_RS_USER: ${PGVECTO_RS_USER:-postgres} - PGVECTO_RS_PASSWORD: ${PGVECTO_RS_PASSWORD:-difyai123456} - PGVECTO_RS_DATABASE: ${PGVECTO_RS_DATABASE:-dify} - ANALYTICDB_KEY_ID: ${ANALYTICDB_KEY_ID:-your-ak} - ANALYTICDB_KEY_SECRET: ${ANALYTICDB_KEY_SECRET:-your-sk} - ANALYTICDB_REGION_ID: ${ANALYTICDB_REGION_ID:-cn-hangzhou} - ANALYTICDB_INSTANCE_ID: ${ANALYTICDB_INSTANCE_ID:-gp-ab123456} - ANALYTICDB_ACCOUNT: ${ANALYTICDB_ACCOUNT:-testaccount} - ANALYTICDB_PASSWORD: ${ANALYTICDB_PASSWORD:-testpassword} - ANALYTICDB_NAMESPACE: ${ANALYTICDB_NAMESPACE:-dify} - ANALYTICDB_NAMESPACE_PASSWORD: ${ANALYTICDB_NAMESPACE_PASSWORD:-difypassword} - ANALYTICDB_HOST: ${ANALYTICDB_HOST:-gp-test.aliyuncs.com} - ANALYTICDB_PORT: ${ANALYTICDB_PORT:-5432} - ANALYTICDB_MIN_CONNECTION: ${ANALYTICDB_MIN_CONNECTION:-1} - ANALYTICDB_MAX_CONNECTION: ${ANALYTICDB_MAX_CONNECTION:-5} - TIDB_VECTOR_HOST: ${TIDB_VECTOR_HOST:-tidb} - TIDB_VECTOR_PORT: ${TIDB_VECTOR_PORT:-4000} - TIDB_VECTOR_USER: ${TIDB_VECTOR_USER:-} - TIDB_VECTOR_PASSWORD: ${TIDB_VECTOR_PASSWORD:-} - TIDB_VECTOR_DATABASE: ${TIDB_VECTOR_DATABASE:-dify} - MATRIXONE_HOST: ${MATRIXONE_HOST:-matrixone} - MATRIXONE_PORT: 
${MATRIXONE_PORT:-6001} - MATRIXONE_USER: ${MATRIXONE_USER:-dump} - MATRIXONE_PASSWORD: ${MATRIXONE_PASSWORD:-111} - MATRIXONE_DATABASE: ${MATRIXONE_DATABASE:-dify} - TIDB_ON_QDRANT_URL: ${TIDB_ON_QDRANT_URL:-http://127.0.0.1} - TIDB_ON_QDRANT_API_KEY: ${TIDB_ON_QDRANT_API_KEY:-dify} - TIDB_ON_QDRANT_CLIENT_TIMEOUT: ${TIDB_ON_QDRANT_CLIENT_TIMEOUT:-20} - TIDB_ON_QDRANT_GRPC_ENABLED: ${TIDB_ON_QDRANT_GRPC_ENABLED:-false} - TIDB_ON_QDRANT_GRPC_PORT: ${TIDB_ON_QDRANT_GRPC_PORT:-6334} - TIDB_PUBLIC_KEY: ${TIDB_PUBLIC_KEY:-dify} - TIDB_PRIVATE_KEY: ${TIDB_PRIVATE_KEY:-dify} - TIDB_API_URL: ${TIDB_API_URL:-http://127.0.0.1} - TIDB_IAM_API_URL: ${TIDB_IAM_API_URL:-http://127.0.0.1} - TIDB_REGION: ${TIDB_REGION:-regions/aws-us-east-1} - TIDB_PROJECT_ID: ${TIDB_PROJECT_ID:-dify} - TIDB_SPEND_LIMIT: ${TIDB_SPEND_LIMIT:-100} - CHROMA_HOST: ${CHROMA_HOST:-127.0.0.1} - CHROMA_PORT: ${CHROMA_PORT:-8000} - CHROMA_TENANT: ${CHROMA_TENANT:-default_tenant} - CHROMA_DATABASE: ${CHROMA_DATABASE:-default_database} - CHROMA_AUTH_PROVIDER: ${CHROMA_AUTH_PROVIDER:-chromadb.auth.token_authn.TokenAuthClientProvider} - CHROMA_AUTH_CREDENTIALS: ${CHROMA_AUTH_CREDENTIALS:-} - ORACLE_USER: ${ORACLE_USER:-dify} - ORACLE_PASSWORD: ${ORACLE_PASSWORD:-dify} - ORACLE_DSN: ${ORACLE_DSN:-oracle:1521/FREEPDB1} - ORACLE_CONFIG_DIR: ${ORACLE_CONFIG_DIR:-/app/api/storage/wallet} - ORACLE_WALLET_LOCATION: ${ORACLE_WALLET_LOCATION:-/app/api/storage/wallet} - ORACLE_WALLET_PASSWORD: ${ORACLE_WALLET_PASSWORD:-dify} - ORACLE_IS_AUTONOMOUS: ${ORACLE_IS_AUTONOMOUS:-false} - ALIBABACLOUD_MYSQL_HOST: ${ALIBABACLOUD_MYSQL_HOST:-127.0.0.1} - ALIBABACLOUD_MYSQL_PORT: ${ALIBABACLOUD_MYSQL_PORT:-3306} - ALIBABACLOUD_MYSQL_USER: ${ALIBABACLOUD_MYSQL_USER:-root} - ALIBABACLOUD_MYSQL_PASSWORD: ${ALIBABACLOUD_MYSQL_PASSWORD:-difyai123456} - ALIBABACLOUD_MYSQL_DATABASE: ${ALIBABACLOUD_MYSQL_DATABASE:-dify} - ALIBABACLOUD_MYSQL_MAX_CONNECTION: ${ALIBABACLOUD_MYSQL_MAX_CONNECTION:-5} - ALIBABACLOUD_MYSQL_HNSW_M: 
${ALIBABACLOUD_MYSQL_HNSW_M:-6} - RELYT_HOST: ${RELYT_HOST:-db} - RELYT_PORT: ${RELYT_PORT:-5432} - RELYT_USER: ${RELYT_USER:-postgres} - RELYT_PASSWORD: ${RELYT_PASSWORD:-difyai123456} - RELYT_DATABASE: ${RELYT_DATABASE:-postgres} - OPENSEARCH_HOST: ${OPENSEARCH_HOST:-opensearch} - OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200} - OPENSEARCH_SECURE: ${OPENSEARCH_SECURE:-true} - OPENSEARCH_VERIFY_CERTS: ${OPENSEARCH_VERIFY_CERTS:-true} - OPENSEARCH_AUTH_METHOD: ${OPENSEARCH_AUTH_METHOD:-basic} - OPENSEARCH_USER: ${OPENSEARCH_USER:-admin} - OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD:-admin} - OPENSEARCH_AWS_REGION: ${OPENSEARCH_AWS_REGION:-ap-southeast-1} - OPENSEARCH_AWS_SERVICE: ${OPENSEARCH_AWS_SERVICE:-aoss} - TENCENT_VECTOR_DB_URL: ${TENCENT_VECTOR_DB_URL:-http://127.0.0.1} - TENCENT_VECTOR_DB_API_KEY: ${TENCENT_VECTOR_DB_API_KEY:-dify} - TENCENT_VECTOR_DB_TIMEOUT: ${TENCENT_VECTOR_DB_TIMEOUT:-30} - TENCENT_VECTOR_DB_USERNAME: ${TENCENT_VECTOR_DB_USERNAME:-dify} - TENCENT_VECTOR_DB_DATABASE: ${TENCENT_VECTOR_DB_DATABASE:-dify} - TENCENT_VECTOR_DB_SHARD: ${TENCENT_VECTOR_DB_SHARD:-1} - TENCENT_VECTOR_DB_REPLICAS: ${TENCENT_VECTOR_DB_REPLICAS:-2} - TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH: ${TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH:-false} - ELASTICSEARCH_HOST: ${ELASTICSEARCH_HOST:-0.0.0.0} - ELASTICSEARCH_PORT: ${ELASTICSEARCH_PORT:-9200} - ELASTICSEARCH_USERNAME: ${ELASTICSEARCH_USERNAME:-elastic} - ELASTICSEARCH_PASSWORD: ${ELASTICSEARCH_PASSWORD:-elastic} - KIBANA_PORT: ${KIBANA_PORT:-5601} - ELASTICSEARCH_USE_CLOUD: ${ELASTICSEARCH_USE_CLOUD:-false} - ELASTICSEARCH_CLOUD_URL: ${ELASTICSEARCH_CLOUD_URL:-YOUR-ELASTICSEARCH_CLOUD_URL} - ELASTICSEARCH_API_KEY: ${ELASTICSEARCH_API_KEY:-YOUR-ELASTICSEARCH_API_KEY} - ELASTICSEARCH_VERIFY_CERTS: ${ELASTICSEARCH_VERIFY_CERTS:-False} - ELASTICSEARCH_CA_CERTS: ${ELASTICSEARCH_CA_CERTS:-} - ELASTICSEARCH_REQUEST_TIMEOUT: ${ELASTICSEARCH_REQUEST_TIMEOUT:-100000} - ELASTICSEARCH_RETRY_ON_TIMEOUT: 
${ELASTICSEARCH_RETRY_ON_TIMEOUT:-True} - ELASTICSEARCH_MAX_RETRIES: ${ELASTICSEARCH_MAX_RETRIES:-10} - BAIDU_VECTOR_DB_ENDPOINT: ${BAIDU_VECTOR_DB_ENDPOINT:-http://127.0.0.1:5287} - BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS: ${BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS:-30000} - BAIDU_VECTOR_DB_ACCOUNT: ${BAIDU_VECTOR_DB_ACCOUNT:-root} - BAIDU_VECTOR_DB_API_KEY: ${BAIDU_VECTOR_DB_API_KEY:-dify} - BAIDU_VECTOR_DB_DATABASE: ${BAIDU_VECTOR_DB_DATABASE:-dify} - BAIDU_VECTOR_DB_SHARD: ${BAIDU_VECTOR_DB_SHARD:-1} - BAIDU_VECTOR_DB_REPLICAS: ${BAIDU_VECTOR_DB_REPLICAS:-3} - BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER: ${BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER:-DEFAULT_ANALYZER} - BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE: ${BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE:-COARSE_MODE} - BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT: ${BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT:-500} - BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO: ${BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO:-0.05} - BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS: ${BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS:-300} - VIKINGDB_ACCESS_KEY: ${VIKINGDB_ACCESS_KEY:-your-ak} - VIKINGDB_SECRET_KEY: ${VIKINGDB_SECRET_KEY:-your-sk} - VIKINGDB_REGION: ${VIKINGDB_REGION:-cn-shanghai} - VIKINGDB_HOST: ${VIKINGDB_HOST:-api-vikingdb.xxx.volces.com} - VIKINGDB_SCHEMA: ${VIKINGDB_SCHEMA:-http} - VIKINGDB_CONNECTION_TIMEOUT: ${VIKINGDB_CONNECTION_TIMEOUT:-30} - VIKINGDB_SOCKET_TIMEOUT: ${VIKINGDB_SOCKET_TIMEOUT:-30} - LINDORM_URL: ${LINDORM_URL:-http://localhost:30070} - LINDORM_USERNAME: ${LINDORM_USERNAME:-admin} - LINDORM_PASSWORD: ${LINDORM_PASSWORD:-admin} - LINDORM_USING_UGC: ${LINDORM_USING_UGC:-True} - LINDORM_QUERY_TIMEOUT: ${LINDORM_QUERY_TIMEOUT:-1} - OPENGAUSS_HOST: ${OPENGAUSS_HOST:-opengauss} - OPENGAUSS_PORT: ${OPENGAUSS_PORT:-6600} - OPENGAUSS_USER: ${OPENGAUSS_USER:-postgres} - OPENGAUSS_PASSWORD: ${OPENGAUSS_PASSWORD:-Dify@123} - OPENGAUSS_DATABASE: ${OPENGAUSS_DATABASE:-dify} - 
OPENGAUSS_MIN_CONNECTION: ${OPENGAUSS_MIN_CONNECTION:-1} - OPENGAUSS_MAX_CONNECTION: ${OPENGAUSS_MAX_CONNECTION:-5} - OPENGAUSS_ENABLE_PQ: ${OPENGAUSS_ENABLE_PQ:-false} - HUAWEI_CLOUD_HOSTS: ${HUAWEI_CLOUD_HOSTS:-https://127.0.0.1:9200} - HUAWEI_CLOUD_USER: ${HUAWEI_CLOUD_USER:-admin} - HUAWEI_CLOUD_PASSWORD: ${HUAWEI_CLOUD_PASSWORD:-admin} - UPSTASH_VECTOR_URL: ${UPSTASH_VECTOR_URL:-https://xxx-vector.upstash.io} - UPSTASH_VECTOR_TOKEN: ${UPSTASH_VECTOR_TOKEN:-dify} - TABLESTORE_ENDPOINT: ${TABLESTORE_ENDPOINT:-https://instance-name.cn-hangzhou.ots.aliyuncs.com} - TABLESTORE_INSTANCE_NAME: ${TABLESTORE_INSTANCE_NAME:-instance-name} - TABLESTORE_ACCESS_KEY_ID: ${TABLESTORE_ACCESS_KEY_ID:-xxx} - TABLESTORE_ACCESS_KEY_SECRET: ${TABLESTORE_ACCESS_KEY_SECRET:-xxx} - TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE: ${TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE:-false} - CLICKZETTA_USERNAME: ${CLICKZETTA_USERNAME:-} - CLICKZETTA_PASSWORD: ${CLICKZETTA_PASSWORD:-} - CLICKZETTA_INSTANCE: ${CLICKZETTA_INSTANCE:-} - CLICKZETTA_SERVICE: ${CLICKZETTA_SERVICE:-api.clickzetta.com} - CLICKZETTA_WORKSPACE: ${CLICKZETTA_WORKSPACE:-quick_start} - CLICKZETTA_VCLUSTER: ${CLICKZETTA_VCLUSTER:-default_ap} - CLICKZETTA_SCHEMA: ${CLICKZETTA_SCHEMA:-dify} - CLICKZETTA_BATCH_SIZE: ${CLICKZETTA_BATCH_SIZE:-100} - CLICKZETTA_ENABLE_INVERTED_INDEX: ${CLICKZETTA_ENABLE_INVERTED_INDEX:-true} - CLICKZETTA_ANALYZER_TYPE: ${CLICKZETTA_ANALYZER_TYPE:-chinese} - CLICKZETTA_ANALYZER_MODE: ${CLICKZETTA_ANALYZER_MODE:-smart} - CLICKZETTA_VECTOR_DISTANCE_FUNCTION: ${CLICKZETTA_VECTOR_DISTANCE_FUNCTION:-cosine_distance} - IRIS_HOST: ${IRIS_HOST:-iris} - IRIS_SUPER_SERVER_PORT: ${IRIS_SUPER_SERVER_PORT:-1972} - IRIS_WEB_SERVER_PORT: ${IRIS_WEB_SERVER_PORT:-52773} - IRIS_USER: ${IRIS_USER:-_SYSTEM} - IRIS_PASSWORD: ${IRIS_PASSWORD:-Dify@1234} - IRIS_DATABASE: ${IRIS_DATABASE:-USER} - IRIS_SCHEMA: ${IRIS_SCHEMA:-dify} - IRIS_CONNECTION_URL: ${IRIS_CONNECTION_URL:-} - IRIS_MIN_CONNECTION: ${IRIS_MIN_CONNECTION:-1} - 
IRIS_MAX_CONNECTION: ${IRIS_MAX_CONNECTION:-3} - IRIS_TEXT_INDEX: ${IRIS_TEXT_INDEX:-true} - IRIS_TEXT_INDEX_LANGUAGE: ${IRIS_TEXT_INDEX_LANGUAGE:-en} - IRIS_TIMEZONE: ${IRIS_TIMEZONE:-UTC} - UPLOAD_FILE_SIZE_LIMIT: ${UPLOAD_FILE_SIZE_LIMIT:-15} - UPLOAD_FILE_BATCH_LIMIT: ${UPLOAD_FILE_BATCH_LIMIT:-5} - UPLOAD_FILE_EXTENSION_BLACKLIST: ${UPLOAD_FILE_EXTENSION_BLACKLIST:-} - SINGLE_CHUNK_ATTACHMENT_LIMIT: ${SINGLE_CHUNK_ATTACHMENT_LIMIT:-10} - IMAGE_FILE_BATCH_LIMIT: ${IMAGE_FILE_BATCH_LIMIT:-10} - ATTACHMENT_IMAGE_FILE_SIZE_LIMIT: ${ATTACHMENT_IMAGE_FILE_SIZE_LIMIT:-2} - ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT: ${ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT:-60} - ETL_TYPE: ${ETL_TYPE:-dify} - UNSTRUCTURED_API_URL: ${UNSTRUCTURED_API_URL:-} - UNSTRUCTURED_API_KEY: ${UNSTRUCTURED_API_KEY:-} - SCARF_NO_ANALYTICS: ${SCARF_NO_ANALYTICS:-true} - PROMPT_GENERATION_MAX_TOKENS: ${PROMPT_GENERATION_MAX_TOKENS:-512} - CODE_GENERATION_MAX_TOKENS: ${CODE_GENERATION_MAX_TOKENS:-1024} - PLUGIN_BASED_TOKEN_COUNTING_ENABLED: ${PLUGIN_BASED_TOKEN_COUNTING_ENABLED:-false} - MULTIMODAL_SEND_FORMAT: ${MULTIMODAL_SEND_FORMAT:-base64} - UPLOAD_IMAGE_FILE_SIZE_LIMIT: ${UPLOAD_IMAGE_FILE_SIZE_LIMIT:-10} - UPLOAD_VIDEO_FILE_SIZE_LIMIT: ${UPLOAD_VIDEO_FILE_SIZE_LIMIT:-100} - UPLOAD_AUDIO_FILE_SIZE_LIMIT: ${UPLOAD_AUDIO_FILE_SIZE_LIMIT:-50} - SENTRY_DSN: ${SENTRY_DSN:-} - API_SENTRY_DSN: ${API_SENTRY_DSN:-} - API_SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} - API_SENTRY_PROFILES_SAMPLE_RATE: ${API_SENTRY_PROFILES_SAMPLE_RATE:-1.0} - WEB_SENTRY_DSN: ${WEB_SENTRY_DSN:-} - PLUGIN_SENTRY_ENABLED: ${PLUGIN_SENTRY_ENABLED:-false} - PLUGIN_SENTRY_DSN: ${PLUGIN_SENTRY_DSN:-} - NOTION_INTEGRATION_TYPE: ${NOTION_INTEGRATION_TYPE:-public} - NOTION_CLIENT_SECRET: ${NOTION_CLIENT_SECRET:-} - NOTION_CLIENT_ID: ${NOTION_CLIENT_ID:-} - NOTION_INTERNAL_SECRET: ${NOTION_INTERNAL_SECRET:-} - MAIL_TYPE: ${MAIL_TYPE:-resend} - MAIL_DEFAULT_SEND_FROM: ${MAIL_DEFAULT_SEND_FROM:-} - RESEND_API_URL: 
${RESEND_API_URL:-https://api.resend.com} - RESEND_API_KEY: ${RESEND_API_KEY:-your-resend-api-key} - SMTP_SERVER: ${SMTP_SERVER:-} - SMTP_PORT: ${SMTP_PORT:-465} - SMTP_USERNAME: ${SMTP_USERNAME:-} - SMTP_PASSWORD: ${SMTP_PASSWORD:-} - SMTP_USE_TLS: ${SMTP_USE_TLS:-true} - SMTP_OPPORTUNISTIC_TLS: ${SMTP_OPPORTUNISTIC_TLS:-false} - SMTP_LOCAL_HOSTNAME: ${SMTP_LOCAL_HOSTNAME:-} - SENDGRID_API_KEY: ${SENDGRID_API_KEY:-} - INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000} - INVITE_EXPIRY_HOURS: ${INVITE_EXPIRY_HOURS:-72} - RESET_PASSWORD_TOKEN_EXPIRY_MINUTES: ${RESET_PASSWORD_TOKEN_EXPIRY_MINUTES:-5} - EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES: ${EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES:-5} - CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES: ${CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES:-5} - OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES: ${OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES:-5} - CODE_EXECUTION_ENDPOINT: ${CODE_EXECUTION_ENDPOINT:-http://sandbox:8194} - CODE_EXECUTION_API_KEY: ${CODE_EXECUTION_API_KEY:-dify-sandbox} - CODE_EXECUTION_SSL_VERIFY: ${CODE_EXECUTION_SSL_VERIFY:-True} - CODE_EXECUTION_POOL_MAX_CONNECTIONS: ${CODE_EXECUTION_POOL_MAX_CONNECTIONS:-100} - CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS: ${CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS:-20} - CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY: ${CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY:-5.0} - CODE_MAX_NUMBER: ${CODE_MAX_NUMBER:-9223372036854775807} - CODE_MIN_NUMBER: ${CODE_MIN_NUMBER:--9223372036854775808} - CODE_MAX_DEPTH: ${CODE_MAX_DEPTH:-5} - CODE_MAX_PRECISION: ${CODE_MAX_PRECISION:-20} - CODE_MAX_STRING_LENGTH: ${CODE_MAX_STRING_LENGTH:-400000} - CODE_MAX_STRING_ARRAY_LENGTH: ${CODE_MAX_STRING_ARRAY_LENGTH:-30} - CODE_MAX_OBJECT_ARRAY_LENGTH: ${CODE_MAX_OBJECT_ARRAY_LENGTH:-30} - CODE_MAX_NUMBER_ARRAY_LENGTH: ${CODE_MAX_NUMBER_ARRAY_LENGTH:-1000} - CODE_EXECUTION_CONNECT_TIMEOUT: ${CODE_EXECUTION_CONNECT_TIMEOUT:-10} - CODE_EXECUTION_READ_TIMEOUT: ${CODE_EXECUTION_READ_TIMEOUT:-60} - CODE_EXECUTION_WRITE_TIMEOUT: 
${CODE_EXECUTION_WRITE_TIMEOUT:-10} - TEMPLATE_TRANSFORM_MAX_LENGTH: ${TEMPLATE_TRANSFORM_MAX_LENGTH:-400000} - WORKFLOW_MAX_EXECUTION_STEPS: ${WORKFLOW_MAX_EXECUTION_STEPS:-500} - WORKFLOW_MAX_EXECUTION_TIME: ${WORKFLOW_MAX_EXECUTION_TIME:-1200} - WORKFLOW_CALL_MAX_DEPTH: ${WORKFLOW_CALL_MAX_DEPTH:-5} - MAX_VARIABLE_SIZE: ${MAX_VARIABLE_SIZE:-204800} - WORKFLOW_FILE_UPLOAD_LIMIT: ${WORKFLOW_FILE_UPLOAD_LIMIT:-10} - GRAPH_ENGINE_MIN_WORKERS: ${GRAPH_ENGINE_MIN_WORKERS:-1} - GRAPH_ENGINE_MAX_WORKERS: ${GRAPH_ENGINE_MAX_WORKERS:-10} - GRAPH_ENGINE_SCALE_UP_THRESHOLD: ${GRAPH_ENGINE_SCALE_UP_THRESHOLD:-3} - GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME: ${GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME:-5.0} - WORKFLOW_NODE_EXECUTION_STORAGE: ${WORKFLOW_NODE_EXECUTION_STORAGE:-rdbms} - CORE_WORKFLOW_EXECUTION_REPOSITORY: ${CORE_WORKFLOW_EXECUTION_REPOSITORY:-core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository} - CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY: ${CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY:-core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository} - API_WORKFLOW_RUN_REPOSITORY: ${API_WORKFLOW_RUN_REPOSITORY:-repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository} - API_WORKFLOW_NODE_EXECUTION_REPOSITORY: ${API_WORKFLOW_NODE_EXECUTION_REPOSITORY:-repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository} - WORKFLOW_LOG_CLEANUP_ENABLED: ${WORKFLOW_LOG_CLEANUP_ENABLED:-false} - WORKFLOW_LOG_RETENTION_DAYS: ${WORKFLOW_LOG_RETENTION_DAYS:-30} - WORKFLOW_LOG_CLEANUP_BATCH_SIZE: ${WORKFLOW_LOG_CLEANUP_BATCH_SIZE:-100} - WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS: ${WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS:-} - ALIYUN_SLS_ACCESS_KEY_ID: ${ALIYUN_SLS_ACCESS_KEY_ID:-} - ALIYUN_SLS_ACCESS_KEY_SECRET: ${ALIYUN_SLS_ACCESS_KEY_SECRET:-} - ALIYUN_SLS_ENDPOINT: ${ALIYUN_SLS_ENDPOINT:-} - ALIYUN_SLS_REGION: 
${ALIYUN_SLS_REGION:-} - ALIYUN_SLS_PROJECT_NAME: ${ALIYUN_SLS_PROJECT_NAME:-} - ALIYUN_SLS_LOGSTORE_TTL: ${ALIYUN_SLS_LOGSTORE_TTL:-365} - LOGSTORE_DUAL_WRITE_ENABLED: ${LOGSTORE_DUAL_WRITE_ENABLED:-false} - LOGSTORE_DUAL_READ_ENABLED: ${LOGSTORE_DUAL_READ_ENABLED:-true} - LOGSTORE_ENABLE_PUT_GRAPH_FIELD: ${LOGSTORE_ENABLE_PUT_GRAPH_FIELD:-true} - HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760} - HTTP_REQUEST_NODE_MAX_TEXT_SIZE: ${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576} - HTTP_REQUEST_NODE_SSL_VERIFY: ${HTTP_REQUEST_NODE_SSL_VERIFY:-True} - HTTP_REQUEST_MAX_CONNECT_TIMEOUT: ${HTTP_REQUEST_MAX_CONNECT_TIMEOUT:-10} - HTTP_REQUEST_MAX_READ_TIMEOUT: ${HTTP_REQUEST_MAX_READ_TIMEOUT:-600} - HTTP_REQUEST_MAX_WRITE_TIMEOUT: ${HTTP_REQUEST_MAX_WRITE_TIMEOUT:-600} - WEBHOOK_REQUEST_BODY_MAX_SIZE: ${WEBHOOK_REQUEST_BODY_MAX_SIZE:-10485760} - RESPECT_XFORWARD_HEADERS_ENABLED: ${RESPECT_XFORWARD_HEADERS_ENABLED:-false} - SSRF_PROXY_HTTP_URL: ${SSRF_PROXY_HTTP_URL:-http://ssrf_proxy:3128} - SSRF_PROXY_HTTPS_URL: ${SSRF_PROXY_HTTPS_URL:-http://ssrf_proxy:3128} - LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} - MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} - MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} - MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99} - TEXT_GENERATION_TIMEOUT_MS: ${TEXT_GENERATION_TIMEOUT_MS:-60000} - EXPERIMENTAL_ENABLE_VINEXT: ${EXPERIMENTAL_ENABLE_VINEXT:-false} - ALLOW_INLINE_STYLES: ${ALLOW_INLINE_STYLES:-false} - ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false} - MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50} - PGDATA: ${PGDATA:-/var/lib/postgresql/data/pgdata} - MYSQL_HOST_VOLUME: ${MYSQL_HOST_VOLUME:-./volumes/mysql/data} - SANDBOX_API_KEY: ${SANDBOX_API_KEY:-dify-sandbox} - SANDBOX_GIN_MODE: ${SANDBOX_GIN_MODE:-release} - SANDBOX_WORKER_TIMEOUT: ${SANDBOX_WORKER_TIMEOUT:-15} - SANDBOX_ENABLE_NETWORK: ${SANDBOX_ENABLE_NETWORK:-true} - SANDBOX_HTTP_PROXY: ${SANDBOX_HTTP_PROXY:-http://ssrf_proxy:3128} - SANDBOX_HTTPS_PROXY: 
${SANDBOX_HTTPS_PROXY:-http://ssrf_proxy:3128} - SANDBOX_PORT: ${SANDBOX_PORT:-8194} - WEAVIATE_PERSISTENCE_DATA_PATH: ${WEAVIATE_PERSISTENCE_DATA_PATH:-/var/lib/weaviate} - WEAVIATE_QUERY_DEFAULTS_LIMIT: ${WEAVIATE_QUERY_DEFAULTS_LIMIT:-25} - WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED: ${WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED:-true} - WEAVIATE_DEFAULT_VECTORIZER_MODULE: ${WEAVIATE_DEFAULT_VECTORIZER_MODULE:-none} - WEAVIATE_CLUSTER_HOSTNAME: ${WEAVIATE_CLUSTER_HOSTNAME:-node1} - WEAVIATE_AUTHENTICATION_APIKEY_ENABLED: ${WEAVIATE_AUTHENTICATION_APIKEY_ENABLED:-true} - WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS: ${WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS:-WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih} - WEAVIATE_AUTHENTICATION_APIKEY_USERS: ${WEAVIATE_AUTHENTICATION_APIKEY_USERS:-hello@dify.ai} - WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED: ${WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED:-true} - WEAVIATE_AUTHORIZATION_ADMINLIST_USERS: ${WEAVIATE_AUTHORIZATION_ADMINLIST_USERS:-hello@dify.ai} - WEAVIATE_DISABLE_TELEMETRY: ${WEAVIATE_DISABLE_TELEMETRY:-false} - WEAVIATE_ENABLE_TOKENIZER_GSE: ${WEAVIATE_ENABLE_TOKENIZER_GSE:-false} - WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA:-false} - WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR: ${WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR:-false} - CHROMA_SERVER_AUTHN_CREDENTIALS: ${CHROMA_SERVER_AUTHN_CREDENTIALS:-difyai123456} - CHROMA_SERVER_AUTHN_PROVIDER: ${CHROMA_SERVER_AUTHN_PROVIDER:-chromadb.auth.token_authn.TokenAuthenticationServerProvider} - CHROMA_IS_PERSISTENT: ${CHROMA_IS_PERSISTENT:-TRUE} - ORACLE_PWD: ${ORACLE_PWD:-Dify123456} - ORACLE_CHARACTERSET: ${ORACLE_CHARACTERSET:-AL32UTF8} - ETCD_AUTO_COMPACTION_MODE: ${ETCD_AUTO_COMPACTION_MODE:-revision} - ETCD_AUTO_COMPACTION_RETENTION: ${ETCD_AUTO_COMPACTION_RETENTION:-1000} - ETCD_QUOTA_BACKEND_BYTES: ${ETCD_QUOTA_BACKEND_BYTES:-4294967296} - ETCD_SNAPSHOT_COUNT: ${ETCD_SNAPSHOT_COUNT:-50000} - MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin} - 
MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin} - ETCD_ENDPOINTS: ${ETCD_ENDPOINTS:-etcd:2379} - MINIO_ADDRESS: ${MINIO_ADDRESS:-minio:9000} - MILVUS_AUTHORIZATION_ENABLED: ${MILVUS_AUTHORIZATION_ENABLED:-true} - PGVECTOR_PGUSER: ${PGVECTOR_PGUSER:-postgres} - PGVECTOR_POSTGRES_PASSWORD: ${PGVECTOR_POSTGRES_PASSWORD:-difyai123456} - PGVECTOR_POSTGRES_DB: ${PGVECTOR_POSTGRES_DB:-dify} - PGVECTOR_PGDATA: ${PGVECTOR_PGDATA:-/var/lib/postgresql/data/pgdata} - OPENSEARCH_DISCOVERY_TYPE: ${OPENSEARCH_DISCOVERY_TYPE:-single-node} - OPENSEARCH_BOOTSTRAP_MEMORY_LOCK: ${OPENSEARCH_BOOTSTRAP_MEMORY_LOCK:-true} - OPENSEARCH_JAVA_OPTS_MIN: ${OPENSEARCH_JAVA_OPTS_MIN:-512m} - OPENSEARCH_JAVA_OPTS_MAX: ${OPENSEARCH_JAVA_OPTS_MAX:-1024m} - OPENSEARCH_INITIAL_ADMIN_PASSWORD: ${OPENSEARCH_INITIAL_ADMIN_PASSWORD:-Qazwsxedc!@#123} - OPENSEARCH_MEMLOCK_SOFT: ${OPENSEARCH_MEMLOCK_SOFT:--1} - OPENSEARCH_MEMLOCK_HARD: ${OPENSEARCH_MEMLOCK_HARD:--1} - OPENSEARCH_NOFILE_SOFT: ${OPENSEARCH_NOFILE_SOFT:-65536} - OPENSEARCH_NOFILE_HARD: ${OPENSEARCH_NOFILE_HARD:-65536} - NGINX_SERVER_NAME: ${NGINX_SERVER_NAME:-_} - NGINX_HTTPS_ENABLED: ${NGINX_HTTPS_ENABLED:-false} - NGINX_PORT: ${NGINX_PORT:-80} - NGINX_SSL_PORT: ${NGINX_SSL_PORT:-443} - NGINX_SSL_CERT_FILENAME: ${NGINX_SSL_CERT_FILENAME:-dify.crt} - NGINX_SSL_CERT_KEY_FILENAME: ${NGINX_SSL_CERT_KEY_FILENAME:-dify.key} - NGINX_SSL_PROTOCOLS: ${NGINX_SSL_PROTOCOLS:-TLSv1.2 TLSv1.3} - NGINX_WORKER_PROCESSES: ${NGINX_WORKER_PROCESSES:-auto} - NGINX_CLIENT_MAX_BODY_SIZE: ${NGINX_CLIENT_MAX_BODY_SIZE:-100M} - NGINX_KEEPALIVE_TIMEOUT: ${NGINX_KEEPALIVE_TIMEOUT:-65} - NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} - NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} - NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false} - CERTBOT_EMAIL: ${CERTBOT_EMAIL:-your_email@example.com} - CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-your_domain.com} - CERTBOT_OPTIONS: ${CERTBOT_OPTIONS:-} - SSRF_HTTP_PORT: 
${SSRF_HTTP_PORT:-3128} - SSRF_COREDUMP_DIR: ${SSRF_COREDUMP_DIR:-/var/spool/squid} - SSRF_REVERSE_PROXY_PORT: ${SSRF_REVERSE_PROXY_PORT:-8194} - SSRF_SANDBOX_HOST: ${SSRF_SANDBOX_HOST:-sandbox} - SSRF_DEFAULT_TIME_OUT: ${SSRF_DEFAULT_TIME_OUT:-5} - SSRF_DEFAULT_CONNECT_TIME_OUT: ${SSRF_DEFAULT_CONNECT_TIME_OUT:-5} - SSRF_DEFAULT_READ_TIME_OUT: ${SSRF_DEFAULT_READ_TIME_OUT:-5} - SSRF_DEFAULT_WRITE_TIME_OUT: ${SSRF_DEFAULT_WRITE_TIME_OUT:-5} - SSRF_POOL_MAX_CONNECTIONS: ${SSRF_POOL_MAX_CONNECTIONS:-100} - SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS: ${SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS:-20} - SSRF_POOL_KEEPALIVE_EXPIRY: ${SSRF_POOL_KEEPALIVE_EXPIRY:-5.0} - EXPOSE_NGINX_PORT: ${EXPOSE_NGINX_PORT:-80} - EXPOSE_NGINX_SSL_PORT: ${EXPOSE_NGINX_SSL_PORT:-443} - POSITION_TOOL_PINS: ${POSITION_TOOL_PINS:-} - POSITION_TOOL_INCLUDES: ${POSITION_TOOL_INCLUDES:-} - POSITION_TOOL_EXCLUDES: ${POSITION_TOOL_EXCLUDES:-} - POSITION_PROVIDER_PINS: ${POSITION_PROVIDER_PINS:-} - POSITION_PROVIDER_INCLUDES: ${POSITION_PROVIDER_INCLUDES:-} - POSITION_PROVIDER_EXCLUDES: ${POSITION_PROVIDER_EXCLUDES:-} - CSP_WHITELIST: ${CSP_WHITELIST:-} - CREATE_TIDB_SERVICE_JOB_ENABLED: ${CREATE_TIDB_SERVICE_JOB_ENABLED:-false} - MAX_SUBMIT_COUNT: ${MAX_SUBMIT_COUNT:-100} - TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10} - DB_PLUGIN_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} - EXPOSE_PLUGIN_DAEMON_PORT: ${EXPOSE_PLUGIN_DAEMON_PORT:-5002} - PLUGIN_DAEMON_PORT: ${PLUGIN_DAEMON_PORT:-5002} - PLUGIN_DAEMON_KEY: ${PLUGIN_DAEMON_KEY:-lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi} - PLUGIN_DAEMON_URL: ${PLUGIN_DAEMON_URL:-http://plugin_daemon:5002} - PLUGIN_MAX_PACKAGE_SIZE: ${PLUGIN_MAX_PACKAGE_SIZE:-52428800} - PLUGIN_MODEL_SCHEMA_CACHE_TTL: ${PLUGIN_MODEL_SCHEMA_CACHE_TTL:-3600} - PLUGIN_PPROF_ENABLED: ${PLUGIN_PPROF_ENABLED:-false} - PLUGIN_DEBUGGING_HOST: ${PLUGIN_DEBUGGING_HOST:-0.0.0.0} - PLUGIN_DEBUGGING_PORT: ${PLUGIN_DEBUGGING_PORT:-5003} - EXPOSE_PLUGIN_DEBUGGING_HOST: 
${EXPOSE_PLUGIN_DEBUGGING_HOST:-localhost} - EXPOSE_PLUGIN_DEBUGGING_PORT: ${EXPOSE_PLUGIN_DEBUGGING_PORT:-5003} - PLUGIN_DIFY_INNER_API_KEY: ${PLUGIN_DIFY_INNER_API_KEY:-QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1} - PLUGIN_DIFY_INNER_API_URL: ${PLUGIN_DIFY_INNER_API_URL:-http://api:5001} - ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}} - MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true} - MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} - CREATORS_PLATFORM_FEATURES_ENABLED: ${CREATORS_PLATFORM_FEATURES_ENABLED:-true} - CREATORS_PLATFORM_API_URL: ${CREATORS_PLATFORM_API_URL:-https://creators.dify.ai} - CREATORS_PLATFORM_OAUTH_CLIENT_ID: ${CREATORS_PLATFORM_OAUTH_CLIENT_ID:-} - FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true} - ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES: ${ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES:-true} - PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024} - PLUGIN_STDIO_MAX_BUFFER_SIZE: ${PLUGIN_STDIO_MAX_BUFFER_SIZE:-5242880} - PLUGIN_PYTHON_ENV_INIT_TIMEOUT: ${PLUGIN_PYTHON_ENV_INIT_TIMEOUT:-120} - PLUGIN_MAX_EXECUTION_TIMEOUT: ${PLUGIN_MAX_EXECUTION_TIMEOUT:-600} - PLUGIN_DAEMON_TIMEOUT: ${PLUGIN_DAEMON_TIMEOUT:-600.0} - PIP_MIRROR_URL: ${PIP_MIRROR_URL:-} - PLUGIN_STORAGE_TYPE: ${PLUGIN_STORAGE_TYPE:-local} - PLUGIN_STORAGE_LOCAL_ROOT: ${PLUGIN_STORAGE_LOCAL_ROOT:-/app/storage} - PLUGIN_WORKING_PATH: ${PLUGIN_WORKING_PATH:-/app/storage/cwd} - PLUGIN_INSTALLED_PATH: ${PLUGIN_INSTALLED_PATH:-plugin} - PLUGIN_PACKAGE_CACHE_PATH: ${PLUGIN_PACKAGE_CACHE_PATH:-plugin_packages} - PLUGIN_MEDIA_CACHE_PATH: ${PLUGIN_MEDIA_CACHE_PATH:-assets} - PLUGIN_STORAGE_OSS_BUCKET: ${PLUGIN_STORAGE_OSS_BUCKET:-} - PLUGIN_S3_USE_AWS: ${PLUGIN_S3_USE_AWS:-false} - PLUGIN_S3_USE_AWS_MANAGED_IAM: ${PLUGIN_S3_USE_AWS_MANAGED_IAM:-false} - PLUGIN_S3_ENDPOINT: ${PLUGIN_S3_ENDPOINT:-} - PLUGIN_S3_USE_PATH_STYLE: ${PLUGIN_S3_USE_PATH_STYLE:-false} - PLUGIN_AWS_ACCESS_KEY: 
${PLUGIN_AWS_ACCESS_KEY:-} - PLUGIN_AWS_SECRET_KEY: ${PLUGIN_AWS_SECRET_KEY:-} - PLUGIN_AWS_REGION: ${PLUGIN_AWS_REGION:-} - PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME: ${PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME:-} - PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING: ${PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING:-} - PLUGIN_TENCENT_COS_SECRET_KEY: ${PLUGIN_TENCENT_COS_SECRET_KEY:-} - PLUGIN_TENCENT_COS_SECRET_ID: ${PLUGIN_TENCENT_COS_SECRET_ID:-} - PLUGIN_TENCENT_COS_REGION: ${PLUGIN_TENCENT_COS_REGION:-} - PLUGIN_ALIYUN_OSS_REGION: ${PLUGIN_ALIYUN_OSS_REGION:-} - PLUGIN_ALIYUN_OSS_ENDPOINT: ${PLUGIN_ALIYUN_OSS_ENDPOINT:-} - PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID:-} - PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET: ${PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET:-} - PLUGIN_ALIYUN_OSS_AUTH_VERSION: ${PLUGIN_ALIYUN_OSS_AUTH_VERSION:-v4} - PLUGIN_ALIYUN_OSS_PATH: ${PLUGIN_ALIYUN_OSS_PATH:-} - PLUGIN_VOLCENGINE_TOS_ENDPOINT: ${PLUGIN_VOLCENGINE_TOS_ENDPOINT:-} - PLUGIN_VOLCENGINE_TOS_ACCESS_KEY: ${PLUGIN_VOLCENGINE_TOS_ACCESS_KEY:-} - PLUGIN_VOLCENGINE_TOS_SECRET_KEY: ${PLUGIN_VOLCENGINE_TOS_SECRET_KEY:-} - PLUGIN_VOLCENGINE_TOS_REGION: ${PLUGIN_VOLCENGINE_TOS_REGION:-} - ENABLE_OTEL: ${ENABLE_OTEL:-false} - OTLP_TRACE_ENDPOINT: ${OTLP_TRACE_ENDPOINT:-} - OTLP_METRIC_ENDPOINT: ${OTLP_METRIC_ENDPOINT:-} - OTLP_BASE_ENDPOINT: ${OTLP_BASE_ENDPOINT:-http://localhost:4318} - OTLP_API_KEY: ${OTLP_API_KEY:-} - OTEL_EXPORTER_OTLP_PROTOCOL: ${OTEL_EXPORTER_OTLP_PROTOCOL:-} - OTEL_EXPORTER_TYPE: ${OTEL_EXPORTER_TYPE:-otlp} - OTEL_SAMPLING_RATE: ${OTEL_SAMPLING_RATE:-0.1} - OTEL_BATCH_EXPORT_SCHEDULE_DELAY: ${OTEL_BATCH_EXPORT_SCHEDULE_DELAY:-5000} - OTEL_MAX_QUEUE_SIZE: ${OTEL_MAX_QUEUE_SIZE:-2048} - OTEL_MAX_EXPORT_BATCH_SIZE: ${OTEL_MAX_EXPORT_BATCH_SIZE:-512} - OTEL_METRIC_EXPORT_INTERVAL: ${OTEL_METRIC_EXPORT_INTERVAL:-60000} - OTEL_BATCH_EXPORT_TIMEOUT: ${OTEL_BATCH_EXPORT_TIMEOUT:-10000} - OTEL_METRIC_EXPORT_TIMEOUT: ${OTEL_METRIC_EXPORT_TIMEOUT:-30000} - ALLOW_EMBED: 
${ALLOW_EMBED:-false} - QUEUE_MONITOR_THRESHOLD: ${QUEUE_MONITOR_THRESHOLD:-200} - QUEUE_MONITOR_ALERT_EMAILS: ${QUEUE_MONITOR_ALERT_EMAILS:-} - QUEUE_MONITOR_INTERVAL: ${QUEUE_MONITOR_INTERVAL:-30} - SWAGGER_UI_ENABLED: ${SWAGGER_UI_ENABLED:-false} - SWAGGER_UI_PATH: ${SWAGGER_UI_PATH:-/swagger-ui.html} - DSL_EXPORT_ENCRYPT_DATASET_ID: ${DSL_EXPORT_ENCRYPT_DATASET_ID:-true} - DATASET_MAX_SEGMENTS_PER_REQUEST: ${DATASET_MAX_SEGMENTS_PER_REQUEST:-0} - ENABLE_CLEAN_EMBEDDING_CACHE_TASK: ${ENABLE_CLEAN_EMBEDDING_CACHE_TASK:-false} - ENABLE_CLEAN_UNUSED_DATASETS_TASK: ${ENABLE_CLEAN_UNUSED_DATASETS_TASK:-false} - ENABLE_CREATE_TIDB_SERVERLESS_TASK: ${ENABLE_CREATE_TIDB_SERVERLESS_TASK:-false} - ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK: ${ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK:-false} - ENABLE_CLEAN_MESSAGES: ${ENABLE_CLEAN_MESSAGES:-false} - ENABLE_WORKFLOW_RUN_CLEANUP_TASK: ${ENABLE_WORKFLOW_RUN_CLEANUP_TASK:-false} - ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK: ${ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK:-false} - ENABLE_DATASETS_QUEUE_MONITOR: ${ENABLE_DATASETS_QUEUE_MONITOR:-false} - ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK: ${ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK:-true} - ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK: ${ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK:-true} - WORKFLOW_SCHEDULE_POLLER_INTERVAL: ${WORKFLOW_SCHEDULE_POLLER_INTERVAL:-1} - WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE: ${WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE:-100} - WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK: ${WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK:-0} - TENANT_ISOLATED_TASK_CONCURRENCY: ${TENANT_ISOLATED_TASK_CONCURRENCY:-1} - ANNOTATION_IMPORT_FILE_SIZE_LIMIT: ${ANNOTATION_IMPORT_FILE_SIZE_LIMIT:-2} - ANNOTATION_IMPORT_MAX_RECORDS: ${ANNOTATION_IMPORT_MAX_RECORDS:-10000} - ANNOTATION_IMPORT_MIN_RECORDS: ${ANNOTATION_IMPORT_MIN_RECORDS:-1} - ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE: ${ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE:-5} - ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR: ${ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR:-20} - 
ANNOTATION_IMPORT_MAX_CONCURRENT: ${ANNOTATION_IMPORT_MAX_CONCURRENT:-5} - AMPLITUDE_API_KEY: ${AMPLITUDE_API_KEY:-} - SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD: ${SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD:-21} - SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE: ${SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE:-1000} - SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL: ${SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL:-200} - SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS: ${SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS:-30} - EVENT_BUS_REDIS_URL: ${EVENT_BUS_REDIS_URL:-} - EVENT_BUS_REDIS_CHANNEL_TYPE: ${EVENT_BUS_REDIS_CHANNEL_TYPE:-pubsub} - EVENT_BUS_REDIS_USE_CLUSTERS: ${EVENT_BUS_REDIS_USE_CLUSTERS:-false} - ENABLE_HUMAN_INPUT_TIMEOUT_TASK: ${ENABLE_HUMAN_INPUT_TIMEOUT_TASK:-true} - HUMAN_INPUT_TIMEOUT_TASK_INTERVAL: ${HUMAN_INPUT_TIMEOUT_TASK_INTERVAL:-1} - SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL: ${SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL:-90000} +# Shared configuration using YAML anchors and env_file +x-shared-api-worker-config: &shared-api-worker-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/api.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: 
./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + +x-shared-worker-config: &shared-worker-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/worker.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + 
- path: ./envs/vectorstores/opengauss.env + required: false + - path: ./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always + +x-shared-worker-beat-config: &shared-worker-beat-config + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/worker-beat.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - path: ./envs/vectorstores/weaviate.env + required: false + - path: ./envs/vectorstores/qdrant.env + required: false + - path: ./envs/vectorstores/oceanbase.env + required: false + - path: ./envs/vectorstores/seekdb.env + required: false + - path: ./envs/vectorstores/couchbase.env + required: false + - path: ./envs/vectorstores/pgvector.env + required: false + - path: ./envs/vectorstores/vastbase.env + required: false + - path: ./envs/vectorstores/pgvecto-rs.env + required: false + - path: ./envs/vectorstores/chroma.env + required: false + - path: ./envs/vectorstores/iris.env + required: false + - path: ./envs/vectorstores/oracle.env + required: false + - path: ./envs/vectorstores/opengauss.env + required: false + - path: 
./envs/vectorstores/myscale.env + required: false + - path: ./envs/vectorstores/matrixone.env + required: false + - path: ./envs/vectorstores/elasticsearch.env + required: false + - path: ./envs/vectorstores/opensearch.env + required: false + - path: ./envs/vectorstores/milvus.env + required: false + - path: ./envs/infrastructure/nginx.env + required: false + - path: ./envs/infrastructure/certbot.env + required: false + - path: ./envs/infrastructure/ssrf-proxy.env + required: false + - path: ./envs/infrastructure/etcd.env + required: false + - path: ./envs/infrastructure/minio.env + required: false + - path: ./envs/infrastructure/milvus-standalone.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default + restart: always services: # Init container to fix permissions @@ -745,12 +225,9 @@ services: # API service api: - image: langgenius/dify-api:1.13.3 - restart: always + <<: *shared-api-worker-config + image: langgenius/dify-api:1.14.0 environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'api' starts the API server. MODE: api SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} @@ -790,15 +267,37 @@ services: - ssrf_proxy_network - default + # WebSocket service for workflow collaboration. 
+ api_websocket: + <<: *shared-api-worker-config + image: langgenius/dify-api:1.14.0 + profiles: + - collaboration + environment: + MODE: api + SERVER_WORKER_AMOUNT: 1 + SERVER_WORKER_CLASS: ${API_WEBSOCKET_WORKER_CLASS:-geventwebsocket.gunicorn.workers.GeventWebSocketWorker} + SERVER_WORKER_CONNECTIONS: ${API_WEBSOCKET_WORKER_CONNECTIONS:-1000} + GUNICORN_TIMEOUT: ${API_WEBSOCKET_GUNICORN_TIMEOUT:-360} + depends_on: + db_postgres: + condition: service_healthy + required: false + db_mysql: + condition: service_healthy + required: false + redis: + condition: service_started + networks: + - ssrf_proxy_network + - default + # worker service # The Celery worker for processing all queues (dataset, workflow, mail, etc.) worker: - image: langgenius/dify-api:1.13.3 - restart: always + <<: *shared-worker-config + image: langgenius/dify-api:1.14.0 environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'worker' starts the Celery worker for processing all queues. MODE: worker SENTRY_DSN: ${API_SENTRY_DSN:-} SENTRY_TRACES_SAMPLE_RATE: ${API_SENTRY_TRACES_SAMPLE_RATE:-1.0} @@ -839,12 +338,9 @@ services: # worker_beat service # Celery beat for scheduling periodic tasks. worker_beat: - image: langgenius/dify-api:1.13.3 - restart: always + <<: *shared-worker-beat-config + image: langgenius/dify-api:1.14.0 environment: - # Use the shared environment variables. - <<: *shared-api-worker-env - # Startup mode, 'worker_beat' starts the Celery beat for scheduling periodic tasks. MODE: beat depends_on: init_permissions: @@ -876,8 +372,14 @@ services: # Frontend web application. 
web: - image: langgenius/dify-web:1.13.3 + image: langgenius/dify-web:1.14.0 restart: always + env_file: + - path: ./envs/core-services/web.env + required: false + - path: ./envs/security.env + required: false + - ./.env environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} APP_API_URL: ${APP_API_URL:-} @@ -894,8 +396,8 @@ services: ALLOW_UNSAFE_DATA_SCHEME: ${ALLOW_UNSAFE_DATA_SCHEME:-false} MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} MARKETPLACE_URL: ${MARKETPLACE_URL:-https://marketplace.dify.ai} - TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-} - INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-} + TOP_K_MAX_VALUE: ${TOP_K_MAX_VALUE:-10} + INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH: ${INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH:-4000} LOOP_NODE_MAX_COUNT: ${LOOP_NODE_MAX_COUNT:-100} MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} @@ -952,7 +454,7 @@ services: MYSQL_ROOT_PASSWORD: ${DB_PASSWORD:-difyai123456} MYSQL_DATABASE: ${DB_DATABASE:-dify} command: > - --max_connections=1000 + --max_connections=${MYSQL_MAX_CONNECTIONS:-1000} --innodb_buffer_pool_size=${MYSQL_INNODB_BUFFER_POOL_SIZE:-512M} --innodb_log_file_size=${MYSQL_INNODB_LOG_FILE_SIZE:-128M} --innodb_flush_log_at_trx_commit=${MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT:-2} @@ -992,8 +494,14 @@ services: # The DifySandbox sandbox: - image: langgenius/dify-sandbox:0.2.14 + image: langgenius/dify-sandbox:0.2.15 restart: always + env_file: + - path: ./envs/core-services/sandbox.env + required: false + - path: ./envs/security.env + required: false + - ./.env environment: # The DifySandbox configurations # Make sure you are changing this key for your deployment with a strong key. 
@@ -1016,11 +524,26 @@ services: # plugin daemon plugin_daemon: - image: langgenius/dify-plugin-daemon:0.5.3-local + image: langgenius/dify-plugin-daemon:0.6.0-local restart: always + env_file: + - path: ./envs/core-services/shared.env + required: false + - path: ./envs/core-services/plugin-daemon.env + required: false + - path: ./envs/security.env + required: false + - path: ./envs/databases/db-postgres.env + required: false + - path: ./envs/databases/db-mysql.env + required: false + - path: ./envs/databases/redis.env + required: false + - ./.env + networks: + - ssrf_proxy_network + - default environment: - # Use the shared environment variables. - <<: *shared-api-worker-env DB_DATABASE: ${DB_PLUGIN_DATABASE:-dify_plugin} DB_SSL_MODE: ${DB_SSL_MODE:-disable} SERVER_PORT: ${PLUGIN_DAEMON_PORT:-5002} @@ -1126,8 +649,8 @@ services: - ./certbot/update-cert.template.txt:/update-cert.template.txt - ./certbot/docker-entrypoint.sh:/docker-entrypoint.sh environment: - - CERTBOT_EMAIL=${CERTBOT_EMAIL} - - CERTBOT_DOMAIN=${CERTBOT_DOMAIN} + - CERTBOT_EMAIL=${CERTBOT_EMAIL:-} + - CERTBOT_DOMAIN=${CERTBOT_DOMAIN:-} - CERTBOT_OPTIONS=${CERTBOT_OPTIONS:-} entrypoint: ["/docker-entrypoint.sh"] command: ["tail", "-f", "/dev/null"] @@ -1169,6 +692,7 @@ services: NGINX_PROXY_READ_TIMEOUT: ${NGINX_PROXY_READ_TIMEOUT:-3600s} NGINX_PROXY_SEND_TIMEOUT: ${NGINX_PROXY_SEND_TIMEOUT:-3600s} NGINX_ENABLE_CERTBOT_CHALLENGE: ${NGINX_ENABLE_CERTBOT_CHALLENGE:-false} + NGINX_SOCKET_IO_UPSTREAM: ${NGINX_SOCKET_IO_UPSTREAM:-api_websocket:5001} CERTBOT_DOMAIN: ${CERTBOT_DOMAIN:-} depends_on: - api diff --git a/docker/envs/core-services/api.env.example b/docker/envs/core-services/api.env.example new file mode 100644 index 0000000000..1a3fc7a4ab --- /dev/null +++ b/docker/envs/core-services/api.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Api Configuration +# ------------------------------ + +MODE=api +SENTRY_DSN= +SENTRY_TRACES_SAMPLE_RATE=1.0 +SENTRY_PROFILES_SAMPLE_RATE=1.0 
+PLUGIN_REMOTE_INSTALL_HOST=localhost +PLUGIN_REMOTE_INSTALL_PORT=5003 +PLUGIN_MAX_PACKAGE_SIZE=52428800 +PLUGIN_DAEMON_TIMEOUT=600.0 +INNER_API_KEY_FOR_PLUGIN=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 diff --git a/docker/envs/core-services/plugin-daemon.env.example b/docker/envs/core-services/plugin-daemon.env.example new file mode 100644 index 0000000000..c3b1bef974 --- /dev/null +++ b/docker/envs/core-services/plugin-daemon.env.example @@ -0,0 +1,23 @@ +# ------------------------------ +# Plugin Daemon Configuration +# ------------------------------ + +DB_PLUGIN_DATABASE=dify_plugin +PLUGIN_DAEMON_URL=http://plugin_daemon:5002 +PLUGIN_PPROF_ENABLED=false +PLUGIN_DIFY_INNER_API_URL=http://api:5001 +FORCE_VERIFYING_SIGNATURE=true +PLUGIN_STDIO_BUFFER_SIZE=1024 +PLUGIN_STDIO_MAX_BUFFER_SIZE=5242880 +PLUGIN_PYTHON_ENV_INIT_TIMEOUT=120 +PLUGIN_MAX_EXECUTION_TIMEOUT=600 +PLUGIN_DEBUGGING_HOST=0.0.0.0 +PLUGIN_DEBUGGING_PORT=5003 +PLUGIN_DAEMON_KEY=lYkiYYT6owG+71oLerGzA7GXCgOT++6ovaezWAjpCjf+Sjc3ZtU+qUEi +PLUGIN_DIFY_INNER_API_KEY=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 +PLUGIN_DAEMON_PORT=5002 +CELERY_WORKER_CLASS= +PLUGIN_STORAGE_TYPE=local +PLUGIN_STORAGE_LOCAL_ROOT=/app/storage +PLUGIN_WORKING_PATH=/app/storage/cwd +PLUGIN_STORAGE_OSS_BUCKET= diff --git a/docker/envs/core-services/sandbox.env.example b/docker/envs/core-services/sandbox.env.example new file mode 100644 index 0000000000..5d4ee6614b --- /dev/null +++ b/docker/envs/core-services/sandbox.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Sandbox Configuration +# ------------------------------ + +SANDBOX_HTTP_PROXY=http://ssrf_proxy:3128 +SANDBOX_HTTPS_PROXY=http://ssrf_proxy:3128 +SANDBOX_PORT=8194 +PIP_MIRROR_URL= +SANDBOX_API_KEY=dify-sandbox +SANDBOX_GIN_MODE=release +SANDBOX_WORKER_TIMEOUT=15 +SANDBOX_ENABLE_NETWORK=true +SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD=21 +SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE=1000 
+SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_MAX_INTERVAL=200 +SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS=30 +SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL=90000 diff --git a/docker/envs/core-services/shared.env.example b/docker/envs/core-services/shared.env.example new file mode 100644 index 0000000000..80cfe42c38 --- /dev/null +++ b/docker/envs/core-services/shared.env.example @@ -0,0 +1,475 @@ +# ------------------------------ +# Shared API/Worker Configuration +# ------------------------------ + +CONSOLE_WEB_URL= +SERVICE_API_URL= +TRIGGER_URL=http://localhost +APP_WEB_URL= +FILES_URL= +INTERNAL_FILES_URL= +LANG=C.UTF-8 +LC_ALL=C.UTF-8 +PYTHONIOENCODING=utf-8 +UV_CACHE_DIR=/tmp/.uv-cache +CHECK_UPDATE_URL=https://updates.dify.ai +OPENAI_API_BASE=https://api.openai.com/v1 +MIGRATION_ENABLED=true +FILES_ACCESS_TIMEOUT=300 +# Remove `collaboration` from COMPOSE_PROFILES to stop the dedicated websocket service. +ENABLE_COLLABORATION_MODE=true +CELERY_BROKER_URL=redis://:difyai123456@redis:6379/1 +CELERY_TASK_ANNOTATIONS=null +AZURE_BLOB_ACCOUNT_URL=https://.blob.core.windows.net +SUPABASE_URL=your-server-url +TIDB_ON_QDRANT_URL=http://127.0.0.1 +TIDB_ON_QDRANT_API_KEY=dify +TIDB_API_URL=http://127.0.0.1 +TIDB_IAM_API_URL=http://127.0.0.1 +TIDB_REGION=regions/aws-us-east-1 +TIDB_PROJECT_ID=dify +TIDB_SPEND_LIMIT=100 +TENCENT_VECTOR_DB_URL=http://127.0.0.1 +TENCENT_VECTOR_DB_API_KEY=dify +LINDORM_URL=http://localhost:30070 +LINDORM_USERNAME=admin +UPSTASH_VECTOR_URL=https://xxx-vector.upstash.io +UPLOAD_FILE_SIZE_LIMIT=15 +UPLOAD_FILE_BATCH_LIMIT=5 +UPLOAD_FILE_EXTENSION_BLACKLIST= +SINGLE_CHUNK_ATTACHMENT_LIMIT=10 +IMAGE_FILE_BATCH_LIMIT=10 +ATTACHMENT_IMAGE_FILE_SIZE_LIMIT=2 +ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT=60 +ETL_TYPE=dify +UNSTRUCTURED_API_URL= +MULTIMODAL_SEND_FORMAT=base64 +UPLOAD_IMAGE_FILE_SIZE_LIMIT=10 +UPLOAD_VIDEO_FILE_SIZE_LIMIT=100 +UPLOAD_AUDIO_FILE_SIZE_LIMIT=50 +API_SENTRY_DSN= +API_SENTRY_TRACES_SAMPLE_RATE=1.0 +API_SENTRY_PROFILES_SAMPLE_RATE=1.0 
+WEB_SENTRY_DSN= +PLUGIN_SENTRY_ENABLED=false +PLUGIN_SENTRY_DSN= +NOTION_INTEGRATION_TYPE=public +RESEND_API_URL=https://api.resend.com +SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 +SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 +PGDATA=/var/lib/postgresql/data/pgdata +PLUGIN_MAX_PACKAGE_SIZE=52428800 +PLUGIN_MODEL_SCHEMA_CACHE_TTL=3600 +ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} +LOG_LEVEL=INFO +LOG_OUTPUT_FORMAT=text +LOG_FILE=/app/logs/server.log +LOG_FILE_MAX_SIZE=20 +LOG_FILE_BACKUP_COUNT=5 +LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S +LOG_TZ=UTC +DEBUG=false +FLASK_DEBUG=false +ENABLE_REQUEST_LOGGING=False +OPS_TRACE_RETRYABLE_DISPATCH_MAX_RETRIES=60 +OPS_TRACE_RETRYABLE_DISPATCH_DELAY_SECONDS=5 +WORKFLOW_LOG_CLEANUP_ENABLED=false +WORKFLOW_LOG_RETENTION_DAYS=30 +WORKFLOW_LOG_CLEANUP_BATCH_SIZE=100 +WORKFLOW_LOG_CLEANUP_SPECIFIC_WORKFLOW_IDS= +EXPOSE_PLUGIN_DEBUGGING_HOST=localhost +EXPOSE_PLUGIN_DEBUGGING_PORT=5003 +DEPLOY_ENV=PRODUCTION +ACCESS_TOKEN_EXPIRE_MINUTES=60 +REFRESH_TOKEN_EXPIRE_DAYS=30 +APP_DEFAULT_ACTIVE_REQUESTS=0 +APP_MAX_ACTIVE_REQUESTS=0 +APP_MAX_EXECUTION_TIME=1200 +DIFY_BIND_ADDRESS=0.0.0.0 +DIFY_PORT=5001 +SERVER_WORKER_AMOUNT=1 +SERVER_WORKER_CLASS=gevent +SERVER_WORKER_CONNECTIONS=10 +API_WEBSOCKET_WORKER_CLASS=geventwebsocket.gunicorn.workers.GeventWebSocketWorker +API_WEBSOCKET_WORKER_CONNECTIONS=1000 +API_WEBSOCKET_GUNICORN_TIMEOUT=360 +CELERY_SENTINEL_PASSWORD= +S3_ACCESS_KEY= +S3_SECRET_KEY= +ARCHIVE_STORAGE_ACCESS_KEY= +ARCHIVE_STORAGE_SECRET_KEY= +AZURE_BLOB_ACCOUNT_KEY=difyai +ALIYUN_OSS_ACCESS_KEY=your-access-key +ALIYUN_OSS_SECRET_KEY=your-secret-key +TENCENT_COS_SECRET_KEY=your-secret-key +TENCENT_COS_SECRET_ID=your-secret-id +OCI_ACCESS_KEY=your-access-key +OCI_SECRET_KEY=your-secret-key +HUAWEI_OBS_SECRET_KEY=your-secret-key +HUAWEI_OBS_ACCESS_KEY=your-access-key +VOLCENGINE_TOS_SECRET_KEY=your-secret-key +VOLCENGINE_TOS_ACCESS_KEY=your-access-key +BAIDU_OBS_SECRET_KEY=your-secret-key +BAIDU_OBS_ACCESS_KEY=your-access-key 
+SUPABASE_API_KEY=your-access-key +ALIBABACLOUD_MYSQL_PASSWORD=difyai123456 +RELYT_PASSWORD=difyai123456 +LINDORM_PASSWORD=admin +LINDORM_USING_UGC=True +LINDORM_QUERY_TIMEOUT=1 +HUAWEI_CLOUD_PASSWORD=admin +UPSTASH_VECTOR_TOKEN=dify +TABLESTORE_ACCESS_KEY_ID=xxx +TABLESTORE_ACCESS_KEY_SECRET=xxx +TABLESTORE_NORMALIZE_FULLTEXT_BM25_SCORE=false +CLICKZETTA_PASSWORD= +CLICKZETTA_INSTANCE= +CLICKZETTA_SERVICE=api.clickzetta.com +CLICKZETTA_WORKSPACE=quick_start +CLICKZETTA_VCLUSTER=default_ap +CLICKZETTA_SCHEMA=dify +CLICKZETTA_BATCH_SIZE=100 +CLICKZETTA_ENABLE_INVERTED_INDEX=true +CLICKZETTA_ANALYZER_TYPE=chinese +CLICKZETTA_ANALYZER_MODE=smart +UNSTRUCTURED_API_KEY= +SCARF_NO_ANALYTICS=true +PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false +NOTION_CLIENT_SECRET= +NOTION_CLIENT_ID= +NOTION_INTERNAL_SECRET= +MAIL_TYPE=resend +MAIL_DEFAULT_SEND_FROM= +RESEND_API_KEY=your-resend-api-key +SMTP_SERVER= +SMTP_PORT=465 +SMTP_USERNAME= +SMTP_PASSWORD= +SMTP_USE_TLS=true +SMTP_OPPORTUNISTIC_TLS=false +SMTP_LOCAL_HOSTNAME= +SENDGRID_API_KEY= +INVITE_EXPIRY_HOURS=72 +RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 +EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES=5 +CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5 +OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5 +CODE_EXECUTION_ENDPOINT=http://sandbox:8194 +CODE_EXECUTION_API_KEY=dify-sandbox +CODE_EXECUTION_SSL_VERIFY=True +CODE_EXECUTION_POOL_MAX_CONNECTIONS=100 +CODE_EXECUTION_POOL_MAX_KEEPALIVE_CONNECTIONS=20 +CODE_EXECUTION_POOL_KEEPALIVE_EXPIRY=5.0 +CODE_MAX_NUMBER=9223372036854775807 +CODE_MIN_NUMBER=-9223372036854775808 +CODE_MAX_DEPTH=5 +CODE_MAX_PRECISION=20 +CODE_MAX_STRING_LENGTH=400000 +CODE_MAX_STRING_ARRAY_LENGTH=30 +CODE_MAX_OBJECT_ARRAY_LENGTH=30 +CODE_MAX_NUMBER_ARRAY_LENGTH=1000 +CODE_EXECUTION_CONNECT_TIMEOUT=10 +CODE_EXECUTION_READ_TIMEOUT=60 +CODE_EXECUTION_WRITE_TIMEOUT=10 +TEMPLATE_TRANSFORM_MAX_LENGTH=400000 +WORKFLOW_MAX_EXECUTION_STEPS=500 +WORKFLOW_MAX_EXECUTION_TIME=1200 +WORKFLOW_CALL_MAX_DEPTH=5 +MAX_VARIABLE_SIZE=204800 
+WORKFLOW_FILE_UPLOAD_LIMIT=10 +GRAPH_ENGINE_MIN_WORKERS=1 +GRAPH_ENGINE_MAX_WORKERS=10 +GRAPH_ENGINE_SCALE_UP_THRESHOLD=3 +GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME=5.0 +ALIYUN_SLS_ACCESS_KEY_ID= +ALIYUN_SLS_ACCESS_KEY_SECRET= +WEBHOOK_REQUEST_BODY_MAX_SIZE=10485760 +RESPECT_XFORWARD_HEADERS_ENABLED=false +SSRF_HTTP_PORT=3128 +SSRF_COREDUMP_DIR=/var/spool/squid +SSRF_REVERSE_PROXY_PORT=8194 +SSRF_SANDBOX_HOST=sandbox +SSRF_DEFAULT_TIME_OUT=5 +SSRF_DEFAULT_CONNECT_TIME_OUT=5 +SSRF_DEFAULT_READ_TIME_OUT=5 +SSRF_DEFAULT_WRITE_TIME_OUT=5 +SSRF_POOL_MAX_CONNECTIONS=100 +SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS=20 +SSRF_POOL_KEEPALIVE_EXPIRY=5.0 +PLUGIN_AWS_ACCESS_KEY= +PLUGIN_AWS_SECRET_KEY= +PLUGIN_AWS_REGION= +PLUGIN_TENCENT_COS_SECRET_KEY= +PLUGIN_TENCENT_COS_SECRET_ID= +PLUGIN_ALIYUN_OSS_ACCESS_KEY_ID= +PLUGIN_ALIYUN_OSS_ACCESS_KEY_SECRET= +PLUGIN_VOLCENGINE_TOS_ACCESS_KEY= +PLUGIN_VOLCENGINE_TOS_SECRET_KEY= +OTLP_API_KEY= +OTEL_EXPORTER_OTLP_PROTOCOL= +OTEL_EXPORTER_TYPE=otlp +OTEL_SAMPLING_RATE=0.1 +OTEL_BATCH_EXPORT_SCHEDULE_DELAY=5000 +OTEL_MAX_QUEUE_SIZE=2048 +OTEL_MAX_EXPORT_BATCH_SIZE=512 +OTEL_METRIC_EXPORT_INTERVAL=60000 +OTEL_BATCH_EXPORT_TIMEOUT=10000 +OTEL_METRIC_EXPORT_TIMEOUT=30000 +QUEUE_MONITOR_THRESHOLD=200 +QUEUE_MONITOR_ALERT_EMAILS= +QUEUE_MONITOR_INTERVAL=30 +SWAGGER_UI_ENABLED=false +SWAGGER_UI_PATH=/swagger-ui.html +DSL_EXPORT_ENCRYPT_DATASET_ID=true +DATASET_MAX_SEGMENTS_PER_REQUEST=0 +ENABLE_CLEAN_EMBEDDING_CACHE_TASK=false +ENABLE_CLEAN_UNUSED_DATASETS_TASK=false +ENABLE_CREATE_TIDB_SERVERLESS_TASK=false +ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK=false +ENABLE_CLEAN_MESSAGES=false +ENABLE_WORKFLOW_RUN_CLEANUP_TASK=false +ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK=false +ENABLE_DATASETS_QUEUE_MONITOR=false +ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK=true +ENABLE_WORKFLOW_SCHEDULE_POLLER_TASK=true +WORKFLOW_SCHEDULE_POLLER_INTERVAL=1 +WORKFLOW_SCHEDULE_POLLER_BATCH_SIZE=100 +WORKFLOW_SCHEDULE_MAX_DISPATCH_PER_TICK=0 +TENANT_ISOLATED_TASK_CONCURRENCY=1 
+ANNOTATION_IMPORT_FILE_SIZE_LIMIT=2 +ANNOTATION_IMPORT_MAX_RECORDS=10000 +ANNOTATION_IMPORT_MIN_RECORDS=1 +ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE=5 +ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR=20 +ANNOTATION_IMPORT_MAX_CONCURRENT=5 +CREATORS_PLATFORM_FEATURES_ENABLED=true +CREATORS_PLATFORM_API_URL=https://creators.dify.ai +CREATORS_PLATFORM_OAUTH_CLIENT_ID= +TIDB_VECTOR_DATABASE=dify +ALIBABACLOUD_MYSQL_HOST=127.0.0.1 +ALIBABACLOUD_MYSQL_PORT=3306 +ALIBABACLOUD_MYSQL_USER=root +ALIBABACLOUD_MYSQL_DATABASE=dify +ALIBABACLOUD_MYSQL_MAX_CONNECTION=5 +ALIBABACLOUD_MYSQL_HNSW_M=6 +RELYT_DATABASE=postgres +TENCENT_VECTOR_DB_DATABASE=dify +BAIDU_VECTOR_DB_DATABASE=dify +EXPOSE_PLUGIN_DAEMON_PORT=5002 +GUNICORN_TIMEOUT=360 +CELERY_WORKER_AMOUNT= +CELERY_AUTO_SCALE=false +CELERY_MAX_WORKERS= +CELERY_MIN_WORKERS= +API_TOOL_DEFAULT_CONNECT_TIMEOUT=10 +API_TOOL_DEFAULT_READ_TIMEOUT=60 +CELERY_BACKEND=redis +CELERY_USE_SENTINEL=false +CELERY_SENTINEL_MASTER_NAME= +CELERY_SENTINEL_SOCKET_TIMEOUT=0.1 +WEB_API_CORS_ALLOW_ORIGINS=* +CONSOLE_CORS_ALLOW_ORIGINS=* +COOKIE_DOMAIN= +OPENDAL_SCHEME=fs +OPENDAL_FS_ROOT=storage +CLICKZETTA_VOLUME_TYPE=user +CLICKZETTA_VOLUME_NAME= +CLICKZETTA_VOLUME_TABLE_PREFIX=dataset_ +CLICKZETTA_VOLUME_DIFY_PREFIX=dify_km +S3_ENDPOINT= +S3_REGION=us-east-1 +S3_BUCKET_NAME=difyai +S3_ADDRESS_STYLE=auto +S3_USE_AWS_MANAGED_IAM=false +ARCHIVE_STORAGE_ENABLED=false +ARCHIVE_STORAGE_ENDPOINT= +ARCHIVE_STORAGE_ARCHIVE_BUCKET= +ARCHIVE_STORAGE_EXPORT_BUCKET= +ARCHIVE_STORAGE_REGION=auto +AZURE_BLOB_ACCOUNT_NAME=difyai +AZURE_BLOB_CONTAINER_NAME=difyai-container +GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name +GOOGLE_STORAGE_SERVICE_ACCOUNT_JSON_BASE64= +ALIYUN_OSS_BUCKET_NAME=your-bucket-name +ALIYUN_OSS_ENDPOINT=https://oss-ap-southeast-1-internal.aliyuncs.com +ALIYUN_OSS_REGION=ap-southeast-1 +ALIYUN_OSS_AUTH_VERSION=v4 +ALIYUN_OSS_PATH=your-path +ALIYUN_CLOUDBOX_ID=your-cloudbox-id +TENCENT_COS_BUCKET_NAME=your-bucket-name +TENCENT_COS_REGION=your-region 
+TENCENT_COS_SCHEME=your-scheme +TENCENT_COS_CUSTOM_DOMAIN=your-custom-domain +OCI_ENDPOINT=https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com +OCI_BUCKET_NAME=your-bucket-name +OCI_REGION=us-ashburn-1 +HUAWEI_OBS_BUCKET_NAME=your-bucket-name +HUAWEI_OBS_SERVER=your-server-url +HUAWEI_OBS_PATH_STYLE=false +VOLCENGINE_TOS_BUCKET_NAME=your-bucket-name +VOLCENGINE_TOS_ENDPOINT=your-server-url +VOLCENGINE_TOS_REGION=your-region +BAIDU_OBS_BUCKET_NAME=your-bucket-name +BAIDU_OBS_ENDPOINT=your-server-url +SUPABASE_BUCKET_NAME=your-bucket-name +TENCENT_VECTOR_DB_TIMEOUT=30 +TENCENT_VECTOR_DB_USERNAME=dify +TENCENT_VECTOR_DB_SHARD=1 +TENCENT_VECTOR_DB_REPLICAS=2 +TENCENT_VECTOR_DB_ENABLE_HYBRID_SEARCH=false +BAIDU_VECTOR_DB_ENDPOINT=http://127.0.0.1:5287 +BAIDU_VECTOR_DB_CONNECTION_TIMEOUT_MS=30000 +BAIDU_VECTOR_DB_ACCOUNT=root +BAIDU_VECTOR_DB_API_KEY=dify +BAIDU_VECTOR_DB_SHARD=1 +BAIDU_VECTOR_DB_REPLICAS=3 +BAIDU_VECTOR_DB_INVERTED_INDEX_ANALYZER=DEFAULT_ANALYZER +BAIDU_VECTOR_DB_INVERTED_INDEX_PARSER_MODE=COARSE_MODE +BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT=500 +BAIDU_VECTOR_DB_AUTO_BUILD_ROW_COUNT_INCREMENT_RATIO=0.05 +BAIDU_VECTOR_DB_REBUILD_INDEX_TIMEOUT_IN_SECONDS=300 +HUAWEI_CLOUD_HOSTS=https://127.0.0.1:9200 +HUAWEI_CLOUD_USER=admin +WORKFLOW_NODE_EXECUTION_STORAGE=rdbms +CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository +CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository +API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository +API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository +ALIYUN_SLS_ENDPOINT= +ALIYUN_SLS_REGION= +ALIYUN_SLS_PROJECT_NAME= +ALIYUN_SLS_LOGSTORE_TTL=365 
+LOGSTORE_DUAL_WRITE_ENABLED=false +LOGSTORE_DUAL_READ_ENABLED=true +LOGSTORE_ENABLE_PUT_GRAPH_FIELD=true +HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 +HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 +HTTP_REQUEST_NODE_SSL_VERIFY=True +HTTP_REQUEST_MAX_CONNECT_TIMEOUT=10 +HTTP_REQUEST_MAX_READ_TIMEOUT=600 +HTTP_REQUEST_MAX_WRITE_TIMEOUT=600 +PLUGIN_INSTALLED_PATH=plugin +PLUGIN_PACKAGE_CACHE_PATH=plugin_packages +PLUGIN_MEDIA_CACHE_PATH=assets +PLUGIN_S3_USE_AWS=false +PLUGIN_S3_USE_AWS_MANAGED_IAM=false +PLUGIN_S3_ENDPOINT= +PLUGIN_S3_USE_PATH_STYLE=false +PLUGIN_AZURE_BLOB_STORAGE_CONTAINER_NAME= +PLUGIN_AZURE_BLOB_STORAGE_CONNECTION_STRING= +PLUGIN_TENCENT_COS_REGION= +PLUGIN_ALIYUN_OSS_REGION= +PLUGIN_ALIYUN_OSS_ENDPOINT= +PLUGIN_ALIYUN_OSS_AUTH_VERSION=v4 +PLUGIN_ALIYUN_OSS_PATH= +PLUGIN_VOLCENGINE_TOS_ENDPOINT= +PLUGIN_VOLCENGINE_TOS_REGION= +ENABLE_OTEL=false +OTLP_TRACE_ENDPOINT= +OTLP_METRIC_ENDPOINT= +# Base endpoint for the OpenTelemetry (OTLP) exporter; used when the trace/metric endpoints above are unset +OTLP_BASE_ENDPOINT=http://localhost:4318 +WEAVIATE_GRPC_ENDPOINT=grpc://weaviate:50051 +ANALYTICDB_KEY_ID=your-ak +ANALYTICDB_KEY_SECRET=your-sk +ANALYTICDB_REGION_ID=cn-hangzhou +ANALYTICDB_INSTANCE_ID=gp-ab123456 +ANALYTICDB_ACCOUNT=testaccount +ANALYTICDB_PASSWORD=testpassword +ANALYTICDB_NAMESPACE=dify +ANALYTICDB_NAMESPACE_PASSWORD=difypassword +ANALYTICDB_HOST=gp-test.aliyuncs.com +ANALYTICDB_PORT=5432 +ANALYTICDB_MIN_CONNECTION=1 +ANALYTICDB_MAX_CONNECTION=5 +TIDB_VECTOR_HOST=tidb +TIDB_VECTOR_PORT=4000 +TIDB_VECTOR_USER= +TIDB_VECTOR_PASSWORD= +TIDB_ON_QDRANT_CLIENT_TIMEOUT=20 +TIDB_ON_QDRANT_GRPC_ENABLED=false +TIDB_ON_QDRANT_GRPC_PORT=6334 +TIDB_PUBLIC_KEY=dify +TIDB_PRIVATE_KEY=dify +RELYT_HOST=db +RELYT_PORT=5432 +RELYT_USER=postgres +VIKINGDB_ACCESS_KEY=your-ak +VIKINGDB_SECRET_KEY=your-sk +VIKINGDB_REGION=cn-shanghai +VIKINGDB_HOST=api-vikingdb.xxx.volces.com +VIKINGDB_SCHEME=http +VIKINGDB_CONNECTION_TIMEOUT=30 +VIKINGDB_SOCKET_TIMEOUT=30
+TABLESTORE_ENDPOINT=https://instance-name.cn-hangzhou.ots.aliyuncs.com +TABLESTORE_INSTANCE_NAME=instance-name +CLICKZETTA_USERNAME= +CLICKZETTA_VECTOR_DISTANCE_FUNCTION=cosine_distance +COMPOSE_PROFILES=${VECTOR_STORE:-weaviate},${DB_TYPE:-postgresql},collaboration +EXPOSE_NGINX_PORT=80 +EXPOSE_NGINX_SSL_PORT=443 +POSITION_TOOL_PINS= +POSITION_TOOL_INCLUDES= +POSITION_TOOL_EXCLUDES= +POSITION_PROVIDER_PINS= +POSITION_PROVIDER_INCLUDES= +POSITION_PROVIDER_EXCLUDES= +CREATE_TIDB_SERVICE_JOB_ENABLED=false +MAX_SUBMIT_COUNT=100 + +# Vector Store Configuration +STORAGE_TYPE=opendal +VECTOR_STORE=weaviate +VECTOR_INDEX_NAME_PREFIX=Vector_index +WEAVIATE_ENDPOINT=http://weaviate:8080 +WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih +WEAVIATE_TOKENIZATION=word +OCEANBASE_VECTOR_HOST=oceanbase +OCEANBASE_VECTOR_PORT=2881 +OCEANBASE_VECTOR_USER=root@test +OCEANBASE_VECTOR_PASSWORD=difyai123456 +OCEANBASE_VECTOR_DATABASE=test +OCEANBASE_ENABLE_HYBRID_SEARCH=false +OCEANBASE_FULLTEXT_PARSER=ik +SEEKDB_MEMORY_LIMIT=2G +QDRANT_URL=http://qdrant:6333 +QDRANT_API_KEY=difyai123456 +QDRANT_CLIENT_TIMEOUT=20 +QDRANT_GRPC_ENABLED=false +QDRANT_GRPC_PORT=6334 +QDRANT_REPLICATION_FACTOR=1 +MILVUS_URI=http://host.docker.internal:19530 +MILVUS_TOKEN= +MILVUS_USER= +MILVUS_PASSWORD= +MILVUS_ANALYZER_PARAMS= +PGVECTOR_HOST=pgvector +PGVECTOR_PORT=5432 +PGVECTOR_USER=postgres +PGVECTOR_PASSWORD=difyai123456 +PGVECTOR_DATABASE=dify +PGVECTOR_MIN_CONNECTION=1 +PGVECTOR_MAX_CONNECTION=5 +PGVECTOR_PG_BIGM=false +PGVECTOR_PG_BIGM_VERSION=1.2-20240606 + +# Hologres Configuration +HOLOGRES_HOST= +HOLOGRES_PORT=80 +HOLOGRES_DATABASE= +HOLOGRES_ACCESS_KEY_ID= +HOLOGRES_ACCESS_KEY_SECRET= +HOLOGRES_SCHEMA=public +HOLOGRES_TOKENIZER=jieba +HOLOGRES_DISTANCE_METHOD=Cosine +HOLOGRES_BASE_QUANTIZATION_TYPE=rabitq +HOLOGRES_MAX_DEGREE=64 +HOLOGRES_EF_CONSTRUCTION=400 + +# Milvus API Configuration +MILVUS_DATABASE= +MILVUS_ENABLE_HYBRID_SEARCH=False + +# Human Input Task Configuration 
+ENABLE_HUMAN_INPUT_TIMEOUT_TASK=true +HUMAN_INPUT_TIMEOUT_TASK_INTERVAL=1 diff --git a/docker/envs/core-services/web.env.example b/docker/envs/core-services/web.env.example new file mode 100644 index 0000000000..d366cd87ba --- /dev/null +++ b/docker/envs/core-services/web.env.example @@ -0,0 +1,30 @@ +# ------------------------------ +# Web Configuration +# ------------------------------ + +CONSOLE_API_URL= +APP_API_URL= +SENTRY_DSN= +NEXT_PUBLIC_SOCKET_URL=ws://localhost +EXPERIMENTAL_ENABLE_VINEXT=false +LOOP_NODE_MAX_COUNT=100 +MAX_TOOLS_NUM=10 +MAX_PARALLEL_LIMIT=10 +MAX_ITERATIONS_NUM=99 +TEXT_GENERATION_TIMEOUT_MS=60000 +ALLOW_INLINE_STYLES=false +ALLOW_UNSAFE_DATA_SCHEME=false +MAX_TREE_DEPTH=50 +MARKETPLACE_ENABLED=true +MARKETPLACE_API_URL=https://marketplace.dify.ai +INDEXING_MAX_SEGMENTATION_TOKENS_LENGTH=4000 +ALLOW_EMBED=false +AMPLITUDE_API_KEY= +ENABLE_WEBSITE_JINAREADER=true +ENABLE_WEBSITE_FIRECRAWL=true +ENABLE_WEBSITE_WATERCRAWL=true +NEXT_PUBLIC_ENABLE_SINGLE_DOLLAR_LATEX=false +NEXT_PUBLIC_COOKIE_DOMAIN= +NEXT_PUBLIC_BATCH_CONCURRENCY=5 +CSP_WHITELIST= +TOP_K_MAX_VALUE=10 diff --git a/docker/envs/core-services/worker-beat.env.example b/docker/envs/core-services/worker-beat.env.example new file mode 100644 index 0000000000..380fe02b68 --- /dev/null +++ b/docker/envs/core-services/worker-beat.env.example @@ -0,0 +1,8 @@ +# ------------------------------ +# Worker Beat Configuration +# ------------------------------ + +MODE=beat +COMPOSE_WORKER_HEALTHCHECK_DISABLED=true +COMPOSE_WORKER_HEALTHCHECK_INTERVAL=30s +COMPOSE_WORKER_HEALTHCHECK_TIMEOUT=30s diff --git a/docker/envs/core-services/worker.env.example b/docker/envs/core-services/worker.env.example new file mode 100644 index 0000000000..58cf4ea901 --- /dev/null +++ b/docker/envs/core-services/worker.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Worker Configuration +# ------------------------------ + +MODE=worker +SENTRY_DSN= +SENTRY_TRACES_SAMPLE_RATE=1.0 
+SENTRY_PROFILES_SAMPLE_RATE=1.0 +PLUGIN_MAX_PACKAGE_SIZE=52428800 +INNER_API_KEY_FOR_PLUGIN=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y1 +COMPOSE_WORKER_HEALTHCHECK_DISABLED=true +COMPOSE_WORKER_HEALTHCHECK_INTERVAL=30s +COMPOSE_WORKER_HEALTHCHECK_TIMEOUT=30s diff --git a/docker/envs/databases/db-mysql.env.example b/docker/envs/databases/db-mysql.env.example new file mode 100644 index 0000000000..b3ea6801fe --- /dev/null +++ b/docker/envs/databases/db-mysql.env.example @@ -0,0 +1,9 @@ +# ------------------------------ +# Db Mysql Configuration +# ------------------------------ + +MYSQL_INNODB_LOG_FILE_SIZE=128M +MYSQL_INNODB_FLUSH_LOG_AT_TRX_COMMIT=2 +MYSQL_MAX_CONNECTIONS=1000 +MYSQL_INNODB_BUFFER_POOL_SIZE=512M +MYSQL_HOST_VOLUME=./volumes/mysql/data diff --git a/docker/envs/databases/db-postgres.env.example b/docker/envs/databases/db-postgres.env.example new file mode 100644 index 0000000000..14cefb6bee --- /dev/null +++ b/docker/envs/databases/db-postgres.env.example @@ -0,0 +1,26 @@ +# ------------------------------ +# Db Postgres Configuration +# ------------------------------ + +PGDATA=/var/lib/postgresql/data/pgdata +DB_TYPE=postgresql +DB_USERNAME=postgres +DB_PASSWORD=difyai123456 +DB_HOST=db_postgres +DB_PORT=5432 +DB_DATABASE=dify +SQLALCHEMY_POOL_SIZE=30 +SQLALCHEMY_MAX_OVERFLOW=10 +SQLALCHEMY_POOL_RECYCLE=3600 +SQLALCHEMY_ECHO=false +SQLALCHEMY_POOL_PRE_PING=false +SQLALCHEMY_POOL_USE_LIFO=false +SQLALCHEMY_POOL_TIMEOUT=30 +SQLALCHEMY_POOL_RESET_ON_RETURN=rollback +POSTGRES_MAX_CONNECTIONS=100 +POSTGRES_SHARED_BUFFERS=128MB +POSTGRES_WORK_MEM=4MB +POSTGRES_MAINTENANCE_WORK_MEM=64MB +POSTGRES_EFFECTIVE_CACHE_SIZE=4096MB +POSTGRES_STATEMENT_TIMEOUT=0 +POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT=0 diff --git a/docker/envs/databases/redis.env.example b/docker/envs/databases/redis.env.example new file mode 100644 index 0000000000..74bcb6525e --- /dev/null +++ b/docker/envs/databases/redis.env.example @@ -0,0 +1,35 @@ +# 
------------------------------ +# Redis Configuration +# ------------------------------ + +REDIS_HOST=redis +REDIS_PORT=6379 +REDIS_USERNAME= +REDIS_PASSWORD=difyai123456 +REDIS_USE_SSL=false +REDIS_SSL_CERT_REQS=CERT_NONE +REDIS_SSL_CA_CERTS= +REDIS_SSL_CERTFILE= +REDIS_SSL_KEYFILE= +REDIS_DB=0 +REDIS_KEY_PREFIX= +REDIS_MAX_CONNECTIONS= +REDIS_USE_SENTINEL=false +REDIS_SENTINELS= +REDIS_SENTINEL_SERVICE_NAME= +REDIS_SENTINEL_USERNAME= +REDIS_SENTINEL_PASSWORD= +REDIS_SENTINEL_SOCKET_TIMEOUT=0.1 +REDIS_USE_CLUSTERS=false +REDIS_CLUSTERS= +REDIS_CLUSTERS_PASSWORD= +REDIS_RETRY_RETRIES=3 +REDIS_RETRY_BACKOFF_BASE=1.0 +REDIS_RETRY_BACKOFF_CAP=10.0 +REDIS_SOCKET_TIMEOUT=5.0 +REDIS_SOCKET_CONNECT_TIMEOUT=5.0 +REDIS_HEALTH_CHECK_INTERVAL=30 +EVENT_BUS_REDIS_URL= +EVENT_BUS_REDIS_CHANNEL_TYPE=pubsub +EVENT_BUS_REDIS_USE_CLUSTERS=false +BROKER_USE_SSL=false diff --git a/docker/envs/infrastructure/certbot.env.example b/docker/envs/infrastructure/certbot.env.example new file mode 100644 index 0000000000..c654fbe02f --- /dev/null +++ b/docker/envs/infrastructure/certbot.env.example @@ -0,0 +1,7 @@ +# ------------------------------ +# Certbot Configuration +# ------------------------------ + +CERTBOT_EMAIL=your_email@example.com +CERTBOT_DOMAIN=your_domain.com +CERTBOT_OPTIONS= diff --git a/docker/envs/infrastructure/etcd.env.example b/docker/envs/infrastructure/etcd.env.example new file mode 100644 index 0000000000..4dca26671a --- /dev/null +++ b/docker/envs/infrastructure/etcd.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Etcd Configuration +# ------------------------------ + diff --git a/docker/envs/infrastructure/milvus-standalone.env.example b/docker/envs/infrastructure/milvus-standalone.env.example new file mode 100644 index 0000000000..7e87ed2648 --- /dev/null +++ b/docker/envs/infrastructure/milvus-standalone.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Milvus Standalone Configuration +# ------------------------------ + diff --git 
a/docker/envs/infrastructure/minio.env.example b/docker/envs/infrastructure/minio.env.example new file mode 100644 index 0000000000..7c8e1fa35a --- /dev/null +++ b/docker/envs/infrastructure/minio.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Minio Configuration +# ------------------------------ + diff --git a/docker/envs/infrastructure/nginx.env.example b/docker/envs/infrastructure/nginx.env.example new file mode 100644 index 0000000000..fcb369a47d --- /dev/null +++ b/docker/envs/infrastructure/nginx.env.example @@ -0,0 +1,18 @@ +# ------------------------------ +# Nginx Configuration +# ------------------------------ + +NGINX_SERVER_NAME=_ +NGINX_HTTPS_ENABLED=false +NGINX_PORT=80 +NGINX_SSL_PORT=443 +NGINX_SSL_CERT_FILENAME=dify.crt +NGINX_SSL_CERT_KEY_FILENAME=dify.key +NGINX_SSL_PROTOCOLS=TLSv1.2 TLSv1.3 +NGINX_WORKER_PROCESSES=auto +NGINX_CLIENT_MAX_BODY_SIZE=100M +NGINX_KEEPALIVE_TIMEOUT=65 +NGINX_PROXY_READ_TIMEOUT=3600s +NGINX_PROXY_SEND_TIMEOUT=3600s +NGINX_ENABLE_CERTBOT_CHALLENGE=false +NGINX_SOCKET_IO_UPSTREAM=api_websocket:5001 diff --git a/docker/envs/infrastructure/ssrf-proxy.env.example b/docker/envs/infrastructure/ssrf-proxy.env.example new file mode 100644 index 0000000000..210a782494 --- /dev/null +++ b/docker/envs/infrastructure/ssrf-proxy.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Ssrf Proxy Configuration +# ------------------------------ + +SSRF_PROXY_HTTP_URL=http://ssrf_proxy:3128 +SSRF_PROXY_HTTPS_URL=http://ssrf_proxy:3128 +SSRF_HTTP_PORT=3128 +SSRF_COREDUMP_DIR=/var/spool/squid +SSRF_REVERSE_PROXY_PORT=8194 +SSRF_SANDBOX_HOST=sandbox +SSRF_DEFAULT_TIME_OUT=5 +SSRF_DEFAULT_CONNECT_TIME_OUT=5 +SSRF_DEFAULT_READ_TIME_OUT=5 +SSRF_DEFAULT_WRITE_TIME_OUT=5 +SSRF_POOL_MAX_CONNECTIONS=100 +SSRF_POOL_MAX_KEEPALIVE_CONNECTIONS=20 +SSRF_POOL_KEEPALIVE_EXPIRY=5.0 diff --git a/docker/middleware.env.example b/docker/envs/middleware.env.example similarity index 100% rename from docker/middleware.env.example 
rename to docker/envs/middleware.env.example diff --git a/docker/envs/security.env.example b/docker/envs/security.env.example new file mode 100644 index 0000000000..787aef2706 --- /dev/null +++ b/docker/envs/security.env.example @@ -0,0 +1,40 @@ +# ------------------------------ +# Security Configuration +# ------------------------------ + +TIDB_ON_QDRANT_API_KEY=dify +TENCENT_VECTOR_DB_API_KEY=dify +ALIBABACLOUD_MYSQL_PASSWORD=difyai123456 +RELYT_PASSWORD=difyai123456 +LINDORM_PASSWORD=admin +HUAWEI_CLOUD_PASSWORD=admin +UPSTASH_VECTOR_TOKEN=dify +TABLESTORE_ACCESS_KEY_ID=xxx +TABLESTORE_ACCESS_KEY_SECRET=xxx +UNSTRUCTURED_API_KEY= +PLUGIN_BASED_TOKEN_COUNTING_ENABLED=false +NOTION_CLIENT_SECRET= +NOTION_INTERNAL_SECRET= +RESEND_API_KEY=your-resend-api-key +SMTP_PASSWORD= +SENDGRID_API_KEY= +RESET_PASSWORD_TOKEN_EXPIRY_MINUTES=5 +EMAIL_REGISTER_TOKEN_EXPIRY_MINUTES=5 +CHANGE_EMAIL_TOKEN_EXPIRY_MINUTES=5 +OWNER_TRANSFER_TOKEN_EXPIRY_MINUTES=5 +CODE_EXECUTION_API_KEY=dify-sandbox +ALIYUN_SLS_ACCESS_KEY_ID= +ALIYUN_SLS_ACCESS_KEY_SECRET= +OTLP_API_KEY= +BAIDU_VECTOR_DB_API_KEY=dify +ANALYTICDB_KEY_ID=your-ak +ANALYTICDB_KEY_SECRET=your-sk +ANALYTICDB_PASSWORD=testpassword +ANALYTICDB_NAMESPACE_PASSWORD=difypassword +TIDB_VECTOR_PASSWORD= +TIDB_PUBLIC_KEY=dify +TIDB_PRIVATE_KEY=dify +VIKINGDB_ACCESS_KEY=your-ak +VIKINGDB_SECRET_KEY=your-sk +SECRET_KEY=sk-9f73s3ljTXVcMT3Blb3ljTqtsKiGHXVcMT3BlbkFJLK7U +INIT_PASSWORD= diff --git a/docker/envs/vectorstores/chroma.env.example b/docker/envs/vectorstores/chroma.env.example new file mode 100644 index 0000000000..2a15375a3d --- /dev/null +++ b/docker/envs/vectorstores/chroma.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Chroma Configuration +# ------------------------------ + +CHROMA_DATABASE=default_database +CHROMA_AUTH_PROVIDER=chromadb.auth.token_authn.TokenAuthClientProvider +CHROMA_AUTH_CREDENTIALS= +CHROMA_HOST=127.0.0.1 +CHROMA_PORT=8000 +CHROMA_TENANT=default_tenant 
+CHROMA_SERVER_AUTHN_CREDENTIALS=difyai123456 +CHROMA_SERVER_AUTHN_PROVIDER=chromadb.auth.token_authn.TokenAuthenticationServerProvider +CHROMA_IS_PERSISTENT=TRUE diff --git a/docker/envs/vectorstores/couchbase.env.example b/docker/envs/vectorstores/couchbase.env.example new file mode 100644 index 0000000000..4329d9c723 --- /dev/null +++ b/docker/envs/vectorstores/couchbase.env.example @@ -0,0 +1,9 @@ +# ------------------------------ +# Couchbase Configuration +# ------------------------------ + +COUCHBASE_PASSWORD=password +COUCHBASE_BUCKET_NAME=Embeddings +COUCHBASE_SCOPE_NAME=_default +COUCHBASE_CONNECTION_STRING=couchbase://couchbase-server +COUCHBASE_USER=Administrator diff --git a/docker/envs/vectorstores/elasticsearch.env.example b/docker/envs/vectorstores/elasticsearch.env.example new file mode 100644 index 0000000000..2aaa965cd7 --- /dev/null +++ b/docker/envs/vectorstores/elasticsearch.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Elasticsearch Configuration +# ------------------------------ + +ELASTICSEARCH_CLOUD_URL=YOUR-ELASTICSEARCH_CLOUD_URL +ELASTICSEARCH_PASSWORD=elastic +KIBANA_PORT=5601 +ELASTICSEARCH_USE_CLOUD=false +ELASTICSEARCH_API_KEY=YOUR-ELASTICSEARCH_API_KEY +ELASTICSEARCH_VERIFY_CERTS=False +ELASTICSEARCH_CA_CERTS= +ELASTICSEARCH_REQUEST_TIMEOUT=100000 +ELASTICSEARCH_RETRY_ON_TIMEOUT=True +ELASTICSEARCH_MAX_RETRIES=10 +ELASTICSEARCH_HOST=0.0.0.0 +ELASTICSEARCH_PORT=9200 +ELASTICSEARCH_USERNAME=elastic diff --git a/docker/envs/vectorstores/iris.env.example b/docker/envs/vectorstores/iris.env.example new file mode 100644 index 0000000000..b1eb39bff8 --- /dev/null +++ b/docker/envs/vectorstores/iris.env.example @@ -0,0 +1,17 @@ +# ------------------------------ +# Iris Configuration +# ------------------------------ + +IRIS_CONNECTION_URL= +IRIS_MIN_CONNECTION=1 +IRIS_MAX_CONNECTION=3 +IRIS_TEXT_INDEX=true +IRIS_TEXT_INDEX_LANGUAGE=en +IRIS_TIMEZONE=UTC +IRIS_PASSWORD=Dify@1234 +IRIS_DATABASE=USER +IRIS_SCHEMA=dify 
+IRIS_HOST=iris +IRIS_SUPER_SERVER_PORT=1972 +IRIS_WEB_SERVER_PORT=52773 +IRIS_USER=_SYSTEM diff --git a/docker/envs/vectorstores/matrixone.env.example b/docker/envs/vectorstores/matrixone.env.example new file mode 100644 index 0000000000..931375f8b4 --- /dev/null +++ b/docker/envs/vectorstores/matrixone.env.example @@ -0,0 +1,9 @@ +# ------------------------------ +# Matrixone Configuration +# ------------------------------ + +MATRIXONE_PASSWORD=111 +MATRIXONE_HOST=matrixone +MATRIXONE_PORT=6001 +MATRIXONE_USER=dump +MATRIXONE_DATABASE=dify diff --git a/docker/envs/vectorstores/milvus.env.example b/docker/envs/vectorstores/milvus.env.example new file mode 100644 index 0000000000..d16879ca7b --- /dev/null +++ b/docker/envs/vectorstores/milvus.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Milvus Configuration +# ------------------------------ + +MINIO_ACCESS_KEY=minioadmin +MINIO_SECRET_KEY=minioadmin +ETCD_ENDPOINTS=etcd:2379 +MINIO_ADDRESS=minio:9000 +ETCD_AUTO_COMPACTION_MODE=revision +ETCD_AUTO_COMPACTION_RETENTION=1000 +ETCD_QUOTA_BACKEND_BYTES=4294967296 +ETCD_SNAPSHOT_COUNT=50000 +MILVUS_AUTHORIZATION_ENABLED=true diff --git a/docker/envs/vectorstores/myscale.env.example b/docker/envs/vectorstores/myscale.env.example new file mode 100644 index 0000000000..eaa9e88cc0 --- /dev/null +++ b/docker/envs/vectorstores/myscale.env.example @@ -0,0 +1,10 @@ +# ------------------------------ +# Myscale Configuration +# ------------------------------ + +MYSCALE_PASSWORD= +MYSCALE_DATABASE=dify +MYSCALE_FTS_PARAMS= +MYSCALE_HOST=myscale +MYSCALE_PORT=8123 +MYSCALE_USER=default diff --git a/docker/envs/vectorstores/oceanbase.env.example b/docker/envs/vectorstores/oceanbase.env.example new file mode 100644 index 0000000000..42bed8df6a --- /dev/null +++ b/docker/envs/vectorstores/oceanbase.env.example @@ -0,0 +1,6 @@ +# ------------------------------ +# Oceanbase Configuration +# ------------------------------ + +OCEANBASE_CLUSTER_NAME=difyai 
+OCEANBASE_MEMORY_LIMIT=6G diff --git a/docker/envs/vectorstores/opengauss.env.example b/docker/envs/vectorstores/opengauss.env.example new file mode 100644 index 0000000000..9f58499b64 --- /dev/null +++ b/docker/envs/vectorstores/opengauss.env.example @@ -0,0 +1,12 @@ +# ------------------------------ +# Opengauss Configuration +# ------------------------------ + +OPENGAUSS_PASSWORD=Dify@123 +OPENGAUSS_DATABASE=dify +OPENGAUSS_MIN_CONNECTION=1 +OPENGAUSS_MAX_CONNECTION=5 +OPENGAUSS_ENABLE_PQ=false +OPENGAUSS_HOST=opengauss +OPENGAUSS_PORT=6600 +OPENGAUSS_USER=postgres diff --git a/docker/envs/vectorstores/opensearch.env.example b/docker/envs/vectorstores/opensearch.env.example new file mode 100644 index 0000000000..a6a9283378 --- /dev/null +++ b/docker/envs/vectorstores/opensearch.env.example @@ -0,0 +1,22 @@ +# ------------------------------ +# Opensearch Configuration +# ------------------------------ + +OPENSEARCH_PASSWORD=admin +OPENSEARCH_AWS_REGION=ap-southeast-1 +OPENSEARCH_AWS_SERVICE=aoss +OPENSEARCH_INITIAL_ADMIN_PASSWORD=Qazwsxedc!@#123 +OPENSEARCH_MEMLOCK_SOFT=-1 +OPENSEARCH_MEMLOCK_HARD=-1 +OPENSEARCH_NOFILE_SOFT=65536 +OPENSEARCH_NOFILE_HARD=65536 +OPENSEARCH_HOST=opensearch +OPENSEARCH_PORT=9200 +OPENSEARCH_SECURE=true +OPENSEARCH_VERIFY_CERTS=true +OPENSEARCH_AUTH_METHOD=basic +OPENSEARCH_USER=admin +OPENSEARCH_DISCOVERY_TYPE=single-node +OPENSEARCH_BOOTSTRAP_MEMORY_LOCK=true +OPENSEARCH_JAVA_OPTS_MIN=512m +OPENSEARCH_JAVA_OPTS_MAX=1024m diff --git a/docker/envs/vectorstores/oracle.env.example b/docker/envs/vectorstores/oracle.env.example new file mode 100644 index 0000000000..c8f24db41a --- /dev/null +++ b/docker/envs/vectorstores/oracle.env.example @@ -0,0 +1,13 @@ +# ------------------------------ +# Oracle Configuration +# ------------------------------ + +ORACLE_PASSWORD=dify +ORACLE_DSN=oracle:1521/FREEPDB1 +ORACLE_CONFIG_DIR=/app/api/storage/wallet +ORACLE_WALLET_LOCATION=/app/api/storage/wallet +ORACLE_WALLET_PASSWORD=dify 
+ORACLE_IS_AUTONOMOUS=false +ORACLE_USER=dify +ORACLE_PWD=Dify123456 +ORACLE_CHARACTERSET=AL32UTF8 diff --git a/docker/envs/vectorstores/pgvecto-rs.env.example b/docker/envs/vectorstores/pgvecto-rs.env.example new file mode 100644 index 0000000000..6428e5dd67 --- /dev/null +++ b/docker/envs/vectorstores/pgvecto-rs.env.example @@ -0,0 +1,9 @@ +# ------------------------------ +# Pgvecto Rs Configuration +# ------------------------------ + +PGVECTO_RS_HOST=pgvecto-rs +PGVECTO_RS_PORT=5432 +PGVECTO_RS_USER=postgres +PGVECTO_RS_PASSWORD=difyai123456 +PGVECTO_RS_DATABASE=dify diff --git a/docker/envs/vectorstores/pgvector.env.example b/docker/envs/vectorstores/pgvector.env.example new file mode 100644 index 0000000000..9fd1dbf962 --- /dev/null +++ b/docker/envs/vectorstores/pgvector.env.example @@ -0,0 +1,8 @@ +# ------------------------------ +# Pgvector Configuration +# ------------------------------ + +PGVECTOR_PGUSER=postgres +PGVECTOR_POSTGRES_PASSWORD=difyai123456 +PGVECTOR_POSTGRES_DB=dify +PGVECTOR_PGDATA=/var/lib/postgresql/data/pgdata diff --git a/docker/envs/vectorstores/qdrant.env.example b/docker/envs/vectorstores/qdrant.env.example new file mode 100644 index 0000000000..a3555fe547 --- /dev/null +++ b/docker/envs/vectorstores/qdrant.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Qdrant Configuration +# ------------------------------ + diff --git a/docker/envs/vectorstores/seekdb.env.example b/docker/envs/vectorstores/seekdb.env.example new file mode 100644 index 0000000000..4307fbede2 --- /dev/null +++ b/docker/envs/vectorstores/seekdb.env.example @@ -0,0 +1,4 @@ +# ------------------------------ +# Seekdb Configuration +# ------------------------------ + diff --git a/docker/envs/vectorstores/vastbase.env.example b/docker/envs/vectorstores/vastbase.env.example new file mode 100644 index 0000000000..2c9db50fbe --- /dev/null +++ b/docker/envs/vectorstores/vastbase.env.example @@ -0,0 +1,11 @@ +# ------------------------------ +# Vastbase 
Configuration +# ------------------------------ + +VASTBASE_PASSWORD=Difyai123456 +VASTBASE_DATABASE=dify +VASTBASE_MIN_CONNECTION=1 +VASTBASE_MAX_CONNECTION=5 +VASTBASE_HOST=vastbase +VASTBASE_PORT=5432 +VASTBASE_USER=dify diff --git a/docker/envs/vectorstores/weaviate.env.example b/docker/envs/vectorstores/weaviate.env.example new file mode 100644 index 0000000000..82a3ccb172 --- /dev/null +++ b/docker/envs/vectorstores/weaviate.env.example @@ -0,0 +1,18 @@ +# ------------------------------ +# Weaviate Configuration +# ------------------------------ + +WEAVIATE_PERSISTENCE_DATA_PATH=/var/lib/weaviate +WEAVIATE_QUERY_DEFAULTS_LIMIT=25 +WEAVIATE_AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true +WEAVIATE_DEFAULT_VECTORIZER_MODULE=none +WEAVIATE_CLUSTER_HOSTNAME=node1 +WEAVIATE_AUTHENTICATION_APIKEY_ENABLED=true +WEAVIATE_AUTHENTICATION_APIKEY_ALLOWED_KEYS=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih +WEAVIATE_AUTHENTICATION_APIKEY_USERS=hello@dify.ai +WEAVIATE_AUTHORIZATION_ADMINLIST_ENABLED=true +WEAVIATE_AUTHORIZATION_ADMINLIST_USERS=hello@dify.ai +WEAVIATE_DISABLE_TELEMETRY=false +WEAVIATE_ENABLE_TOKENIZER_GSE=false +WEAVIATE_ENABLE_TOKENIZER_KAGOME_JA=false +WEAVIATE_ENABLE_TOKENIZER_KAGOME_KR=false diff --git a/docker/generate_docker_compose b/docker/generate_docker_compose index 46d948f3c1..580091e006 100755 --- a/docker/generate_docker_compose +++ b/docker/generate_docker_compose @@ -64,25 +64,61 @@ def generate_shared_env_block(env_vars, anchor_name="shared-api-worker-env"): return "\n".join(lines) -def insert_shared_env(template_path, output_path, shared_env_block, header_comments): +def create_env_files_from_example(env_example_path): """ - Inserts the shared environment variables block and header comments into the template file, - removing any existing x-shared-env anchors, and generates the final docker-compose.yaml file. - Always writes with LF line endings. + Creates actual env files from .env.example by copying the categorized .env.example files. 
+    This allows docker-compose to use env_file references.
+    Supports per-module structure with subdirectories.
+    """
+    base_dir = os.path.dirname(os.path.abspath(env_example_path))
+    root_env_file = os.path.join(base_dir, ".env")
+    if not os.path.exists(root_env_file):
+        with open(env_example_path, "r", encoding="utf-8") as src, open(
+            root_env_file, "w", encoding="utf-8", newline="\n"
+        ) as dst:
+            dst.write(src.read())
+        print(f"Created {root_env_file}")
+    else:
+        print(f"{root_env_file} already exists, skipping")
+
+    envs_dir = os.path.join(base_dir, "envs")
+    if not os.path.isdir(envs_dir):
+        print(f"No envs directory found at {envs_dir}, skipping split env files")
+        return []
+
+    created_files = []
+    # Walk through all .env.example files in subdirectories
+    for root, dirs, files in os.walk(envs_dir):
+        for file in files:
+            if file.endswith(".env.example"):
+                example_file = os.path.join(root, file)
+                env_file = example_file[:-len(".example")]  # strip only the trailing ".example"
+
+                if os.path.exists(env_file):
+                    print(f"{env_file} already exists, skipping")
+                    continue
+
+                # Copy .example to actual file
+                with open(example_file, "r", encoding="utf-8") as src, open(
+                    env_file, "w", encoding="utf-8", newline="\n"
+                ) as dst:
+                    dst.write(src.read())
+                created_files.append(env_file)
+                print(f"Created {env_file}")
+
+    return created_files
+
+
+def insert_shared_env(template_path, output_path, header_comments):
+    """
+    Copies the template file to output path with header comments.
+    The template now uses env_file references instead of a huge YAML anchor.
""" with open(template_path, "r", encoding="utf-8") as f: template_content = f.read() - # Remove existing x-shared-env: &shared-api-worker-env lines - template_content = re.sub( - r"^x-shared-env: &shared-api-worker-env\s*\n?", - "", - template_content, - flags=re.MULTILINE, - ) - - # Prepare the final content with header comments and shared env block - final_content = f"{header_comments}\n{shared_env_block}\n\n{template_content}" + # Prepare the final content with header comments + final_content = f"{header_comments}\n{template_content}" with open(output_path, "w", encoding="utf-8", newline="\n") as f: f.write(final_content) @@ -90,10 +126,10 @@ def insert_shared_env(template_path, output_path, shared_env_block, header_comme def main(): - env_example_path = ".env.example" - template_path = "docker-compose-template.yaml" - output_path = "docker-compose.yaml" - anchor_name = "shared-api-worker-env" # Can be modified as needed + base_dir = os.path.dirname(os.path.abspath(__file__)) + env_example_path = os.path.join(base_dir, ".env.example") + template_path = os.path.join(base_dir, "docker-compose-template.yaml") + output_path = os.path.join(base_dir, "docker-compose.yaml") # Define header comments to be added at the top of docker-compose.yaml header_comments = ( @@ -110,17 +146,14 @@ def main(): print(f"Error: File {path} does not exist.") sys.exit(1) - # Parse .env.example file - env_vars = parse_env_example(env_example_path) + # Create env files from categorized .env.example files + # These files are used by docker-compose's env_file directive + # This ensures .env files exist even in CI/CD environments + create_env_files_from_example(env_example_path) - if not env_vars: - print("Warning: No environment variables found in .env.example.") - - # Generate shared environment variables block - shared_env_block = generate_shared_env_block(env_vars, anchor_name) - - # Insert shared environment variables block and header comments into the template - 
insert_shared_env(template_path, output_path, shared_env_block, header_comments) + # Copy template to output with header comments + # The template now uses env_file references instead of a huge YAML anchor + insert_shared_env(template_path, output_path, header_comments) if __name__ == "__main__": diff --git a/docker/nginx/conf.d/default.conf.template b/docker/nginx/conf.d/default.conf.template index 94a748290f..64c720ca2b 100644 --- a/docker/nginx/conf.d/default.conf.template +++ b/docker/nginx/conf.d/default.conf.template @@ -15,7 +15,9 @@ server { } location /socket.io/ { - proxy_pass http://api:5001; + resolver 127.0.0.11 valid=30s ipv6=off; + set $socket_io_upstream ${NGINX_SOCKET_IO_UPSTREAM}; + proxy_pass http://$socket_io_upstream; include proxy.conf; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "upgrade"; diff --git a/e2e/AGENTS.md b/e2e/AGENTS.md index e56aab20a7..c05b5105be 100644 --- a/e2e/AGENTS.md +++ b/e2e/AGENTS.md @@ -31,7 +31,7 @@ pnpm -C e2e check `pnpm install` is resolved through the repository workspace and uses the shared root lockfile plus `pnpm-workspace.yaml`. -Use `pnpm check` as the default local verification step after editing E2E TypeScript, Cucumber support code, or feature glue. It runs formatting, linting, and type checks for this package. +Use `pnpm -C e2e check` as the default local verification step after editing E2E TypeScript, Cucumber support code, or feature glue. It runs formatting, linting, and type checks for this package. 
Common commands: @@ -68,8 +68,8 @@ flowchart TD C --> D["Cucumber loads config, steps, and support modules"] D --> E["BeforeAll bootstraps shared auth state via /install"] E --> F{"Which command is running?"} - F -->|`pnpm e2e`| G["Run config default tags: not @fresh and not @skip"] - F -->|`pnpm e2e:full*`| H["Override tags to not @skip"] + F -->|`pnpm -C e2e e2e`| G["Run config default tags: not @fresh and not @skip"] + F -->|`pnpm -C e2e e2e:full*`| H["Override tags to not @skip"] G --> I["Per-scenario BrowserContext from shared browser"] H --> I I --> J["Failure artifacts written to cucumber-report/artifacts"] @@ -99,7 +99,7 @@ Behavior depends on instance state: - uninitialized instance: completes install and stores authenticated state - initialized instance: signs in and reuses authenticated state -Because of that, the `@fresh` install scenario only runs in the `pnpm e2e:full*` flows. The default `pnpm e2e*` flows exclude `@fresh` via Cucumber config tags so they can be re-run against an already initialized instance. +Because of that, the `@fresh` install scenario only runs in the `pnpm -C e2e e2e:full*` flows. The default `pnpm -C e2e e2e*` flows exclude `@fresh` via Cucumber config tags so they can be re-run against an already initialized instance. 
Reset all persisted E2E state: @@ -126,7 +126,7 @@ pnpm -C e2e e2e:middleware:up Stop the full middleware stack: ```bash -pnpm e2e:middleware:down +pnpm -C e2e e2e:middleware:down ``` The middleware stack includes: @@ -141,15 +141,15 @@ The middleware stack includes: Fresh install verification: ```bash -pnpm e2e:full +pnpm -C e2e e2e:full ``` Run the Cucumber suite against an already running middleware stack: ```bash -pnpm e2e:middleware:up -pnpm e2e -pnpm e2e:middleware:down +pnpm -C e2e e2e:middleware:up +pnpm -C e2e e2e +pnpm -C e2e e2e:middleware:down ``` Artifacts and diagnostics: diff --git a/e2e/features/step-definitions/apps/share-app.steps.ts b/e2e/features/step-definitions/apps/share-app.steps.ts index d5742bdaa8..3ec038b065 100644 --- a/e2e/features/step-definitions/apps/share-app.steps.ts +++ b/e2e/features/step-definitions/apps/share-app.steps.ts @@ -40,7 +40,7 @@ Then('the shared app page should be accessible', async function (this: DifyWorld When('I run the shared workflow app', async function (this: DifyWorld) { const page = this.getPage() - const runButton = page.getByTestId('run-button') + const runButton = page.getByRole('button', { name: 'Execute' }) await expect(runButton).toBeEnabled({ timeout: 15_000 }) await runButton.click() diff --git a/e2e/scripts/common.ts b/e2e/scripts/common.ts index ea6c897b2d..2964892dd0 100644 --- a/e2e/scripts/common.ts +++ b/e2e/scripts/common.ts @@ -36,7 +36,7 @@ export const webDir = path.join(rootDir, 'web') export const middlewareComposeFile = path.join(dockerDir, 'docker-compose.middleware.yaml') export const middlewareEnvFile = path.join(dockerDir, 'middleware.env') -export const middlewareEnvExampleFile = path.join(dockerDir, 'middleware.env.example') +export const middlewareEnvExampleFile = path.join(dockerDir, 'envs', 'middleware.env.example') export const webEnvLocalFile = path.join(webDir, '.env.local') export const webEnvExampleFile = path.join(webDir, '.env.example') export const apiEnvExampleFile = 
path.join(apiDir, 'tests', 'integration_tests', '.env.example') diff --git a/eslint-suppressions.json b/eslint-suppressions.json index e1c8bda126..8ac0f5567b 100644 --- a/eslint-suppressions.json +++ b/eslint-suppressions.json @@ -160,33 +160,15 @@ } }, "web/app/account/(commonLayout)/account-page/email-change-modal.tsx": { - "erasable-syntax-only/enums": { - "count": 1 - }, "ts/no-explicit-any": { "count": 5 } }, - "web/app/account/(commonLayout)/account-page/index.tsx": { - "ts/no-explicit-any": { - "count": 1 - } - }, - "web/app/account/(commonLayout)/delete-account/components/feed-back.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/account/(commonLayout)/delete-account/components/verify-email.tsx": { "react/set-state-in-effect": { "count": 1 } }, - "web/app/account/(commonLayout)/delete-account/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/account/oauth/authorize/layout.tsx": { "ts/no-explicit-any": { "count": 1 @@ -202,26 +184,11 @@ "count": 4 } }, - "web/app/components/app-sidebar/basic.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/app-sidebar/dataset-info/dropdown.tsx": { - "ts/no-explicit-any": { - "count": 1 - } - }, "web/app/components/app-sidebar/index.tsx": { "ts/no-explicit-any": { "count": 1 } }, - "web/app/components/app-sidebar/toggle-button.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/annotation/add-annotation-modal/edit-item/index.tsx": { "erasable-syntax-only/enums": { "count": 1 @@ -234,9 +201,6 @@ "erasable-syntax-only/enums": { "count": 1 }, - "no-restricted-imports": { - "count": 1 - }, "react-refresh/only-export-components": { "count": 1 }, @@ -287,7 +251,7 @@ "count": 1 } }, - "web/app/components/app/app-access-control/specific-groups-or-members.tsx": { + "web/app/components/app/app-access-control/add-member-or-group-pop.tsx": { "no-restricted-imports": { "count": 1 } @@ -297,16 +261,6 @@ "count": 4 } }, - 
"web/app/components/app/app-publisher/index.tsx": { - "ts/no-explicit-any": { - "count": 5 - } - }, - "web/app/components/app/app-publisher/version-info-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/base/var-highlight/index.tsx": { "react-refresh/only-export-components": { "count": 1 @@ -318,9 +272,6 @@ } }, "web/app/components/app/configuration/config-prompt/conversation-history/edit-modal.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -336,41 +287,15 @@ } }, "web/app/components/app/configuration/config-var/config-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 4 } }, - "web/app/components/app/configuration/config-var/config-modal/type-select.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/app/configuration/config-var/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/config-var/select-var-type.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } }, - "web/app/components/app/configuration/config-vision/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/app/configuration/config-vision/param-config-content.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/config/agent/agent-setting/index.tsx": { "react/set-state-in-effect": { "count": 1 @@ -379,15 +304,7 @@ "count": 1 } }, - "web/app/components/app/configuration/config/agent/agent-setting/item-panel.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/config/agent/agent-tools/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 9 } @@ -404,17 +321,11 @@ } }, "web/app/components/app/configuration/config/assistant-type-picker/index.tsx": { - "no-restricted-imports": { - 
"count": 1 - }, "ts/no-explicit-any": { "count": 1 } }, "web/app/components/app/configuration/config/automatic/get-automatic-res.tsx": { - "no-restricted-imports": { - "count": 1 - }, "react/set-state-in-effect": { "count": 4 }, @@ -442,15 +353,7 @@ "count": 1 } }, - "web/app/components/app/configuration/config/automatic/version-selector.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/config/code-generator/get-code-generator-res.tsx": { - "no-restricted-imports": { - "count": 1 - }, "react/set-state-in-effect": { "count": 4 }, @@ -458,21 +361,6 @@ "count": 2 } }, - "web/app/components/app/configuration/config/config-audio.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/app/configuration/config/config-document.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/app/configuration/dataset-config/context-var/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/dataset-config/index.tsx": { "ts/no-explicit-any": { "count": 1 @@ -483,24 +371,11 @@ "count": 1 } }, - "web/app/components/app/configuration/dataset-config/params-config/config-content.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/dataset-config/params-config/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, "react/set-state-in-effect": { "count": 1 } }, - "web/app/components/app/configuration/dataset-config/select-dataset/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/dataset-config/settings-modal/index.tsx": { "react/set-state-in-effect": { "count": 2 @@ -529,11 +404,6 @@ "count": 2 } }, - "web/app/components/app/configuration/debug/debug-with-multiple-model/model-parameter-trigger.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/debug/debug-with-multiple-model/text-generation-item.tsx": { 
"ts/no-explicit-any": { "count": 8 @@ -580,26 +450,10 @@ "count": 1 } }, - "web/app/components/app/create-app-modal/index.tsx": { - "react/set-state-in-effect": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 1 - } - }, - "web/app/components/app/create-from-dsl-modal/dsl-confirm-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/create-from-dsl-modal/index.tsx": { "erasable-syntax-only/enums": { "count": 1 }, - "no-restricted-imports": { - "count": 1 - }, "react-refresh/only-export-components": { "count": 1 }, @@ -607,11 +461,6 @@ "count": 2 } }, - "web/app/components/app/duplicate-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/log/filter.tsx": { "react-refresh/only-export-components": { "count": 1 @@ -623,9 +472,6 @@ } }, "web/app/components/app/log/list.tsx": { - "no-restricted-imports": { - "count": 1 - }, "react/set-state-in-effect": { "count": 6 }, @@ -676,9 +522,6 @@ } }, "web/app/components/app/switch-app-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, "react/set-state-in-effect": { "count": 1 } @@ -693,11 +536,6 @@ "count": 2 } }, - "web/app/components/app/workflow-log/detail.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/workflow-log/filter.tsx": { "react-refresh/only-export-components": { "count": 1 @@ -713,25 +551,6 @@ "count": 1 } }, - "web/app/components/apps/app-card.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "react/set-state-in-effect": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 2 - } - }, - "web/app/components/apps/list.tsx": { - "react-hooks/exhaustive-deps": { - "count": 1 - }, - "react/unsupported-syntax": { - "count": 2 - } - }, "web/app/components/apps/new-app-card.tsx": { "react-hooks-extra/no-direct-set-state-in-use-effect": { "count": 1 @@ -799,11 +618,6 @@ "count": 3 } }, - "web/app/components/base/audio-btn/index.tsx": { - "no-restricted-imports": { - "count": 1 - 
} - }, "web/app/components/base/audio-gallery/AudioPlayer.tsx": { "ts/no-explicit-any": { "count": 2 @@ -879,9 +693,6 @@ } }, "web/app/components/base/chat/chat-with-history/header/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 2 } @@ -909,11 +720,6 @@ "count": 1 } }, - "web/app/components/base/chat/chat-with-history/sidebar/rename-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/chat/chat/answer/agent-content.tsx": { "style/multiline-ternary": { "count": 2 @@ -935,11 +741,6 @@ "count": 1 } }, - "web/app/components/base/chat/chat/answer/operation.tsx": { - "no-restricted-imports": { - "count": 2 - } - }, "web/app/components/base/chat/chat/answer/workflow-process.tsx": { "react/set-state-in-effect": { "count": 1 @@ -999,11 +800,6 @@ "count": 7 } }, - "web/app/components/base/chat/embedded-chatbot/header/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/chat/embedded-chatbot/hooks.tsx": { "react-hooks-extra/no-direct-set-state-in-use-effect": { "count": 3 @@ -1040,16 +836,6 @@ "count": 3 } }, - "web/app/components/base/content-dialog/index.stories.tsx": { - "react/set-state-in-effect": { - "count": 1 - } - }, - "web/app/components/base/copy-feedback/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/date-and-time-picker/hooks.ts": { "react/no-unnecessary-use-prefix": { "count": 2 @@ -1101,21 +887,6 @@ "count": 1 } }, - "web/app/components/base/features/new-feature-panel/annotation-reply/annotation-ctrl-button.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/base/features/new-feature-panel/annotation-reply/config-param-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/base/features/new-feature-panel/annotation-reply/config-param.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, 
"web/app/components/base/features/new-feature-panel/annotation-reply/index.tsx": { "ts/no-explicit-any": { "count": 3 @@ -1131,15 +902,7 @@ "count": 2 } }, - "web/app/components/base/features/new-feature-panel/feature-bar.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/features/new-feature-panel/feature-card.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 5 } @@ -1150,9 +913,6 @@ } }, "web/app/components/base/features/new-feature-panel/moderation/moderation-setting-modal.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 2 } @@ -1176,9 +936,6 @@ } }, "web/app/components/base/file-uploader/file-list-in-log.tsx": { - "no-restricted-imports": { - "count": 1 - }, "react/no-missing-key": { "count": 1 } @@ -1201,11 +958,6 @@ "count": 2 } }, - "web/app/components/base/file-uploader/pdf-preview.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/file-uploader/store.tsx": { "react-refresh/only-export-components": { "count": 4 @@ -1221,14 +973,6 @@ "count": 3 } }, - "web/app/components/base/form/components/base/base-field.tsx": { - "no-restricted-imports": { - "count": 2 - }, - "ts/no-explicit-any": { - "count": 3 - } - }, "web/app/components/base/form/components/base/base-form.tsx": { "ts/no-explicit-any": { "count": 6 @@ -1244,14 +988,6 @@ "count": 1 } }, - "web/app/components/base/form/components/field/variable-or-constant-input.tsx": { - "no-console": { - "count": 2 - }, - "ts/no-explicit-any": { - "count": 2 - } - }, "web/app/components/base/form/components/field/variable-selector.tsx": { "no-console": { "count": 1 @@ -1433,7 +1169,7 @@ }, "web/app/components/base/icons/src/vender/line/development/index.ts": { "no-barrel-files/no-barrel-files": { - "count": 2 + "count": 1 } }, "web/app/components/base/icons/src/vender/line/editor/index.ts": { @@ -1601,15 +1337,7 @@ "count": 1 } }, - 
"web/app/components/base/image-uploader/image-list.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/image-uploader/image-preview.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -1665,11 +1393,6 @@ "count": 9 } }, - "web/app/components/base/markdown-blocks/form.tsx": { - "erasable-syntax-only/enums": { - "count": 3 - } - }, "web/app/components/base/markdown-blocks/index.ts": { "no-barrel-files/no-barrel-files": { "count": 10 @@ -1753,28 +1476,7 @@ "count": 1 } }, - "web/app/components/base/modal-like-wrap/index.stories.tsx": { - "no-console": { - "count": 3 - } - }, - "web/app/components/base/modal/index.stories.tsx": { - "react/set-state-in-effect": { - "count": 1 - } - }, - "web/app/components/base/modal/modal.stories.tsx": { - "no-console": { - "count": 4 - }, - "react/set-state-in-effect": { - "count": 1 - } - }, "web/app/components/base/new-audio-button/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -1807,11 +1509,6 @@ "count": 1 } }, - "web/app/components/base/param-item/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/prompt-editor/index.stories.tsx": { "no-console": { "count": 1 @@ -1822,11 +1519,11 @@ }, "web/app/components/base/prompt-editor/index.tsx": { "ts/no-explicit-any": { - "count": 4 + "count": 3 } }, "web/app/components/base/prompt-editor/plugins/component-picker-block/index.tsx": { - "ts/no-explicit-any": { + "no-restricted-imports": { "count": 1 } }, @@ -1915,8 +1612,8 @@ } }, "web/app/components/base/prompt-editor/plugins/shortcuts-popup-plugin/index.tsx": { - "ts/no-explicit-any": { - "count": 2 + "no-restricted-imports": { + "count": 1 } }, "web/app/components/base/prompt-editor/plugins/update-block.tsx": { @@ -1957,11 +1654,6 @@ "count": 1 } }, - "web/app/components/base/qrcode/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, 
"web/app/components/base/radio-card/index.stories.tsx": { "ts/no-explicit-any": { "count": 1 @@ -1990,25 +1682,6 @@ "count": 1 } }, - "web/app/components/base/select/index.stories.tsx": { - "no-console": { - "count": 4 - }, - "ts/no-explicit-any": { - "count": 1 - } - }, - "web/app/components/base/select/index.tsx": { - "react/set-state-in-effect": { - "count": 2 - }, - "style/multiline-ternary": { - "count": 2 - }, - "ts/no-explicit-any": { - "count": 1 - } - }, "web/app/components/base/sort/index.tsx": { "ts/no-explicit-any": { "count": 2 @@ -2032,26 +1705,6 @@ "count": 1 } }, - "web/app/components/base/tag-management/__tests__/panel.spec.tsx": { - "ts/no-explicit-any": { - "count": 2 - } - }, - "web/app/components/base/tag-management/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/base/tag-management/tag-item-editor.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/base/tag-management/tag-remove-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/text-generation/hooks.ts": { "ts/no-explicit-any": { "count": 1 @@ -2060,6 +1713,7 @@ "web/app/components/base/text-generation/types.ts": { "no-barrel-files/no-barrel-files": { "count": 1 + "count": 1 } }, "web/app/components/base/textarea/index.stories.tsx": { @@ -2114,31 +1768,16 @@ "count": 4 } }, - "web/app/components/billing/annotation-full/modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/billing/billing-page/__tests__/index.spec.tsx": { "ts/no-explicit-any": { "count": 4 } }, - "web/app/components/billing/plan-upgrade-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/billing/plan/assets/index.tsx": { "no-barrel-files/no-barrel-files": { "count": 4 } }, - "web/app/components/billing/plan/index.tsx": { - "ts/no-explicit-any": { - "count": 2 - } - }, "web/app/components/billing/pricing/assets/index.tsx": { 
"no-barrel-files/no-barrel-files": { "count": 12 @@ -2157,21 +1796,11 @@ "count": 1 } }, - "web/app/components/billing/priority-label/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/billing/type.ts": { "erasable-syntax-only/enums": { "count": 4 } }, - "web/app/components/billing/upgrade-btn/index.tsx": { - "ts/no-explicit-any": { - "count": 3 - } - }, "web/app/components/datasets/common/image-previewer/index.tsx": { "no-irregular-whitespace": { "count": 1 @@ -2187,11 +1816,6 @@ "count": 3 } }, - "web/app/components/datasets/common/image-uploader/image-uploader-in-retrieval-testing/image-input.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/common/image-uploader/store.tsx": { "react-refresh/only-export-components": { "count": 3 @@ -2202,16 +1826,6 @@ "count": 1 } }, - "web/app/components/datasets/common/retrieval-param-config/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/datasets/create-from-pipeline/create-options/create-from-dsl-modal/dsl-confirm-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/create-from-pipeline/create-options/create-from-dsl-modal/hooks/use-dsl-import.ts": { "erasable-syntax-only/enums": { "count": 1 @@ -2220,14 +1834,6 @@ "web/app/components/datasets/create-from-pipeline/create-options/create-from-dsl-modal/index.tsx": { "no-barrel-files/no-barrel-files": { "count": 1 - }, - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/datasets/create-from-pipeline/list/template-card/details/index.tsx": { - "no-restricted-imports": { - "count": 1 } }, "web/app/components/datasets/create-from-pipeline/list/template-card/details/types.ts": { @@ -2235,21 +1841,6 @@ "count": 1 } }, - "web/app/components/datasets/create-from-pipeline/list/template-card/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - 
"web/app/components/datasets/create/embedding-process/indexing-progress-item.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/datasets/create/empty-dataset-creation-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/create/file-preview/index.tsx": { "react/set-state-in-effect": { "count": 1 @@ -2270,26 +1861,11 @@ "count": 1 } }, - "web/app/components/datasets/create/step-two/components/general-chunking-options.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/create/step-two/components/index.ts": { "no-barrel-files/no-barrel-files": { "count": 5 } }, - "web/app/components/datasets/create/step-two/components/indexing-mode-section.tsx": { - "no-restricted-imports": { - "count": 2 - } - }, - "web/app/components/datasets/create/step-two/components/inputs.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/create/step-two/hooks/index.ts": { "no-barrel-files/no-barrel-files": { "count": 6 @@ -2319,21 +1895,6 @@ "count": 1 } }, - "web/app/components/datasets/create/stop-embedding-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/datasets/create/website/base/checkbox-with-label.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/datasets/create/website/base/field.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/create/website/firecrawl/index.tsx": { "no-console": { "count": 1 @@ -2382,11 +1943,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/components/document-list/components/document-table-row.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/components/document-list/components/index.ts": { "no-barrel-files/no-barrel-files": { "count": 2 @@ -2447,11 +2003,6 @@ "count": 4 } }, - 
"web/app/components/datasets/documents/create-from-pipeline/data-source/website-crawl/base/checkbox-with-label.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/create-from-pipeline/data-source/website-crawl/base/options/index.tsx": { "ts/no-explicit-any": { "count": 1 @@ -2477,11 +2028,6 @@ "count": 2 } }, - "web/app/components/datasets/documents/create-from-pipeline/processing/embedding-process/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/create-from-pipeline/steps/index.ts": { "no-barrel-files/no-barrel-files": { "count": 3 @@ -2492,14 +2038,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/batch-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "react/set-state-in-effect": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/completed/common/chunk-content.tsx": { "react/set-state-in-effect": { "count": 1 @@ -2520,11 +2058,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/completed/display-toggle.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/completed/hooks/index.ts": { "no-barrel-files/no-barrel-files": { "count": 5 @@ -2584,14 +2117,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/segment-add/index.tsx": { - "erasable-syntax-only/enums": { - "count": 1 - }, - "react-refresh/only-export-components": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/settings/pipeline-settings/index.tsx": { "ts/no-explicit-any": { "count": 6 @@ -2602,19 +2127,6 @@ "count": 3 } }, - "web/app/components/datasets/documents/status-item/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/datasets/external-api/external-api-modal/index.tsx": { - "no-restricted-imports": { - "count": 2 - }, - "react/set-state-in-effect": { - "count": 1 - } - }, 
"web/app/components/datasets/external-knowledge-base/create/ExternalApiSelect.tsx": { "react/set-state-in-effect": { "count": 1 @@ -2625,7 +2137,12 @@ "count": 1 } }, - "web/app/components/datasets/extra-info/statistics.tsx": { + "web/app/components/datasets/formatted-text/flavours/edit-slice.tsx": { + "no-restricted-imports": { + "count": 2 + } + }, + "web/app/components/datasets/formatted-text/flavours/preview-slice.tsx": { "no-restricted-imports": { "count": 1 } @@ -2635,21 +2152,6 @@ "count": 1 } }, - "web/app/components/datasets/hit-testing/components/chunk-detail-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/datasets/hit-testing/components/query-input/textarea.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/datasets/hit-testing/components/result-item-external.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/hit-testing/components/score.tsx": { "unicorn/prefer-number-properties": { "count": 1 @@ -2660,31 +2162,11 @@ "count": 1 } }, - "web/app/components/datasets/list/dataset-card/components/dataset-card-footer.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/datasets/list/dataset-card/hooks/use-dataset-card-state.ts": { - "react/set-state-in-effect": { - "count": 1 - } - }, - "web/app/components/datasets/metadata/edit-metadata-batch/edited-beacon.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/metadata/edit-metadata-batch/input-combined.tsx": { "ts/no-explicit-any": { "count": 2 } }, - "web/app/components/datasets/metadata/edit-metadata-batch/modal.tsx": { - "no-restricted-imports": { - "count": 2 - } - }, "web/app/components/datasets/metadata/hooks/use-edit-dataset-metadata.ts": { "react/set-state-in-effect": { "count": 1 @@ -2698,67 +2180,19 @@ "count": 2 } }, - "web/app/components/datasets/metadata/metadata-dataset/create-content.tsx": { - "ts/no-explicit-any": { - "count": 1 
- } - }, - "web/app/components/datasets/metadata/metadata-dataset/create-metadata-modal.tsx": { - "ts/no-explicit-any": { - "count": 1 - } - }, - "web/app/components/datasets/metadata/metadata-dataset/dataset-metadata-drawer.tsx": { - "no-restricted-imports": { - "count": 2 - } - }, - "web/app/components/datasets/metadata/metadata-dataset/select-metadata-modal.tsx": { - "erasable-syntax-only/enums": { - "count": 1 - } - }, - "web/app/components/datasets/metadata/metadata-document/info-group.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/metadata/types.ts": { "erasable-syntax-only/enums": { "count": 2 } }, - "web/app/components/datasets/rename-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/settings/chunk-structure/types.ts": { "erasable-syntax-only/enums": { "count": 1 } }, - "web/app/components/datasets/settings/index-method/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/datasets/settings/index-method/keyword-number.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/datasets/settings/summary-index-setting.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/develop/code.tsx": { - "ts/no-empty-object-type": { - "count": 1 - }, "ts/no-explicit-any": { - "count": 9 + "count": 7 } }, "web/app/components/develop/md.tsx": { @@ -2795,32 +2229,11 @@ "count": 2 } }, - "web/app/components/explore/create-app-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 1 - }, - "unicorn/prefer-number-properties": { - "count": 1 - } - }, "web/app/components/explore/item-operation/index.tsx": { "react/set-state-in-effect": { "count": 1 } }, - "web/app/components/explore/try-app/app/chat.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/explore/try-app/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, 
"web/app/components/explore/try-app/tab.tsx": { "erasable-syntax-only/enums": { "count": 1 @@ -2900,16 +2313,6 @@ "count": 1 } }, - "web/app/components/header/account-about/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/header/account-setting/api-based-extension-page/modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/header/account-setting/data-source-page-new/card.tsx": { "ts/no-explicit-any": { "count": 2 @@ -2941,9 +2344,6 @@ } }, "web/app/components/header/account-setting/key-validator/declarations.ts": { - "erasable-syntax-only/enums": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -2957,11 +2357,8 @@ "erasable-syntax-only/enums": { "count": 1 }, - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { - "count": 3 + "count": 2 } }, "web/app/components/header/account-setting/model-provider-page/declarations.ts": { @@ -2985,12 +2382,7 @@ "count": 4 } }, - "web/app/components/header/account-setting/model-provider-page/model-auth/config-provider.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/header/account-setting/model-provider-page/model-auth/credential-selector.tsx": { + "web/app/components/header/account-setting/model-provider-page/model-auth/authorized/index.tsx": { "no-restricted-imports": { "count": 1 } @@ -3021,9 +2413,6 @@ } }, "web/app/components/header/account-setting/model-provider-page/model-auth/switch-credential-in-load-balancing.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 3 } @@ -3057,22 +2446,11 @@ } }, "web/app/components/header/account-setting/model-provider-page/model-parameter-modal/status-indicators.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 2 } }, - "web/app/components/header/account-setting/model-provider-page/model-selector/feature-icon.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, 
"web/app/components/header/account-setting/model-provider-page/provider-added-card/cooldown-timer.tsx": { - "no-restricted-imports": { - "count": 1 - }, "react/set-state-in-effect": { "count": 2 } @@ -3082,23 +2460,12 @@ "count": 2 } }, - "web/app/components/header/account-setting/model-provider-page/provider-added-card/model-list-item.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/header/account-setting/model-provider-page/provider-added-card/model-load-balancing-configs.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 5 } }, "web/app/components/header/account-setting/model-provider-page/provider-added-card/model-load-balancing-modal.tsx": { - "no-restricted-imports": { - "count": 1 - }, "react/set-state-in-effect": { "count": 1 }, @@ -3106,11 +2473,6 @@ "count": 3 } }, - "web/app/components/header/account-setting/model-provider-page/provider-added-card/priority-use-tip.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/header/account-setting/model-provider-page/utils.ts": { "no-barrel-files/no-barrel-files": { "count": 2 @@ -3121,14 +2483,6 @@ "count": 4 } }, - "web/app/components/header/app-nav/index.tsx": { - "react/set-state-in-effect": { - "count": 2 - }, - "ts/no-explicit-any": { - "count": 1 - } - }, "web/app/components/header/header-wrapper.tsx": { "ts/no-explicit-any": { "count": 1 @@ -3153,9 +2507,6 @@ "erasable-syntax-only/enums": { "count": 1 }, - "no-restricted-imports": { - "count": 1 - }, "react-refresh/only-export-components": { "count": 1 } @@ -3183,27 +2534,12 @@ "count": 1 } }, - "web/app/components/plugins/install-plugin/install-from-local-package/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/plugins/install-plugin/install-from-local-package/steps/uploading.tsx": { - "ts/no-explicit-any": { - "count": 2 - } - }, - "web/app/components/plugins/install-plugin/install-from-marketplace/index.tsx": { - 
"no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/marketplace/hooks.ts": { "@tanstack/query/exhaustive-deps": { "count": 1 } }, - "web/app/components/plugins/plugin-auth/authorize/add-oauth-button.tsx": { + "web/app/components/plugins/plugin-auth/authorized-in-node.tsx": { "ts/no-explicit-any": { "count": 2 } @@ -3213,23 +2549,7 @@ "count": 1 } }, - "web/app/components/plugins/plugin-auth/authorize/oauth-client-settings.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 2 - } - }, - "web/app/components/plugins/plugin-auth/authorized-in-node.tsx": { - "ts/no-explicit-any": { - "count": 1 - } - }, "web/app/components/plugins/plugin-auth/authorized/item.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -3299,9 +2619,6 @@ } }, "web/app/components/plugins/plugin-detail-panel/endpoint-list.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 2 } @@ -3326,14 +2643,6 @@ "count": 1 } }, - "web/app/components/plugins/plugin-detail-panel/multiple-tool-selector/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 1 - } - }, "web/app/components/plugins/plugin-detail-panel/strategy-detail.tsx": { "ts/no-explicit-any": { "count": 2 @@ -3344,11 +2653,6 @@ "count": 2 } }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/create/common-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-detail-panel/subscription-list/create/hooks/use-common-modal-state.ts": { "erasable-syntax-only/enums": { "count": 1 @@ -3377,34 +2681,11 @@ "count": 1 } }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/edit/apikey-edit-modal.tsx": { - "erasable-syntax-only/enums": { - "count": 1 - }, - "no-restricted-imports": { - "count": 1 - } - }, - 
"web/app/components/plugins/plugin-detail-panel/subscription-list/edit/manual-edit-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/edit/oauth-edit-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-detail-panel/subscription-list/index.tsx": { "no-barrel-files/no-barrel-files": { "count": 2 } }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/list-view.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-detail-panel/subscription-list/log-viewer.tsx": { "erasable-syntax-only/enums": { "count": 1 @@ -3413,16 +2694,6 @@ "count": 2 } }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/selector-view.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/subscription-card.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-detail-panel/subscription-list/types.ts": { "erasable-syntax-only/enums": { "count": 1 @@ -3438,39 +2709,26 @@ "count": 1 } }, - "web/app/components/plugins/plugin-detail-panel/tool-selector/components/tool-item.tsx": { - "no-restricted-imports": { - "count": 2 - } - }, "web/app/components/plugins/plugin-detail-panel/tool-selector/hooks/index.ts": { "no-barrel-files/no-barrel-files": { "count": 2 } }, + "web/app/components/plugins/plugin-detail-panel/tool-selector/index.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/plugins/plugin-detail-panel/trigger/event-detail-drawer.tsx": { "ts/no-explicit-any": { "count": 5 } }, - "web/app/components/plugins/plugin-item/action.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-item/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } }, - 
"web/app/components/plugins/plugin-mutation-model/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-page/context.ts": { "ts/no-explicit-any": { "count": 1 @@ -3481,11 +2739,6 @@ "count": 2 } }, - "web/app/components/plugins/plugin-page/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-page/install-plugin-dropdown.tsx": { "react/set-state-in-effect": { "count": 2 @@ -3511,11 +2764,6 @@ "count": 2 } }, - "web/app/components/plugins/reference-setting-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/types.ts": { "erasable-syntax-only/enums": { "count": 7 @@ -3568,16 +2816,6 @@ "count": 1 } }, - "web/app/components/rag-pipeline/components/panel/input-field/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/rag-pipeline/components/panel/input-field/label-right-content/global-inputs.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/rag-pipeline/components/panel/test-run/preparation/document-processing/index.tsx": { "ts/no-explicit-any": { "count": 1 @@ -3608,11 +2846,6 @@ "count": 4 } }, - "web/app/components/rag-pipeline/components/publish-as-knowledge-pipeline-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/rag-pipeline/components/rag-pipeline-children.tsx": { "ts/no-explicit-any": { "count": 1 @@ -3628,16 +2861,6 @@ "count": 2 } }, - "web/app/components/rag-pipeline/components/update-dsl-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/rag-pipeline/components/version-mismatch-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/rag-pipeline/hooks/index.ts": { "no-barrel-files/no-barrel-files": { "count": 9 @@ -3693,11 +2916,6 @@ "count": 1 } }, - "web/app/components/share/text-generation/info-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - 
}, "web/app/components/share/text-generation/menu-dropdown.tsx": { "react/set-state-in-effect": { "count": 1 @@ -3736,16 +2954,6 @@ "count": 2 } }, - "web/app/components/tools/edit-custom-collection-modal/config-credentials.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/tools/edit-custom-collection-modal/get-schema.tsx": { - "ts/no-explicit-any": { - "count": 1 - } - }, "web/app/components/tools/edit-custom-collection-modal/index.tsx": { "react/set-state-in-effect": { "count": 4 @@ -3782,16 +2990,6 @@ "count": 1 } }, - "web/app/components/tools/mcp/mcp-service-card.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/tools/mcp/modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/tools/mcp/provider-card.tsx": { "ts/no-explicit-any": { "count": 3 @@ -3820,21 +3018,6 @@ "count": 4 } }, - "web/app/components/tools/workflow-tool/confirm-modal/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/tools/workflow-tool/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/tools/workflow-tool/method-selector.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow-app/components/workflow-children.tsx": { "ts/no-explicit-any": { "count": 3 @@ -3916,6 +3099,11 @@ "count": 1 } }, + "web/app/components/workflow/block-selector/main.tsx": { + "no-restricted-imports": { + "count": 1 + } + }, "web/app/components/workflow/block-selector/market-place-plugin/action.tsx": { "react/set-state-in-effect": { "count": 1 @@ -3931,7 +3119,7 @@ "count": 1 } }, - "web/app/components/workflow/block-selector/tabs.tsx": { + "web/app/components/workflow/block-selector/tool-picker.tsx": { "no-restricted-imports": { "count": 1 } @@ -3997,11 +3185,6 @@ "count": 1 } }, - "web/app/components/workflow/dsl-export-confirm-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, 
"web/app/components/workflow/header/run-mode.tsx": { "no-console": { "count": 1 @@ -4018,11 +3201,6 @@ "count": 1 } }, - "web/app/components/workflow/header/version-history-button.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/hooks-store/index.ts": { "no-barrel-files/no-barrel-files": { "count": 2 @@ -4045,7 +3223,7 @@ }, "web/app/components/workflow/hooks/index.ts": { "no-barrel-files/no-barrel-files": { - "count": 27 + "count": 25 } }, "web/app/components/workflow/hooks/use-checklist.ts": { @@ -4152,11 +3330,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/_base/components/config-vision.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/_base/components/editor/code-editor/editor-support-vars.tsx": { "react/set-state-in-effect": { "count": 1 @@ -4205,9 +3378,6 @@ } }, "web/app/components/workflow/nodes/_base/components/input-support-select-var.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -4230,11 +3400,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/_base/components/mcp-tool-not-support-tooltip.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/_base/components/memory-config.tsx": { "unicorn/prefer-number-properties": { "count": 1 @@ -4256,9 +3421,6 @@ } }, "web/app/components/workflow/nodes/_base/components/prompt/editor.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 4 } @@ -4274,9 +3436,6 @@ } }, "web/app/components/workflow/nodes/_base/components/setting-item.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -4291,11 +3450,6 @@ "count": 8 } }, - "web/app/components/workflow/nodes/_base/components/variable/object-child-tree-panel/picker/field.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/_base/components/variable/output-var-list.tsx": { 
"ts/no-non-null-asserted-optional-chain": { "count": 1 @@ -4311,11 +3465,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/_base/components/variable/var-type-picker.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/_base/components/variable/variable-label/hooks.ts": { "react/no-unnecessary-use-prefix": { "count": 2 @@ -4327,9 +3476,6 @@ } }, "web/app/components/workflow/nodes/_base/components/workflow-panel/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, "react/set-state-in-effect": { "count": 3 }, @@ -4561,16 +3707,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/http/components/authorization/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/http/components/curl-panel.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/http/components/key-value/key-value-edit/index.tsx": { "ts/no-explicit-any": { "count": 2 @@ -4604,42 +3740,6 @@ "count": 5 } }, - "web/app/components/workflow/nodes/human-input/components/delivery-method/email-configure-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/human-input/components/delivery-method/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/human-input/components/delivery-method/method-item.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/human-input/components/delivery-method/method-selector.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/human-input/components/delivery-method/test-email-sender.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 2 - }, - "ts/no-non-null-asserted-optional-chain": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/human-input/components/delivery-method/upgrade-modal.tsx": { - 
"no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/human-input/components/form-content-preview.tsx": { "react/unsupported-syntax": { "count": 1 @@ -4664,11 +3764,6 @@ "count": 2 } }, - "web/app/components/workflow/nodes/human-input/panel.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/human-input/types.ts": { "erasable-syntax-only/enums": { "count": 2 @@ -4704,11 +3799,6 @@ "count": 5 } }, - "web/app/components/workflow/nodes/iteration-start/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/iteration/default.ts": { "ts/no-explicit-any": { "count": 1 @@ -4724,16 +3814,6 @@ "count": 6 } }, - "web/app/components/workflow/nodes/knowledge-base/components/chunk-structure/selector.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/knowledge-base/components/index-method.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/knowledge-base/components/retrieval-setting/hooks.tsx": { "ts/no-explicit-any": { "count": 4 @@ -4772,26 +3852,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/condition-list/condition-operator.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/condition-list/condition-value-method.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/metadata-filter/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/metadata-filter/metadata-filter-selector.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/knowledge-retrieval/default.ts": { "ts/no-explicit-any": { "count": 1 @@ -4830,14 +3890,6 @@ "count": 1 } }, - 
"web/app/components/workflow/nodes/llm/components/config-prompt-item.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 3 - } - }, "web/app/components/workflow/nodes/llm/components/config-prompt.tsx": { "react/unsupported-syntax": { "count": 1 @@ -4871,11 +3923,6 @@ "count": 2 } }, - "web/app/components/workflow/nodes/llm/components/json-schema-config-modal/json-schema-generator/prompt-editor.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/llm/components/json-schema-config-modal/visual-editor/context.tsx": { "react-refresh/only-export-components": { "count": 2 @@ -4886,11 +3933,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/llm/components/json-schema-config-modal/visual-editor/edit-card/type-selector.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/llm/components/json-schema-config-modal/visual-editor/hooks.ts": { "ts/no-explicit-any": { "count": 1 @@ -4932,11 +3974,6 @@ "count": 7 } }, - "web/app/components/workflow/nodes/loop-start/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/loop/components/condition-list/condition-input.tsx": { "ts/no-explicit-any": { "count": 1 @@ -5008,11 +4045,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/parameter-extractor/panel.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/parameter-extractor/types.ts": { "erasable-syntax-only/enums": { "count": 2 @@ -5031,40 +4063,9 @@ "count": 9 } }, - "web/app/components/workflow/nodes/question-classifier/components/advanced-setting.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/question-classifier/components/class-item.tsx": { - "react/set-state-in-effect": { - "count": 1 - } - }, "web/app/components/workflow/nodes/question-classifier/components/class-list.tsx": { "react/set-state-in-effect": { 
"count": 1 - }, - "react/unsupported-syntax": { - "count": 2 - } - }, - "web/app/components/workflow/nodes/question-classifier/default.ts": { - "ts/no-explicit-any": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/question-classifier/node.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/question-classifier/use-config.ts": { - "react/set-state-in-effect": { - "count": 2 - }, - "ts/no-explicit-any": { - "count": 2 } }, "web/app/components/workflow/nodes/question-classifier/use-single-run-form-params.ts": { @@ -5123,9 +4124,6 @@ } }, "web/app/components/workflow/nodes/tool/components/tool-form/item.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -5169,9 +4167,6 @@ } }, "web/app/components/workflow/nodes/trigger-plugin/components/trigger-form/item.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -5263,7 +4258,7 @@ } }, "web/app/components/workflow/note-node/note-editor/plugins/link-editor-plugin/component.tsx": { - "react/set-state-in-effect": { + "no-restricted-imports": { "count": 1 } }, @@ -5273,6 +4268,9 @@ } }, "web/app/components/workflow/operator/add-block.tsx": { + "no-restricted-imports": { + "count": 1 + }, "ts/no-explicit-any": { "count": 1 } @@ -5282,11 +4280,6 @@ "count": 1 } }, - "web/app/components/workflow/operator/tip-popup.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/operator/zoom-in-out.tsx": { "erasable-syntax-only/enums": { "count": 1 @@ -5339,9 +4332,6 @@ } }, "web/app/components/workflow/panel/debug-and-preview/conversation-variable-modal.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 2 } @@ -5351,15 +4341,7 @@ "count": 12 } }, - "web/app/components/workflow/panel/debug-and-preview/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/panel/env-panel/variable-modal.tsx": { - 
"no-restricted-imports": { - "count": 1 - }, "react/set-state-in-effect": { "count": 4 }, @@ -5377,16 +4359,6 @@ "count": 4 } }, - "web/app/components/workflow/panel/version-history-panel/delete-confirm-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/panel/version-history-panel/restore-confirm-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/panel/workflow-preview.tsx": { "ts/no-explicit-any": { "count": 2 @@ -5522,9 +4494,6 @@ } }, "web/app/components/workflow/update-dsl-modal.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -5580,9 +4549,6 @@ } }, "web/app/components/workflow/variable-inspect/group.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 2 } @@ -5593,9 +4559,6 @@ } }, "web/app/components/workflow/variable-inspect/listening.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 2 } @@ -5606,17 +4569,11 @@ } }, "web/app/components/workflow/variable-inspect/right.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 3 } }, "web/app/components/workflow/variable-inspect/trigger.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -5640,51 +4597,21 @@ "count": 5 } }, - "web/app/components/workflow/workflow-history-store.tsx": { - "react-refresh/only-export-components": { - "count": 2 - } - }, - "web/app/components/workflow/workflow-preview/components/nodes/base.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/workflow-preview/components/nodes/constants.ts": { "ts/no-explicit-any": { "count": 1 } }, - "web/app/components/workflow/workflow-preview/components/nodes/iteration-start/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/workflow-preview/components/nodes/loop-start/index.tsx": { - 
"no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/workflow-preview/components/zoom-in-out.tsx": { "erasable-syntax-only/enums": { "count": 1 } }, - "web/app/education-apply/expire-notice-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/education-apply/hooks.ts": { "react/set-state-in-effect": { "count": 5 } }, - "web/app/education-apply/verify-state-modal.tsx": { - "react/set-state-in-effect": { - "count": 1 - } - }, "web/app/forgot-password/ForgotPasswordForm.spec.tsx": { "ts/no-explicit-any": { "count": 5 @@ -5710,11 +4637,6 @@ "count": 1 } }, - "web/app/signin/_header.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/signin/components/mail-and-password-auth.tsx": { "ts/no-explicit-any": { "count": 1 @@ -5753,21 +4675,6 @@ "count": 3 } }, - "web/context/modal-context-provider.tsx": { - "ts/no-explicit-any": { - "count": 3 - } - }, - "web/context/modal-context.test.tsx": { - "ts/no-explicit-any": { - "count": 5 - } - }, - "web/context/modal-context.ts": { - "ts/no-explicit-any": { - "count": 2 - } - }, "web/context/provider-context-provider.tsx": { "ts/no-explicit-any": { "count": 1 @@ -5929,11 +4836,6 @@ "count": 1 } }, - "web/plugins/dev-proxy/server.spec.ts": { - "ts/no-explicit-any": { - "count": 1 - } - }, "web/scripts/component-analyzer.js": { "regexp/no-unused-capturing-group": { "count": 6 @@ -5987,11 +4889,6 @@ "count": 2 } }, - "web/service/knowledge/use-dataset.ts": { - "@tanstack/query/exhaustive-deps": { - "count": 1 - } - }, "web/service/share.ts": { "erasable-syntax-only/enums": { "count": 1 @@ -6005,11 +4902,6 @@ "count": 2 } }, - "web/service/use-apps.ts": { - "ts/no-explicit-any": { - "count": 1 - } - }, "web/service/use-common.ts": { "ts/no-empty-object-type": { "count": 1 diff --git a/eslint.config.mjs b/eslint.config.mjs index ae9fdaff01..1380ed67d2 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -4,6 +4,17 @@ import antfu, { GLOB_MARKDOWN } from 
'@antfu/eslint-config' import md from 'eslint-markdown' import markdownPreferences from 'eslint-plugin-markdown-preferences' +const GENERATED_IGNORES = [ + '**/storybook-static/', + '**/.next/', + 'web/next/', + 'web/next-env.d.ts', + '**/dist/', + '**/coverage/', + 'e2e/.auth/', + 'e2e/cucumber-report/', +] + export default antfu( { ignores: original => [ @@ -15,6 +26,7 @@ export default antfu( '!package.json', '!pnpm-workspace.yaml', '!vite.config.ts', + ...GENERATED_IGNORES, ...original, ], typescript: { diff --git a/package.json b/package.json index 42d6961f5f..9ef6b4ef4e 100644 --- a/package.json +++ b/package.json @@ -2,11 +2,12 @@ "name": "dify", "type": "module", "private": true, - "packageManager": "pnpm@10.33.2", + "packageManager": "pnpm@11.0.8", "engines": { "node": "^22.22.1" }, "scripts": { + "dev": "concurrently -k -n vinext,proxy \"vp run dify-web#dev:vinext\" \"vp run dify-web#dev:proxy\"", "prepare": "vp config", "type-check": "vp run -r type-check", "lint": "eslint --cache --concurrency=auto", @@ -16,6 +17,7 @@ }, "devDependencies": { "@antfu/eslint-config": "catalog:", + "concurrently": "catalog:", "eslint": "catalog:", "eslint-markdown": "catalog:", "eslint-plugin-markdown-preferences": "catalog:", diff --git a/packages/contracts/generated/api/console/account/orpc.gen.ts b/packages/contracts/generated/api/console/account/orpc.gen.ts new file mode 100644 index 0000000000..a926103667 --- /dev/null +++ b/packages/contracts/generated/api/console/account/orpc.gen.ts @@ -0,0 +1,378 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zGetAccountAvatarQuery, + zGetAccountAvatarResponse, + zGetAccountDeleteVerifyResponse, + zGetAccountEducationAutocompleteQuery, + zGetAccountEducationAutocompleteResponse, + zGetAccountEducationResponse, + zGetAccountEducationVerifyResponse, + zGetAccountIntegratesResponse, + zGetAccountProfileResponse, + zPostAccountAvatarBody, + 
zPostAccountAvatarResponse, + zPostAccountChangeEmailBody, + zPostAccountChangeEmailCheckEmailUniqueBody, + zPostAccountChangeEmailCheckEmailUniqueResponse, + zPostAccountChangeEmailResetBody, + zPostAccountChangeEmailResetResponse, + zPostAccountChangeEmailResponse, + zPostAccountChangeEmailValidityBody, + zPostAccountChangeEmailValidityResponse, + zPostAccountDeleteBody, + zPostAccountDeleteFeedbackBody, + zPostAccountDeleteFeedbackResponse, + zPostAccountDeleteResponse, + zPostAccountEducationBody, + zPostAccountEducationResponse, + zPostAccountInitBody, + zPostAccountInitResponse, + zPostAccountInterfaceLanguageBody, + zPostAccountInterfaceLanguageResponse, + zPostAccountInterfaceThemeBody, + zPostAccountInterfaceThemeResponse, + zPostAccountNameBody, + zPostAccountNameResponse, + zPostAccountPasswordBody, + zPostAccountPasswordResponse, + zPostAccountTimezoneBody, + zPostAccountTimezoneResponse, +} from './zod.gen' + +/** + * Get account avatar url + */ +export const get = oc + .route({ + description: 'Get account avatar url', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAccountAvatar', + path: '/account/avatar', + tags: ['console'], + }) + .input(z.object({ query: zGetAccountAvatarQuery })) + .output(zGetAccountAvatarResponse) + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAccountAvatar', + path: '/account/avatar', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountAvatarBody })) + .output(zPostAccountAvatarResponse) + +export const avatar = { + get, + post, +} + +export const post2 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAccountChangeEmailCheckEmailUnique', + path: '/account/change-email/check-email-unique', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountChangeEmailCheckEmailUniqueBody })) + .output(zPostAccountChangeEmailCheckEmailUniqueResponse) + +export const checkEmailUnique = { + post: post2, +} + 
+export const post3 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAccountChangeEmailReset', + path: '/account/change-email/reset', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountChangeEmailResetBody })) + .output(zPostAccountChangeEmailResetResponse) + +export const reset = { + post: post3, +} + +export const post4 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAccountChangeEmailValidity', + path: '/account/change-email/validity', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountChangeEmailValidityBody })) + .output(zPostAccountChangeEmailValidityResponse) + +export const validity = { + post: post4, +} + +export const post5 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAccountChangeEmail', + path: '/account/change-email', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountChangeEmailBody })) + .output(zPostAccountChangeEmailResponse) + +export const changeEmail = { + post: post5, + checkEmailUnique, + reset, + validity, +} + +export const post6 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAccountDeleteFeedback', + path: '/account/delete/feedback', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountDeleteFeedbackBody })) + .output(zPostAccountDeleteFeedbackResponse) + +export const feedback = { + post: post6, +} + +export const get2 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAccountDeleteVerify', + path: '/account/delete/verify', + tags: ['console'], + }) + .output(zGetAccountDeleteVerifyResponse) + +export const verify = { + get: get2, +} + +export const post7 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAccountDelete', + path: '/account/delete', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountDeleteBody })) + .output(zPostAccountDeleteResponse) + +export const 
delete_ = { + post: post7, + feedback, + verify, +} + +export const get3 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAccountEducationAutocomplete', + path: '/account/education/autocomplete', + tags: ['console'], + }) + .input(z.object({ query: zGetAccountEducationAutocompleteQuery })) + .output(zGetAccountEducationAutocompleteResponse) + +export const autocomplete = { + get: get3, +} + +export const get4 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAccountEducationVerify', + path: '/account/education/verify', + tags: ['console'], + }) + .output(zGetAccountEducationVerifyResponse) + +export const verify2 = { + get: get4, +} + +export const get5 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAccountEducation', + path: '/account/education', + tags: ['console'], + }) + .output(zGetAccountEducationResponse) + +export const post8 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAccountEducation', + path: '/account/education', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountEducationBody })) + .output(zPostAccountEducationResponse) + +export const education = { + get: get5, + post: post8, + autocomplete, + verify: verify2, +} + +export const post9 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAccountInit', + path: '/account/init', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountInitBody })) + .output(zPostAccountInitResponse) + +export const init = { + post: post9, +} + +export const get6 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAccountIntegrates', + path: '/account/integrates', + tags: ['console'], + }) + .output(zGetAccountIntegratesResponse) + +export const integrates = { + get: get6, +} + +export const post10 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 
'postAccountInterfaceLanguage', + path: '/account/interface-language', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountInterfaceLanguageBody })) + .output(zPostAccountInterfaceLanguageResponse) + +export const interfaceLanguage = { + post: post10, +} + +export const post11 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAccountInterfaceTheme', + path: '/account/interface-theme', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountInterfaceThemeBody })) + .output(zPostAccountInterfaceThemeResponse) + +export const interfaceTheme = { + post: post11, +} + +export const post12 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAccountName', + path: '/account/name', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountNameBody })) + .output(zPostAccountNameResponse) + +export const name = { + post: post12, +} + +export const post13 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAccountPassword', + path: '/account/password', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountPasswordBody })) + .output(zPostAccountPasswordResponse) + +export const password = { + post: post13, +} + +export const get7 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAccountProfile', + path: '/account/profile', + tags: ['console'], + }) + .output(zGetAccountProfileResponse) + +export const profile = { + get: get7, +} + +export const post14 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAccountTimezone', + path: '/account/timezone', + tags: ['console'], + }) + .input(z.object({ body: zPostAccountTimezoneBody })) + .output(zPostAccountTimezoneResponse) + +export const timezone = { + post: post14, +} + +export const account = { + avatar, + changeEmail, + delete: delete_, + education, + init, + integrates, + interfaceLanguage, + interfaceTheme, + name, + password, + 
profile, + timezone, +} + +export const contract = { + account, +} diff --git a/packages/contracts/generated/api/console/account/types.gen.ts b/packages/contracts/generated/api/console/account/types.gen.ts new file mode 100644 index 0000000000..9021d4c8fb --- /dev/null +++ b/packages/contracts/generated/api/console/account/types.gen.ts @@ -0,0 +1,429 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type AccountAvatarPayload = { + avatar: string +} + +export type Account = { + avatar?: string | null + created_at?: number | null + email: string + id: string + interface_language?: string | null + interface_theme?: string | null + is_password_set: boolean + last_login_at?: number | null + last_login_ip?: string | null + name: string + timezone?: string | null +} + +export type ChangeEmailSendPayload = { + email: string + language?: string | null + phase?: string | null + token?: string | null +} + +export type CheckEmailUniquePayload = { + email: string +} + +export type ChangeEmailResetPayload = { + new_email: string + token: string +} + +export type ChangeEmailValidityPayload = { + code: string + email: string + token: string +} + +export type AccountDeletePayload = { + code: string + token: string +} + +export type AccountDeletionFeedbackPayload = { + email: string + feedback: string +} + +export type EducationStatusResponse = { + allow_refresh?: boolean | null + expire_at?: number | null + is_student?: boolean | null + result?: boolean | null +} + +export type EducationActivatePayload = { + institution: string + role: string + token: string +} + +export type EducationAutocompleteResponse = { + curr_page?: number | null + data?: Array + has_next?: boolean | null +} + +export type EducationVerifyResponse = { + token?: string | null +} + +export type AccountInitPayload = { + interface_language: string + invitation_code?: string | null + timezone: 
string +} + +export type AccountIntegrateListResponse = { + data: Array +} + +export type AccountInterfaceLanguagePayload = { + interface_language: string +} + +export type AccountInterfaceThemePayload = { + interface_theme: 'light' | 'dark' +} + +export type AccountNamePayload = { + name: string +} + +export type AccountPasswordPayload = { + new_password: string + password?: string | null + repeat_new_password: string +} + +export type AccountTimezonePayload = { + timezone: string +} + +export type AccountIntegrateResponse = { + created_at?: number | null + is_bound: boolean + link?: string | null + provider: string +} + +export type GetAccountAvatarData = { + body?: never + path?: never + query: { + avatar: string + } + url: '/account/avatar' +} + +export type GetAccountAvatarResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAccountAvatarResponse = GetAccountAvatarResponses[keyof GetAccountAvatarResponses] + +export type PostAccountAvatarData = { + body: AccountAvatarPayload + path?: never + query?: never + url: '/account/avatar' +} + +export type PostAccountAvatarResponses = { + 200: Account +} + +export type PostAccountAvatarResponse = PostAccountAvatarResponses[keyof PostAccountAvatarResponses] + +export type PostAccountChangeEmailData = { + body: ChangeEmailSendPayload + path?: never + query?: never + url: '/account/change-email' +} + +export type PostAccountChangeEmailResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAccountChangeEmailResponse + = PostAccountChangeEmailResponses[keyof PostAccountChangeEmailResponses] + +export type PostAccountChangeEmailCheckEmailUniqueData = { + body: CheckEmailUniquePayload + path?: never + query?: never + url: '/account/change-email/check-email-unique' +} + +export type PostAccountChangeEmailCheckEmailUniqueResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAccountChangeEmailCheckEmailUniqueResponse + = 
PostAccountChangeEmailCheckEmailUniqueResponses[keyof PostAccountChangeEmailCheckEmailUniqueResponses] + +export type PostAccountChangeEmailResetData = { + body: ChangeEmailResetPayload + path?: never + query?: never + url: '/account/change-email/reset' +} + +export type PostAccountChangeEmailResetResponses = { + 200: Account +} + +export type PostAccountChangeEmailResetResponse + = PostAccountChangeEmailResetResponses[keyof PostAccountChangeEmailResetResponses] + +export type PostAccountChangeEmailValidityData = { + body: ChangeEmailValidityPayload + path?: never + query?: never + url: '/account/change-email/validity' +} + +export type PostAccountChangeEmailValidityResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAccountChangeEmailValidityResponse + = PostAccountChangeEmailValidityResponses[keyof PostAccountChangeEmailValidityResponses] + +export type PostAccountDeleteData = { + body: AccountDeletePayload + path?: never + query?: never + url: '/account/delete' +} + +export type PostAccountDeleteResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAccountDeleteResponse = PostAccountDeleteResponses[keyof PostAccountDeleteResponses] + +export type PostAccountDeleteFeedbackData = { + body: AccountDeletionFeedbackPayload + path?: never + query?: never + url: '/account/delete/feedback' +} + +export type PostAccountDeleteFeedbackResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAccountDeleteFeedbackResponse + = PostAccountDeleteFeedbackResponses[keyof PostAccountDeleteFeedbackResponses] + +export type GetAccountDeleteVerifyData = { + body?: never + path?: never + query?: never + url: '/account/delete/verify' +} + +export type GetAccountDeleteVerifyResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAccountDeleteVerifyResponse + = GetAccountDeleteVerifyResponses[keyof GetAccountDeleteVerifyResponses] + +export type GetAccountEducationData = { + body?: never + path?: never + query?: 
never + url: '/account/education' +} + +export type GetAccountEducationResponses = { + 200: EducationStatusResponse +} + +export type GetAccountEducationResponse + = GetAccountEducationResponses[keyof GetAccountEducationResponses] + +export type PostAccountEducationData = { + body: EducationActivatePayload + path?: never + query?: never + url: '/account/education' +} + +export type PostAccountEducationResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAccountEducationResponse + = PostAccountEducationResponses[keyof PostAccountEducationResponses] + +export type GetAccountEducationAutocompleteData = { + body?: never + path?: never + query: { + keywords: string + limit?: number + page?: number + } + url: '/account/education/autocomplete' +} + +export type GetAccountEducationAutocompleteResponses = { + 200: EducationAutocompleteResponse +} + +export type GetAccountEducationAutocompleteResponse + = GetAccountEducationAutocompleteResponses[keyof GetAccountEducationAutocompleteResponses] + +export type GetAccountEducationVerifyData = { + body?: never + path?: never + query?: never + url: '/account/education/verify' +} + +export type GetAccountEducationVerifyResponses = { + 200: EducationVerifyResponse +} + +export type GetAccountEducationVerifyResponse + = GetAccountEducationVerifyResponses[keyof GetAccountEducationVerifyResponses] + +export type PostAccountInitData = { + body: AccountInitPayload + path?: never + query?: never + url: '/account/init' +} + +export type PostAccountInitResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAccountInitResponse = PostAccountInitResponses[keyof PostAccountInitResponses] + +export type GetAccountIntegratesData = { + body?: never + path?: never + query?: never + url: '/account/integrates' +} + +export type GetAccountIntegratesResponses = { + 200: AccountIntegrateListResponse +} + +export type GetAccountIntegratesResponse + = GetAccountIntegratesResponses[keyof 
GetAccountIntegratesResponses] + +export type PostAccountInterfaceLanguageData = { + body: AccountInterfaceLanguagePayload + path?: never + query?: never + url: '/account/interface-language' +} + +export type PostAccountInterfaceLanguageResponses = { + 200: Account +} + +export type PostAccountInterfaceLanguageResponse + = PostAccountInterfaceLanguageResponses[keyof PostAccountInterfaceLanguageResponses] + +export type PostAccountInterfaceThemeData = { + body: AccountInterfaceThemePayload + path?: never + query?: never + url: '/account/interface-theme' +} + +export type PostAccountInterfaceThemeResponses = { + 200: Account +} + +export type PostAccountInterfaceThemeResponse + = PostAccountInterfaceThemeResponses[keyof PostAccountInterfaceThemeResponses] + +export type PostAccountNameData = { + body: AccountNamePayload + path?: never + query?: never + url: '/account/name' +} + +export type PostAccountNameResponses = { + 200: Account +} + +export type PostAccountNameResponse = PostAccountNameResponses[keyof PostAccountNameResponses] + +export type PostAccountPasswordData = { + body: AccountPasswordPayload + path?: never + query?: never + url: '/account/password' +} + +export type PostAccountPasswordResponses = { + 200: Account +} + +export type PostAccountPasswordResponse + = PostAccountPasswordResponses[keyof PostAccountPasswordResponses] + +export type GetAccountProfileData = { + body?: never + path?: never + query?: never + url: '/account/profile' +} + +export type GetAccountProfileResponses = { + 200: Account +} + +export type GetAccountProfileResponse = GetAccountProfileResponses[keyof GetAccountProfileResponses] + +export type PostAccountTimezoneData = { + body: AccountTimezonePayload + path?: never + query?: never + url: '/account/timezone' +} + +export type PostAccountTimezoneResponses = { + 200: Account +} + +export type PostAccountTimezoneResponse + = PostAccountTimezoneResponses[keyof PostAccountTimezoneResponses] diff --git 
a/packages/contracts/generated/api/console/account/zod.gen.ts b/packages/contracts/generated/api/console/account/zod.gen.ts new file mode 100644 index 0000000000..befa7c27c6 --- /dev/null +++ b/packages/contracts/generated/api/console/account/zod.gen.ts @@ -0,0 +1,318 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * AccountAvatarPayload + */ +export const zAccountAvatarPayload = z.object({ + avatar: z.string(), +}) + +/** + * Account + */ +export const zAccount = z.object({ + avatar: z.string().nullish(), + created_at: z.int().nullish(), + email: z.string(), + id: z.string(), + interface_language: z.string().nullish(), + interface_theme: z.string().nullish(), + is_password_set: z.boolean(), + last_login_at: z.int().nullish(), + last_login_ip: z.string().nullish(), + name: z.string(), + timezone: z.string().nullish(), +}) + +/** + * ChangeEmailSendPayload + */ +export const zChangeEmailSendPayload = z.object({ + email: z.string(), + language: z.string().nullish(), + phase: z.string().nullish(), + token: z.string().nullish(), +}) + +/** + * CheckEmailUniquePayload + */ +export const zCheckEmailUniquePayload = z.object({ + email: z.string(), +}) + +/** + * ChangeEmailResetPayload + */ +export const zChangeEmailResetPayload = z.object({ + new_email: z.string(), + token: z.string(), +}) + +/** + * ChangeEmailValidityPayload + */ +export const zChangeEmailValidityPayload = z.object({ + code: z.string(), + email: z.string(), + token: z.string(), +}) + +/** + * AccountDeletePayload + */ +export const zAccountDeletePayload = z.object({ + code: z.string(), + token: z.string(), +}) + +/** + * AccountDeletionFeedbackPayload + */ +export const zAccountDeletionFeedbackPayload = z.object({ + email: z.string(), + feedback: z.string(), +}) + +/** + * EducationStatusResponse + */ +export const zEducationStatusResponse = z.object({ + allow_refresh: z.boolean().nullish(), + expire_at: z.int().nullish(), + is_student: 
z.boolean().nullish(), + result: z.boolean().nullish(), +}) + +/** + * EducationActivatePayload + */ +export const zEducationActivatePayload = z.object({ + institution: z.string(), + role: z.string(), + token: z.string(), +}) + +/** + * EducationAutocompleteResponse + */ +export const zEducationAutocompleteResponse = z.object({ + curr_page: z.int().nullish(), + data: z.array(z.string()).optional(), + has_next: z.boolean().nullish(), +}) + +/** + * EducationVerifyResponse + */ +export const zEducationVerifyResponse = z.object({ + token: z.string().nullish(), +}) + +/** + * AccountInitPayload + */ +export const zAccountInitPayload = z.object({ + interface_language: z.string(), + invitation_code: z.string().nullish(), + timezone: z.string(), +}) + +/** + * AccountInterfaceLanguagePayload + */ +export const zAccountInterfaceLanguagePayload = z.object({ + interface_language: z.string(), +}) + +/** + * AccountInterfaceThemePayload + */ +export const zAccountInterfaceThemePayload = z.object({ + interface_theme: z.enum(['light', 'dark']), +}) + +/** + * AccountNamePayload + */ +export const zAccountNamePayload = z.object({ + name: z.string().min(3).max(30), +}) + +/** + * AccountPasswordPayload + */ +export const zAccountPasswordPayload = z.object({ + new_password: z.string(), + password: z.string().nullish(), + repeat_new_password: z.string(), +}) + +/** + * AccountTimezonePayload + */ +export const zAccountTimezonePayload = z.object({ + timezone: z.string(), +}) + +/** + * AccountIntegrateResponse + */ +export const zAccountIntegrateResponse = z.object({ + created_at: z.int().nullish(), + is_bound: z.boolean(), + link: z.string().nullish(), + provider: z.string(), +}) + +/** + * AccountIntegrateListResponse + */ +export const zAccountIntegrateListResponse = z.object({ + data: z.array(zAccountIntegrateResponse), +}) + +export const zGetAccountAvatarQuery = z.object({ + avatar: z.string(), +}) + +/** + * Success + */ +export const zGetAccountAvatarResponse = 
z.record(z.string(), z.unknown()) + +export const zPostAccountAvatarBody = zAccountAvatarPayload + +/** + * Success + */ +export const zPostAccountAvatarResponse = zAccount + +export const zPostAccountChangeEmailBody = zChangeEmailSendPayload + +/** + * Success + */ +export const zPostAccountChangeEmailResponse = z.record(z.string(), z.unknown()) + +export const zPostAccountChangeEmailCheckEmailUniqueBody = zCheckEmailUniquePayload + +/** + * Success + */ +export const zPostAccountChangeEmailCheckEmailUniqueResponse = z.record(z.string(), z.unknown()) + +export const zPostAccountChangeEmailResetBody = zChangeEmailResetPayload + +/** + * Success + */ +export const zPostAccountChangeEmailResetResponse = zAccount + +export const zPostAccountChangeEmailValidityBody = zChangeEmailValidityPayload + +/** + * Success + */ +export const zPostAccountChangeEmailValidityResponse = z.record(z.string(), z.unknown()) + +export const zPostAccountDeleteBody = zAccountDeletePayload + +/** + * Success + */ +export const zPostAccountDeleteResponse = z.record(z.string(), z.unknown()) + +export const zPostAccountDeleteFeedbackBody = zAccountDeletionFeedbackPayload + +/** + * Success + */ +export const zPostAccountDeleteFeedbackResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetAccountDeleteVerifyResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetAccountEducationResponse = zEducationStatusResponse + +export const zPostAccountEducationBody = zEducationActivatePayload + +/** + * Success + */ +export const zPostAccountEducationResponse = z.record(z.string(), z.unknown()) + +export const zGetAccountEducationAutocompleteQuery = z.object({ + keywords: z.string(), + limit: z.int().optional().default(20), + page: z.int().optional().default(0), +}) + +/** + * Success + */ +export const zGetAccountEducationAutocompleteResponse = zEducationAutocompleteResponse + +/** + * Success + */ +export const 
zGetAccountEducationVerifyResponse = zEducationVerifyResponse + +export const zPostAccountInitBody = zAccountInitPayload + +/** + * Success + */ +export const zPostAccountInitResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetAccountIntegratesResponse = zAccountIntegrateListResponse + +export const zPostAccountInterfaceLanguageBody = zAccountInterfaceLanguagePayload + +/** + * Success + */ +export const zPostAccountInterfaceLanguageResponse = zAccount + +export const zPostAccountInterfaceThemeBody = zAccountInterfaceThemePayload + +/** + * Success + */ +export const zPostAccountInterfaceThemeResponse = zAccount + +export const zPostAccountNameBody = zAccountNamePayload + +/** + * Success + */ +export const zPostAccountNameResponse = zAccount + +export const zPostAccountPasswordBody = zAccountPasswordPayload + +/** + * Success + */ +export const zPostAccountPasswordResponse = zAccount + +/** + * Success + */ +export const zGetAccountProfileResponse = zAccount + +export const zPostAccountTimezoneBody = zAccountTimezonePayload + +/** + * Success + */ +export const zPostAccountTimezoneResponse = zAccount diff --git a/packages/contracts/generated/api/console/activate/orpc.gen.ts b/packages/contracts/generated/api/console/activate/orpc.gen.ts new file mode 100644 index 0000000000..870f45bd2e --- /dev/null +++ b/packages/contracts/generated/api/console/activate/orpc.gen.ts @@ -0,0 +1,54 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zGetActivateCheckQuery, + zGetActivateCheckResponse, + zPostActivateBody, + zPostActivateResponse, +} from './zod.gen' + +/** + * Check if activation token is valid + */ +export const get = oc + .route({ + description: 'Check if activation token is valid', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getActivateCheck', + path: '/activate/check', + tags: ['console'], + }) + .input(z.object({ query: 
zGetActivateCheckQuery })) + .output(zGetActivateCheckResponse) + +export const check = { + get, +} + +/** + * Activate account with invitation token + */ +export const post = oc + .route({ + description: 'Activate account with invitation token', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postActivate', + path: '/activate', + tags: ['console'], + }) + .input(z.object({ body: zPostActivateBody })) + .output(zPostActivateResponse) + +export const activate = { + post, + check, +} + +export const contract = { + activate, +} diff --git a/packages/contracts/generated/api/console/activate/types.gen.ts b/packages/contracts/generated/api/console/activate/types.gen.ts new file mode 100644 index 0000000000..97a16c6861 --- /dev/null +++ b/packages/contracts/generated/api/console/activate/types.gen.ts @@ -0,0 +1,63 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type ActivatePayload = { + email?: string | null + interface_language: string + name: string + timezone: string + token: string + workspace_id?: string | null +} + +export type ActivationResponse = { + result: string +} + +export type ActivationCheckResponse = { + data?: { + [key: string]: unknown + } | null + is_valid: boolean +} + +export type PostActivateData = { + body: ActivatePayload + path?: never + query?: never + url: '/activate' +} + +export type PostActivateErrors = { + 400: { + [key: string]: unknown + } +} + +export type PostActivateError = PostActivateErrors[keyof PostActivateErrors] + +export type PostActivateResponses = { + 200: ActivationResponse +} + +export type PostActivateResponse = PostActivateResponses[keyof PostActivateResponses] + +export type GetActivateCheckData = { + body?: never + path?: never + query: { + email?: string | null + token: string + workspace_id?: string | null + } + url: '/activate/check' +} + +export type GetActivateCheckResponses = { + 
200: ActivationCheckResponse +} + +export type GetActivateCheckResponse = GetActivateCheckResponses[keyof GetActivateCheckResponses] diff --git a/packages/contracts/generated/api/console/activate/zod.gen.ts b/packages/contracts/generated/api/console/activate/zod.gen.ts new file mode 100644 index 0000000000..30897b6666 --- /dev/null +++ b/packages/contracts/generated/api/console/activate/zod.gen.ts @@ -0,0 +1,48 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * ActivatePayload + */ +export const zActivatePayload = z.object({ + email: z.string().nullish(), + interface_language: z.string(), + name: z.string().max(30), + timezone: z.string(), + token: z.string(), + workspace_id: z.string().nullish(), +}) + +/** + * ActivationResponse + */ +export const zActivationResponse = z.object({ + result: z.string(), +}) + +/** + * ActivationCheckResponse + */ +export const zActivationCheckResponse = z.object({ + data: z.record(z.string(), z.unknown()).nullish(), + is_valid: z.boolean(), +}) + +export const zPostActivateBody = zActivatePayload + +/** + * Account activated successfully + */ +export const zPostActivateResponse = zActivationResponse + +export const zGetActivateCheckQuery = z.object({ + email: z.string().nullish(), + token: z.string(), + workspace_id: z.string().nullish(), +}) + +/** + * Success + */ +export const zGetActivateCheckResponse = zActivationCheckResponse diff --git a/packages/contracts/generated/api/console/admin/orpc.gen.ts b/packages/contracts/generated/api/console/admin/orpc.gen.ts new file mode 100644 index 0000000000..93f012405a --- /dev/null +++ b/packages/contracts/generated/api/console/admin/orpc.gen.ts @@ -0,0 +1,153 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zDeleteAdminDeleteExploreBannerByBannerIdPath, + zDeleteAdminDeleteExploreBannerByBannerIdResponse, + zDeleteAdminInsertExploreAppsByAppIdPath, + 
zDeleteAdminInsertExploreAppsByAppIdResponse, + zPostAdminBatchAddNotificationAccountsResponse, + zPostAdminInsertExploreAppsBody, + zPostAdminInsertExploreAppsResponse, + zPostAdminInsertExploreBannerBody, + zPostAdminInsertExploreBannerResponse, + zPostAdminUpsertNotificationBody, + zPostAdminUpsertNotificationResponse, +} from './zod.gen' + +/** + * Register target accounts for a notification by email address. JSON body: {"notification_id": "...", "user_email": ["a@example.com", ...]}. File upload: multipart/form-data with a 'file' field (CSV or TXT, one email per line) plus a 'notification_id' field. Emails that do not match any account are silently skipped. + */ +export const post = oc + .route({ + description: + 'Register target accounts for a notification by email address. JSON body: {"notification_id": "...", "user_email": ["a@example.com", ...]}. File upload: multipart/form-data with a \'file\' field (CSV or TXT, one email per line) plus a \'notification_id\' field. Emails that do not match any account are silently skipped.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAdminBatchAddNotificationAccounts', + path: '/admin/batch_add_notification_accounts', + tags: ['console'], + }) + .output(zPostAdminBatchAddNotificationAccountsResponse) + +export const batchAddNotificationAccounts = { + post, +} + +/** + * Delete an explore banner + */ +export const delete_ = oc + .route({ + description: 'Delete an explore banner', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAdminDeleteExploreBannerByBannerId', + path: '/admin/delete-explore-banner/{banner_id}', + successStatus: 204, + tags: ['console'], + }) + .input(z.object({ params: zDeleteAdminDeleteExploreBannerByBannerIdPath })) + .output(zDeleteAdminDeleteExploreBannerByBannerIdResponse) + +export const byBannerId = { + delete: delete_, +} + +export const deleteExploreBanner = { + byBannerId, +} + +/** + * Remove an app from the explore list + */ +export const 
delete2 = oc + .route({ + description: 'Remove an app from the explore list', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAdminInsertExploreAppsByAppId', + path: '/admin/insert-explore-apps/{app_id}', + successStatus: 204, + tags: ['console'], + }) + .input(z.object({ params: zDeleteAdminInsertExploreAppsByAppIdPath })) + .output(zDeleteAdminInsertExploreAppsByAppIdResponse) + +export const byAppId = { + delete: delete2, +} + +/** + * Insert or update an app in the explore list + */ +export const post2 = oc + .route({ + description: 'Insert or update an app in the explore list', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAdminInsertExploreApps', + path: '/admin/insert-explore-apps', + tags: ['console'], + }) + .input(z.object({ body: zPostAdminInsertExploreAppsBody })) + .output(zPostAdminInsertExploreAppsResponse) + +export const insertExploreApps = { + post: post2, + byAppId, +} + +/** + * Insert an explore banner + */ +export const post3 = oc + .route({ + description: 'Insert an explore banner', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAdminInsertExploreBanner', + path: '/admin/insert-explore-banner', + successStatus: 201, + tags: ['console'], + }) + .input(z.object({ body: zPostAdminInsertExploreBannerBody })) + .output(zPostAdminInsertExploreBannerResponse) + +export const insertExploreBanner = { + post: post3, +} + +/** + * Create or update an in-product notification. Supply notification_id to update an existing one; omit it to create a new one. Pass at least one language variant in contents (zh / en / jp). + */ +export const post4 = oc + .route({ + description: + 'Create or update an in-product notification. Supply notification_id to update an existing one; omit it to create a new one. 
Pass at least one language variant in contents (zh / en / jp).', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAdminUpsertNotification', + path: '/admin/upsert_notification', + tags: ['console'], + }) + .input(z.object({ body: zPostAdminUpsertNotificationBody })) + .output(zPostAdminUpsertNotificationResponse) + +export const upsertNotification = { + post: post4, +} + +export const admin = { + batchAddNotificationAccounts, + deleteExploreBanner, + insertExploreApps, + insertExploreBanner, + upsertNotification, +} + +export const contract = { + admin, +} diff --git a/packages/contracts/generated/api/console/admin/types.gen.ts b/packages/contracts/generated/api/console/admin/types.gen.ts new file mode 100644 index 0000000000..d3fef01791 --- /dev/null +++ b/packages/contracts/generated/api/console/admin/types.gen.ts @@ -0,0 +1,157 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type InsertExploreAppPayload = { + app_id: string + can_trial?: boolean + category: string + copyright?: string | null + custom_disclaimer?: string | null + desc?: string | null + language: string + position: number + privacy_policy?: string | null + trial_limit?: number +} + +export type InsertExploreBannerPayload = { + 'category': string + 'description': string + 'img-src': string + 'language'?: string + 'link': string + 'sort': number + 'title': string +} + +export type UpsertNotificationPayload = { + contents: Array + end_time?: string | null + frequency?: string + notification_id?: string | null + start_time?: string | null + status?: string +} + +export type LangContentPayload = { + body: string + lang: string + subtitle?: string | null + title: string + title_pic_url?: string | null +} + +export type PostAdminBatchAddNotificationAccountsData = { + body?: never + path?: never + query?: never + url: '/admin/batch_add_notification_accounts' +} + 
+export type PostAdminBatchAddNotificationAccountsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAdminBatchAddNotificationAccountsResponse + = PostAdminBatchAddNotificationAccountsResponses[keyof PostAdminBatchAddNotificationAccountsResponses] + +export type DeleteAdminDeleteExploreBannerByBannerIdData = { + body?: never + path: { + banner_id: string + } + query?: never + url: '/admin/delete-explore-banner/{banner_id}' +} + +export type DeleteAdminDeleteExploreBannerByBannerIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteAdminDeleteExploreBannerByBannerIdResponse + = DeleteAdminDeleteExploreBannerByBannerIdResponses[keyof DeleteAdminDeleteExploreBannerByBannerIdResponses] + +export type PostAdminInsertExploreAppsData = { + body: InsertExploreAppPayload + path?: never + query?: never + url: '/admin/insert-explore-apps' +} + +export type PostAdminInsertExploreAppsErrors = { + 404: { + [key: string]: unknown + } +} + +export type PostAdminInsertExploreAppsError + = PostAdminInsertExploreAppsErrors[keyof PostAdminInsertExploreAppsErrors] + +export type PostAdminInsertExploreAppsResponses = { + 200: { + [key: string]: unknown + } + 201: { + [key: string]: unknown + } +} + +export type PostAdminInsertExploreAppsResponse + = PostAdminInsertExploreAppsResponses[keyof PostAdminInsertExploreAppsResponses] + +export type DeleteAdminInsertExploreAppsByAppIdData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/admin/insert-explore-apps/{app_id}' +} + +export type DeleteAdminInsertExploreAppsByAppIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteAdminInsertExploreAppsByAppIdResponse + = DeleteAdminInsertExploreAppsByAppIdResponses[keyof DeleteAdminInsertExploreAppsByAppIdResponses] + +export type PostAdminInsertExploreBannerData = { + body: InsertExploreBannerPayload + path?: never + query?: never + url: '/admin/insert-explore-banner' +} + +export type 
PostAdminInsertExploreBannerResponses = { + 201: { + [key: string]: unknown + } +} + +export type PostAdminInsertExploreBannerResponse + = PostAdminInsertExploreBannerResponses[keyof PostAdminInsertExploreBannerResponses] + +export type PostAdminUpsertNotificationData = { + body: UpsertNotificationPayload + path?: never + query?: never + url: '/admin/upsert_notification' +} + +export type PostAdminUpsertNotificationResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAdminUpsertNotificationResponse + = PostAdminUpsertNotificationResponses[keyof PostAdminUpsertNotificationResponses] diff --git a/packages/contracts/generated/api/console/admin/zod.gen.ts b/packages/contracts/generated/api/console/admin/zod.gen.ts new file mode 100644 index 0000000000..9ebed93e1e --- /dev/null +++ b/packages/contracts/generated/api/console/admin/zod.gen.ts @@ -0,0 +1,99 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * InsertExploreAppPayload + */ +export const zInsertExploreAppPayload = z.object({ + app_id: z.string(), + can_trial: z.boolean().optional().default(false), + category: z.string(), + copyright: z.string().nullish(), + custom_disclaimer: z.string().nullish(), + desc: z.string().nullish(), + language: z.string(), + position: z.int(), + privacy_policy: z.string().nullish(), + trial_limit: z.int().optional().default(0), +}) + +/** + * InsertExploreBannerPayload + */ +export const zInsertExploreBannerPayload = z.object({ + 'category': z.string(), + 'description': z.string(), + 'img-src': z.string(), + 'language': z.string().optional().default('en-US'), + 'link': z.string(), + 'sort': z.int(), + 'title': z.string(), +}) + +/** + * LangContentPayload + */ +export const zLangContentPayload = z.object({ + body: z.string(), + lang: z.string(), + subtitle: z.string().nullish(), + title: z.string(), + title_pic_url: z.string().nullish(), +}) + +/** + * UpsertNotificationPayload + */ +export const 
zUpsertNotificationPayload = z.object({ + contents: z.array(zLangContentPayload).min(1), + end_time: z.string().nullish(), + frequency: z.string().optional().default('once'), + notification_id: z.string().nullish(), + start_time: z.string().nullish(), + status: z.string().optional().default('active'), +}) + +/** + * Accounts added successfully + */ +export const zPostAdminBatchAddNotificationAccountsResponse = z.record(z.string(), z.unknown()) + +export const zDeleteAdminDeleteExploreBannerByBannerIdPath = z.object({ + banner_id: z.string(), +}) + +/** + * Banner deleted successfully + */ +export const zDeleteAdminDeleteExploreBannerByBannerIdResponse = z.record(z.string(), z.unknown()) + +export const zPostAdminInsertExploreAppsBody = zInsertExploreAppPayload + +export const zPostAdminInsertExploreAppsResponse = z.union([ + z.record(z.string(), z.unknown()), + z.record(z.string(), z.unknown()), +]) + +export const zDeleteAdminInsertExploreAppsByAppIdPath = z.object({ + app_id: z.string(), +}) + +/** + * App removed successfully + */ +export const zDeleteAdminInsertExploreAppsByAppIdResponse = z.record(z.string(), z.unknown()) + +export const zPostAdminInsertExploreBannerBody = zInsertExploreBannerPayload + +/** + * Banner inserted successfully + */ +export const zPostAdminInsertExploreBannerResponse = z.record(z.string(), z.unknown()) + +export const zPostAdminUpsertNotificationBody = zUpsertNotificationPayload + +/** + * Notification upserted successfully + */ +export const zPostAdminUpsertNotificationResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/all-workspaces/orpc.gen.ts b/packages/contracts/generated/api/console/all-workspaces/orpc.gen.ts new file mode 100644 index 0000000000..91ccdbc408 --- /dev/null +++ b/packages/contracts/generated/api/console/all-workspaces/orpc.gen.ts @@ -0,0 +1,25 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' 
+ +import { zGetAllWorkspacesQuery, zGetAllWorkspacesResponse } from './zod.gen' + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAllWorkspaces', + path: '/all-workspaces', + tags: ['console'], + }) + .input(z.object({ query: zGetAllWorkspacesQuery.optional() })) + .output(zGetAllWorkspacesResponse) + +export const allWorkspaces = { + get, +} + +export const contract = { + allWorkspaces, +} diff --git a/packages/contracts/generated/api/console/all-workspaces/types.gen.ts b/packages/contracts/generated/api/console/all-workspaces/types.gen.ts new file mode 100644 index 0000000000..2c30287835 --- /dev/null +++ b/packages/contracts/generated/api/console/all-workspaces/types.gen.ts @@ -0,0 +1,23 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type GetAllWorkspacesData = { + body?: never + path?: never + query?: { + limit?: number + page?: number + } + url: '/all-workspaces' +} + +export type GetAllWorkspacesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAllWorkspacesResponse = GetAllWorkspacesResponses[keyof GetAllWorkspacesResponses] diff --git a/packages/contracts/generated/api/console/all-workspaces/zod.gen.ts b/packages/contracts/generated/api/console/all-workspaces/zod.gen.ts new file mode 100644 index 0000000000..bdb5f0d132 --- /dev/null +++ b/packages/contracts/generated/api/console/all-workspaces/zod.gen.ts @@ -0,0 +1,13 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +export const zGetAllWorkspacesQuery = z.object({ + limit: z.int().gte(1).lte(100).optional().default(20), + page: z.int().gte(1).lte(99999).optional().default(1), +}) + +/** + * Success + */ +export const zGetAllWorkspacesResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/api-based-extension/orpc.gen.ts 
b/packages/contracts/generated/api/console/api-based-extension/orpc.gen.ts new file mode 100644 index 0000000000..b47ed17f34 --- /dev/null +++ b/packages/contracts/generated/api/console/api-based-extension/orpc.gen.ts @@ -0,0 +1,109 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zDeleteApiBasedExtensionByIdPath, + zDeleteApiBasedExtensionByIdResponse, + zGetApiBasedExtensionByIdPath, + zGetApiBasedExtensionByIdResponse, + zGetApiBasedExtensionResponse, + zPostApiBasedExtensionBody, + zPostApiBasedExtensionByIdBody, + zPostApiBasedExtensionByIdPath, + zPostApiBasedExtensionByIdResponse, + zPostApiBasedExtensionResponse, +} from './zod.gen' + +/** + * Delete API-based extension + */ +export const delete_ = oc + .route({ + description: 'Delete API-based extension', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteApiBasedExtensionById', + path: '/api-based-extension/{id}', + successStatus: 204, + tags: ['console'], + }) + .input(z.object({ params: zDeleteApiBasedExtensionByIdPath })) + .output(zDeleteApiBasedExtensionByIdResponse) + +/** + * Get API-based extension by ID + */ +export const get = oc + .route({ + description: 'Get API-based extension by ID', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getApiBasedExtensionById', + path: '/api-based-extension/{id}', + tags: ['console'], + }) + .input(z.object({ params: zGetApiBasedExtensionByIdPath })) + .output(zGetApiBasedExtensionByIdResponse) + +/** + * Update API-based extension + */ +export const post = oc + .route({ + description: 'Update API-based extension', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postApiBasedExtensionById', + path: '/api-based-extension/{id}', + tags: ['console'], + }) + .input(z.object({ body: zPostApiBasedExtensionByIdBody, params: zPostApiBasedExtensionByIdPath })) + .output(zPostApiBasedExtensionByIdResponse) + +export const byId = { + 
delete: delete_, + get, + post, +} + +/** + * Get all API-based extensions for current tenant + */ +export const get2 = oc + .route({ + description: 'Get all API-based extensions for current tenant', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getApiBasedExtension', + path: '/api-based-extension', + tags: ['console'], + }) + .output(zGetApiBasedExtensionResponse) + +/** + * Create a new API-based extension + */ +export const post2 = oc + .route({ + description: 'Create a new API-based extension', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postApiBasedExtension', + path: '/api-based-extension', + successStatus: 201, + tags: ['console'], + }) + .input(z.object({ body: zPostApiBasedExtensionBody })) + .output(zPostApiBasedExtensionResponse) + +export const apiBasedExtension = { + get: get2, + post: post2, + byId, +} + +export const contract = { + apiBasedExtension, +} diff --git a/packages/contracts/generated/api/console/api-based-extension/types.gen.ts b/packages/contracts/generated/api/console/api-based-extension/types.gen.ts new file mode 100644 index 0000000000..f7b12f5b3f --- /dev/null +++ b/packages/contracts/generated/api/console/api-based-extension/types.gen.ts @@ -0,0 +1,99 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type ApiBasedExtensionListResponse = Array + +export type ApiBasedExtensionPayload = { + api_endpoint: string + api_key: string + name: string +} + +export type ApiBasedExtensionResponse = { + api_endpoint: string + api_key: string + created_at?: number | null + id: string + name: string +} + +export type GetApiBasedExtensionData = { + body?: never + path?: never + query?: never + url: '/api-based-extension' +} + +export type GetApiBasedExtensionResponses = { + 200: ApiBasedExtensionListResponse +} + +export type GetApiBasedExtensionResponse + = GetApiBasedExtensionResponses[keyof 
GetApiBasedExtensionResponses] + +export type PostApiBasedExtensionData = { + body: ApiBasedExtensionPayload + path?: never + query?: never + url: '/api-based-extension' +} + +export type PostApiBasedExtensionResponses = { + 201: ApiBasedExtensionResponse +} + +export type PostApiBasedExtensionResponse + = PostApiBasedExtensionResponses[keyof PostApiBasedExtensionResponses] + +export type DeleteApiBasedExtensionByIdData = { + body?: never + path: { + id: string + } + query?: never + url: '/api-based-extension/{id}' +} + +export type DeleteApiBasedExtensionByIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteApiBasedExtensionByIdResponse + = DeleteApiBasedExtensionByIdResponses[keyof DeleteApiBasedExtensionByIdResponses] + +export type GetApiBasedExtensionByIdData = { + body?: never + path: { + id: string + } + query?: never + url: '/api-based-extension/{id}' +} + +export type GetApiBasedExtensionByIdResponses = { + 200: ApiBasedExtensionResponse +} + +export type GetApiBasedExtensionByIdResponse + = GetApiBasedExtensionByIdResponses[keyof GetApiBasedExtensionByIdResponses] + +export type PostApiBasedExtensionByIdData = { + body: ApiBasedExtensionPayload + path: { + id: string + } + query?: never + url: '/api-based-extension/{id}' +} + +export type PostApiBasedExtensionByIdResponses = { + 200: ApiBasedExtensionResponse +} + +export type PostApiBasedExtensionByIdResponse + = PostApiBasedExtensionByIdResponses[keyof PostApiBasedExtensionByIdResponses] diff --git a/packages/contracts/generated/api/console/api-based-extension/zod.gen.ts b/packages/contracts/generated/api/console/api-based-extension/zod.gen.ts new file mode 100644 index 0000000000..43f38a5214 --- /dev/null +++ b/packages/contracts/generated/api/console/api-based-extension/zod.gen.ts @@ -0,0 +1,66 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * APIBasedExtensionPayload + */ +export const zApiBasedExtensionPayload = z.object({ + 
api_endpoint: z.string(), + api_key: z.string(), + name: z.string(), +}) + +/** + * APIBasedExtensionResponse + */ +export const zApiBasedExtensionResponse = z.object({ + api_endpoint: z.string(), + api_key: z.string(), + created_at: z.int().nullish(), + id: z.string(), + name: z.string(), +}) + +export const zApiBasedExtensionListResponse = z.array(zApiBasedExtensionResponse) + +/** + * Success + */ +export const zGetApiBasedExtensionResponse = zApiBasedExtensionListResponse + +export const zPostApiBasedExtensionBody = zApiBasedExtensionPayload + +/** + * Extension created successfully + */ +export const zPostApiBasedExtensionResponse = zApiBasedExtensionResponse + +export const zDeleteApiBasedExtensionByIdPath = z.object({ + id: z.string(), +}) + +/** + * Extension deleted successfully + */ +export const zDeleteApiBasedExtensionByIdResponse = z.record(z.string(), z.unknown()) + +export const zGetApiBasedExtensionByIdPath = z.object({ + id: z.string(), +}) + +/** + * Success + */ +export const zGetApiBasedExtensionByIdResponse = zApiBasedExtensionResponse + +export const zPostApiBasedExtensionByIdBody = zApiBasedExtensionPayload + +export const zPostApiBasedExtensionByIdPath = z.object({ + id: z.string(), +}) + +/** + * Extension updated successfully + */ +export const zPostApiBasedExtensionByIdResponse = zApiBasedExtensionResponse diff --git a/packages/contracts/generated/api/console/api-key-auth/orpc.gen.ts b/packages/contracts/generated/api/console/api-key-auth/orpc.gen.ts new file mode 100644 index 0000000000..a113e39c15 --- /dev/null +++ b/packages/contracts/generated/api/console/api-key-auth/orpc.gen.ts @@ -0,0 +1,66 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zDeleteApiKeyAuthDataSourceByBindingIdPath, + zDeleteApiKeyAuthDataSourceByBindingIdResponse, + zGetApiKeyAuthDataSourceResponse, + zPostApiKeyAuthDataSourceBindingBody, + 
zPostApiKeyAuthDataSourceBindingResponse, +} from './zod.gen' + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postApiKeyAuthDataSourceBinding', + path: '/api-key-auth/data-source/binding', + tags: ['console'], + }) + .input(z.object({ body: zPostApiKeyAuthDataSourceBindingBody })) + .output(zPostApiKeyAuthDataSourceBindingResponse) + +export const binding = { + post, +} + +export const delete_ = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteApiKeyAuthDataSourceByBindingId', + path: '/api-key-auth/data-source/{binding_id}', + tags: ['console'], + }) + .input(z.object({ params: zDeleteApiKeyAuthDataSourceByBindingIdPath })) + .output(zDeleteApiKeyAuthDataSourceByBindingIdResponse) + +export const byBindingId = { + delete: delete_, +} + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getApiKeyAuthDataSource', + path: '/api-key-auth/data-source', + tags: ['console'], + }) + .output(zGetApiKeyAuthDataSourceResponse) + +export const dataSource = { + get, + binding, + byBindingId, +} + +export const apiKeyAuth = { + dataSource, +} + +export const contract = { + apiKeyAuth, +} diff --git a/packages/contracts/generated/api/console/api-key-auth/types.gen.ts b/packages/contracts/generated/api/console/api-key-auth/types.gen.ts new file mode 100644 index 0000000000..970b3a44e9 --- /dev/null +++ b/packages/contracts/generated/api/console/api-key-auth/types.gen.ts @@ -0,0 +1,63 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type ApiKeyAuthBindingPayload = { + category: string + credentials: { + [key: string]: unknown + } + provider: string +} + +export type GetApiKeyAuthDataSourceData = { + body?: never + path?: never + query?: never + url: '/api-key-auth/data-source' +} + +export type GetApiKeyAuthDataSourceResponses = { 
+ 200: { + [key: string]: unknown + } +} + +export type GetApiKeyAuthDataSourceResponse + = GetApiKeyAuthDataSourceResponses[keyof GetApiKeyAuthDataSourceResponses] + +export type PostApiKeyAuthDataSourceBindingData = { + body: ApiKeyAuthBindingPayload + path?: never + query?: never + url: '/api-key-auth/data-source/binding' +} + +export type PostApiKeyAuthDataSourceBindingResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostApiKeyAuthDataSourceBindingResponse + = PostApiKeyAuthDataSourceBindingResponses[keyof PostApiKeyAuthDataSourceBindingResponses] + +export type DeleteApiKeyAuthDataSourceByBindingIdData = { + body?: never + path: { + binding_id: string + } + query?: never + url: '/api-key-auth/data-source/{binding_id}' +} + +export type DeleteApiKeyAuthDataSourceByBindingIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteApiKeyAuthDataSourceByBindingIdResponse + = DeleteApiKeyAuthDataSourceByBindingIdResponses[keyof DeleteApiKeyAuthDataSourceByBindingIdResponses] diff --git a/packages/contracts/generated/api/console/api-key-auth/zod.gen.ts b/packages/contracts/generated/api/console/api-key-auth/zod.gen.ts new file mode 100644 index 0000000000..6c7f5ad19b --- /dev/null +++ b/packages/contracts/generated/api/console/api-key-auth/zod.gen.ts @@ -0,0 +1,33 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * ApiKeyAuthBindingPayload + */ +export const zApiKeyAuthBindingPayload = z.object({ + category: z.string(), + credentials: z.record(z.string(), z.unknown()), + provider: z.string(), +}) + +/** + * Success + */ +export const zGetApiKeyAuthDataSourceResponse = z.record(z.string(), z.unknown()) + +export const zPostApiKeyAuthDataSourceBindingBody = zApiKeyAuthBindingPayload + +/** + * Success + */ +export const zPostApiKeyAuthDataSourceBindingResponse = z.record(z.string(), z.unknown()) + +export const zDeleteApiKeyAuthDataSourceByBindingIdPath = z.object({ + binding_id: 
z.string(), +}) + +/** + * Success + */ +export const zDeleteApiKeyAuthDataSourceByBindingIdResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/app/orpc.gen.ts b/packages/contracts/generated/api/console/app/orpc.gen.ts new file mode 100644 index 0000000000..7ccb933866 --- /dev/null +++ b/packages/contracts/generated/api/console/app/orpc.gen.ts @@ -0,0 +1,33 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { zGetAppPromptTemplatesQuery, zGetAppPromptTemplatesResponse } from './zod.gen' + +/** + * Get advanced prompt templates based on app mode and model configuration + */ +export const get = oc + .route({ + description: 'Get advanced prompt templates based on app mode and model configuration', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppPromptTemplates', + path: '/app/prompt-templates', + tags: ['console'], + }) + .input(z.object({ query: zGetAppPromptTemplatesQuery })) + .output(zGetAppPromptTemplatesResponse) + +export const promptTemplates = { + get, +} + +export const app = { + promptTemplates, +} + +export const contract = { + app, +} diff --git a/packages/contracts/generated/api/console/app/types.gen.ts b/packages/contracts/generated/api/console/app/types.gen.ts new file mode 100644 index 0000000000..ad8334ad6d --- /dev/null +++ b/packages/contracts/generated/api/console/app/types.gen.ts @@ -0,0 +1,35 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type GetAppPromptTemplatesData = { + body?: never + path?: never + query: { + app_mode: string + has_context?: string + model_mode: string + model_name: string + } + url: '/app/prompt-templates' +} + +export type GetAppPromptTemplatesErrors = { + 400: { + [key: string]: unknown + } +} + +export type GetAppPromptTemplatesError + = 
GetAppPromptTemplatesErrors[keyof GetAppPromptTemplatesErrors] + +export type GetAppPromptTemplatesResponses = { + 200: Array<{ + [key: string]: unknown + }> +} + +export type GetAppPromptTemplatesResponse + = GetAppPromptTemplatesResponses[keyof GetAppPromptTemplatesResponses] diff --git a/packages/contracts/generated/api/console/app/zod.gen.ts b/packages/contracts/generated/api/console/app/zod.gen.ts new file mode 100644 index 0000000000..df13f62825 --- /dev/null +++ b/packages/contracts/generated/api/console/app/zod.gen.ts @@ -0,0 +1,15 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +export const zGetAppPromptTemplatesQuery = z.object({ + app_mode: z.string(), + has_context: z.string().optional().default('true'), + model_mode: z.string(), + model_name: z.string(), +}) + +/** + * Prompt templates retrieved successfully + */ +export const zGetAppPromptTemplatesResponse = z.array(z.record(z.string(), z.unknown())) diff --git a/packages/contracts/generated/api/console/apps/orpc.gen.ts b/packages/contracts/generated/api/console/apps/orpc.gen.ts new file mode 100644 index 0000000000..069976904d --- /dev/null +++ b/packages/contracts/generated/api/console/apps/orpc.gen.ts @@ -0,0 +1,3775 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zDeleteAppsByAppIdAnnotationsByAnnotationIdPath, + zDeleteAppsByAppIdAnnotationsByAnnotationIdResponse, + zDeleteAppsByAppIdAnnotationsPath, + zDeleteAppsByAppIdAnnotationsResponse, + zDeleteAppsByAppIdChatConversationsByConversationIdPath, + zDeleteAppsByAppIdChatConversationsByConversationIdResponse, + zDeleteAppsByAppIdCompletionConversationsByConversationIdPath, + zDeleteAppsByAppIdCompletionConversationsByConversationIdResponse, + zDeleteAppsByAppIdPath, + zDeleteAppsByAppIdResponse, + zDeleteAppsByAppIdTraceConfigBody, + zDeleteAppsByAppIdTraceConfigPath, + zDeleteAppsByAppIdTraceConfigResponse, + 
zDeleteAppsByAppIdWorkflowCommentsByCommentIdPath, + zDeleteAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdPath, + zDeleteAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponse, + zDeleteAppsByAppIdWorkflowCommentsByCommentIdResponse, + zDeleteAppsByAppIdWorkflowsByWorkflowIdPath, + zDeleteAppsByAppIdWorkflowsByWorkflowIdResponse, + zDeleteAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesPath, + zDeleteAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponse, + zDeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdPath, + zDeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdResponse, + zDeleteAppsByAppIdWorkflowsDraftVariablesPath, + zDeleteAppsByAppIdWorkflowsDraftVariablesResponse, + zDeleteAppsByResourceIdApiKeysByApiKeyIdPath, + zDeleteAppsByResourceIdApiKeysByApiKeyIdResponse, + zGetAppsByAppIdAdvancedChatWorkflowRunsCountPath, + zGetAppsByAppIdAdvancedChatWorkflowRunsCountQuery, + zGetAppsByAppIdAdvancedChatWorkflowRunsCountResponse, + zGetAppsByAppIdAdvancedChatWorkflowRunsPath, + zGetAppsByAppIdAdvancedChatWorkflowRunsQuery, + zGetAppsByAppIdAdvancedChatWorkflowRunsResponse, + zGetAppsByAppIdAgentLogsPath, + zGetAppsByAppIdAgentLogsQuery, + zGetAppsByAppIdAgentLogsResponse, + zGetAppsByAppIdAnnotationReplyByActionStatusByJobIdPath, + zGetAppsByAppIdAnnotationReplyByActionStatusByJobIdResponse, + zGetAppsByAppIdAnnotationsBatchImportStatusByJobIdPath, + zGetAppsByAppIdAnnotationsBatchImportStatusByJobIdResponse, + zGetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesPath, + zGetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesQuery, + zGetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesResponse, + zGetAppsByAppIdAnnotationsCountPath, + zGetAppsByAppIdAnnotationsCountResponse, + zGetAppsByAppIdAnnotationSettingPath, + zGetAppsByAppIdAnnotationSettingResponse, + zGetAppsByAppIdAnnotationsExportPath, + zGetAppsByAppIdAnnotationsExportResponse, + zGetAppsByAppIdAnnotationsPath, + zGetAppsByAppIdAnnotationsQuery, + zGetAppsByAppIdAnnotationsResponse, + 
zGetAppsByAppIdChatConversationsByConversationIdPath, + zGetAppsByAppIdChatConversationsByConversationIdResponse, + zGetAppsByAppIdChatConversationsPath, + zGetAppsByAppIdChatConversationsQuery, + zGetAppsByAppIdChatConversationsResponse, + zGetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsPath, + zGetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsResponse, + zGetAppsByAppIdChatMessagesPath, + zGetAppsByAppIdChatMessagesQuery, + zGetAppsByAppIdChatMessagesResponse, + zGetAppsByAppIdCompletionConversationsByConversationIdPath, + zGetAppsByAppIdCompletionConversationsByConversationIdResponse, + zGetAppsByAppIdCompletionConversationsPath, + zGetAppsByAppIdCompletionConversationsQuery, + zGetAppsByAppIdCompletionConversationsResponse, + zGetAppsByAppIdConversationVariablesPath, + zGetAppsByAppIdConversationVariablesQuery, + zGetAppsByAppIdConversationVariablesResponse, + zGetAppsByAppIdExportPath, + zGetAppsByAppIdExportQuery, + zGetAppsByAppIdExportResponse, + zGetAppsByAppIdFeedbacksExportPath, + zGetAppsByAppIdFeedbacksExportQuery, + zGetAppsByAppIdFeedbacksExportResponse, + zGetAppsByAppIdMessagesByMessageIdPath, + zGetAppsByAppIdMessagesByMessageIdResponse, + zGetAppsByAppIdPath, + zGetAppsByAppIdResponse, + zGetAppsByAppIdServerPath, + zGetAppsByAppIdServerResponse, + zGetAppsByAppIdStatisticsAverageResponseTimePath, + zGetAppsByAppIdStatisticsAverageResponseTimeQuery, + zGetAppsByAppIdStatisticsAverageResponseTimeResponse, + zGetAppsByAppIdStatisticsAverageSessionInteractionsPath, + zGetAppsByAppIdStatisticsAverageSessionInteractionsQuery, + zGetAppsByAppIdStatisticsAverageSessionInteractionsResponse, + zGetAppsByAppIdStatisticsDailyConversationsPath, + zGetAppsByAppIdStatisticsDailyConversationsQuery, + zGetAppsByAppIdStatisticsDailyConversationsResponse, + zGetAppsByAppIdStatisticsDailyEndUsersPath, + zGetAppsByAppIdStatisticsDailyEndUsersQuery, + zGetAppsByAppIdStatisticsDailyEndUsersResponse, + zGetAppsByAppIdStatisticsDailyMessagesPath, + 
zGetAppsByAppIdStatisticsDailyMessagesQuery, + zGetAppsByAppIdStatisticsDailyMessagesResponse, + zGetAppsByAppIdStatisticsTokenCostsPath, + zGetAppsByAppIdStatisticsTokenCostsQuery, + zGetAppsByAppIdStatisticsTokenCostsResponse, + zGetAppsByAppIdStatisticsTokensPerSecondPath, + zGetAppsByAppIdStatisticsTokensPerSecondQuery, + zGetAppsByAppIdStatisticsTokensPerSecondResponse, + zGetAppsByAppIdStatisticsUserSatisfactionRatePath, + zGetAppsByAppIdStatisticsUserSatisfactionRateQuery, + zGetAppsByAppIdStatisticsUserSatisfactionRateResponse, + zGetAppsByAppIdTextToAudioVoicesPath, + zGetAppsByAppIdTextToAudioVoicesQuery, + zGetAppsByAppIdTextToAudioVoicesResponse, + zGetAppsByAppIdTraceConfigPath, + zGetAppsByAppIdTraceConfigQuery, + zGetAppsByAppIdTraceConfigResponse, + zGetAppsByAppIdTracePath, + zGetAppsByAppIdTraceResponse, + zGetAppsByAppIdTriggersPath, + zGetAppsByAppIdTriggersResponse, + zGetAppsByAppIdWorkflowAppLogsPath, + zGetAppsByAppIdWorkflowAppLogsQuery, + zGetAppsByAppIdWorkflowAppLogsResponse, + zGetAppsByAppIdWorkflowArchivedLogsPath, + zGetAppsByAppIdWorkflowArchivedLogsQuery, + zGetAppsByAppIdWorkflowArchivedLogsResponse, + zGetAppsByAppIdWorkflowCommentsByCommentIdPath, + zGetAppsByAppIdWorkflowCommentsByCommentIdResponse, + zGetAppsByAppIdWorkflowCommentsMentionUsersPath, + zGetAppsByAppIdWorkflowCommentsMentionUsersResponse, + zGetAppsByAppIdWorkflowCommentsPath, + zGetAppsByAppIdWorkflowCommentsResponse, + zGetAppsByAppIdWorkflowRunsByRunIdExportPath, + zGetAppsByAppIdWorkflowRunsByRunIdExportResponse, + zGetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsPath, + zGetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsResponse, + zGetAppsByAppIdWorkflowRunsByRunIdPath, + zGetAppsByAppIdWorkflowRunsByRunIdResponse, + zGetAppsByAppIdWorkflowRunsCountPath, + zGetAppsByAppIdWorkflowRunsCountQuery, + zGetAppsByAppIdWorkflowRunsCountResponse, + zGetAppsByAppIdWorkflowRunsPath, + zGetAppsByAppIdWorkflowRunsQuery, + zGetAppsByAppIdWorkflowRunsResponse, + 
zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypePath, + zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeQuery, + zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponse, + zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsPath, + zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsResponse, + zGetAppsByAppIdWorkflowsDraftConversationVariablesPath, + zGetAppsByAppIdWorkflowsDraftConversationVariablesResponse, + zGetAppsByAppIdWorkflowsDraftEnvironmentVariablesPath, + zGetAppsByAppIdWorkflowsDraftEnvironmentVariablesResponse, + zGetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunPath, + zGetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunResponse, + zGetAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesPath, + zGetAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponse, + zGetAppsByAppIdWorkflowsDraftPath, + zGetAppsByAppIdWorkflowsDraftResponse, + zGetAppsByAppIdWorkflowsDraftSystemVariablesPath, + zGetAppsByAppIdWorkflowsDraftSystemVariablesResponse, + zGetAppsByAppIdWorkflowsDraftVariablesByVariableIdPath, + zGetAppsByAppIdWorkflowsDraftVariablesByVariableIdResponse, + zGetAppsByAppIdWorkflowsDraftVariablesPath, + zGetAppsByAppIdWorkflowsDraftVariablesQuery, + zGetAppsByAppIdWorkflowsDraftVariablesResponse, + zGetAppsByAppIdWorkflowsPath, + zGetAppsByAppIdWorkflowsPublishPath, + zGetAppsByAppIdWorkflowsPublishResponse, + zGetAppsByAppIdWorkflowsQuery, + zGetAppsByAppIdWorkflowsResponse, + zGetAppsByAppIdWorkflowStatisticsAverageAppInteractionsPath, + zGetAppsByAppIdWorkflowStatisticsAverageAppInteractionsQuery, + zGetAppsByAppIdWorkflowStatisticsAverageAppInteractionsResponse, + zGetAppsByAppIdWorkflowStatisticsDailyConversationsPath, + zGetAppsByAppIdWorkflowStatisticsDailyConversationsQuery, + zGetAppsByAppIdWorkflowStatisticsDailyConversationsResponse, + zGetAppsByAppIdWorkflowStatisticsDailyTerminalsPath, + zGetAppsByAppIdWorkflowStatisticsDailyTerminalsQuery, + zGetAppsByAppIdWorkflowStatisticsDailyTerminalsResponse, + 
zGetAppsByAppIdWorkflowStatisticsTokenCostsPath, + zGetAppsByAppIdWorkflowStatisticsTokenCostsQuery, + zGetAppsByAppIdWorkflowStatisticsTokenCostsResponse, + zGetAppsByAppIdWorkflowsTriggersWebhookPath, + zGetAppsByAppIdWorkflowsTriggersWebhookQuery, + zGetAppsByAppIdWorkflowsTriggersWebhookResponse, + zGetAppsByResourceIdApiKeysPath, + zGetAppsByResourceIdApiKeysResponse, + zGetAppsByServerIdServerRefreshPath, + zGetAppsByServerIdServerRefreshResponse, + zGetAppsImportsByAppIdCheckDependenciesPath, + zGetAppsImportsByAppIdCheckDependenciesResponse, + zGetAppsQuery, + zGetAppsResponse, + zGetAppsWorkflowsOnlineUsersQuery, + zGetAppsWorkflowsOnlineUsersResponse, + zPatchAppsByAppIdTraceConfigBody, + zPatchAppsByAppIdTraceConfigPath, + zPatchAppsByAppIdTraceConfigResponse, + zPatchAppsByAppIdWorkflowsByWorkflowIdBody, + zPatchAppsByAppIdWorkflowsByWorkflowIdPath, + zPatchAppsByAppIdWorkflowsByWorkflowIdResponse, + zPatchAppsByAppIdWorkflowsDraftVariablesByVariableIdBody, + zPatchAppsByAppIdWorkflowsDraftVariablesByVariableIdPath, + zPatchAppsByAppIdWorkflowsDraftVariablesByVariableIdResponse, + zPostAppsBody, + zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewBody, + zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewPath, + zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponse, + zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunBody, + zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunPath, + zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunResponse, + zPostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunBody, + zPostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunPath, + zPostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunResponse, + zPostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunBody, + zPostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunPath, + 
zPostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunResponse, + zPostAppsByAppIdAdvancedChatWorkflowsDraftRunBody, + zPostAppsByAppIdAdvancedChatWorkflowsDraftRunPath, + zPostAppsByAppIdAdvancedChatWorkflowsDraftRunResponse, + zPostAppsByAppIdAnnotationReplyByActionBody, + zPostAppsByAppIdAnnotationReplyByActionPath, + zPostAppsByAppIdAnnotationReplyByActionResponse, + zPostAppsByAppIdAnnotationsBatchImportPath, + zPostAppsByAppIdAnnotationsBatchImportResponse, + zPostAppsByAppIdAnnotationsBody, + zPostAppsByAppIdAnnotationsByAnnotationIdBody, + zPostAppsByAppIdAnnotationsByAnnotationIdPath, + zPostAppsByAppIdAnnotationsByAnnotationIdResponse, + zPostAppsByAppIdAnnotationSettingsByAnnotationSettingIdBody, + zPostAppsByAppIdAnnotationSettingsByAnnotationSettingIdPath, + zPostAppsByAppIdAnnotationSettingsByAnnotationSettingIdResponse, + zPostAppsByAppIdAnnotationsPath, + zPostAppsByAppIdAnnotationsResponse, + zPostAppsByAppIdApiEnableBody, + zPostAppsByAppIdApiEnablePath, + zPostAppsByAppIdApiEnableResponse, + zPostAppsByAppIdAudioToTextPath, + zPostAppsByAppIdAudioToTextResponse, + zPostAppsByAppIdChatMessagesByTaskIdStopPath, + zPostAppsByAppIdChatMessagesByTaskIdStopResponse, + zPostAppsByAppIdCompletionMessagesBody, + zPostAppsByAppIdCompletionMessagesByTaskIdStopPath, + zPostAppsByAppIdCompletionMessagesByTaskIdStopResponse, + zPostAppsByAppIdCompletionMessagesPath, + zPostAppsByAppIdCompletionMessagesResponse, + zPostAppsByAppIdConvertToWorkflowBody, + zPostAppsByAppIdConvertToWorkflowPath, + zPostAppsByAppIdConvertToWorkflowResponse, + zPostAppsByAppIdCopyBody, + zPostAppsByAppIdCopyPath, + zPostAppsByAppIdCopyResponse, + zPostAppsByAppIdFeedbacksBody, + zPostAppsByAppIdFeedbacksPath, + zPostAppsByAppIdFeedbacksResponse, + zPostAppsByAppIdIconBody, + zPostAppsByAppIdIconPath, + zPostAppsByAppIdIconResponse, + zPostAppsByAppIdModelConfigBody, + zPostAppsByAppIdModelConfigPath, + zPostAppsByAppIdModelConfigResponse, + zPostAppsByAppIdNameBody, + 
zPostAppsByAppIdNamePath, + zPostAppsByAppIdNameResponse, + zPostAppsByAppIdPublishToCreatorsPlatformPath, + zPostAppsByAppIdPublishToCreatorsPlatformResponse, + zPostAppsByAppIdServerBody, + zPostAppsByAppIdServerPath, + zPostAppsByAppIdServerResponse, + zPostAppsByAppIdSiteAccessTokenResetPath, + zPostAppsByAppIdSiteAccessTokenResetResponse, + zPostAppsByAppIdSiteBody, + zPostAppsByAppIdSiteEnableBody, + zPostAppsByAppIdSiteEnablePath, + zPostAppsByAppIdSiteEnableResponse, + zPostAppsByAppIdSitePath, + zPostAppsByAppIdSiteResponse, + zPostAppsByAppIdTextToAudioBody, + zPostAppsByAppIdTextToAudioPath, + zPostAppsByAppIdTextToAudioResponse, + zPostAppsByAppIdTraceBody, + zPostAppsByAppIdTraceConfigBody, + zPostAppsByAppIdTraceConfigPath, + zPostAppsByAppIdTraceConfigResponse, + zPostAppsByAppIdTracePath, + zPostAppsByAppIdTraceResponse, + zPostAppsByAppIdTriggerEnableBody, + zPostAppsByAppIdTriggerEnablePath, + zPostAppsByAppIdTriggerEnableResponse, + zPostAppsByAppIdWorkflowCommentsBody, + zPostAppsByAppIdWorkflowCommentsByCommentIdRepliesBody, + zPostAppsByAppIdWorkflowCommentsByCommentIdRepliesPath, + zPostAppsByAppIdWorkflowCommentsByCommentIdRepliesResponse, + zPostAppsByAppIdWorkflowCommentsByCommentIdResolvePath, + zPostAppsByAppIdWorkflowCommentsByCommentIdResolveResponse, + zPostAppsByAppIdWorkflowCommentsPath, + zPostAppsByAppIdWorkflowCommentsResponse, + zPostAppsByAppIdWorkflowRunsTasksByTaskIdStopPath, + zPostAppsByAppIdWorkflowRunsTasksByTaskIdStopResponse, + zPostAppsByAppIdWorkflowsByWorkflowIdRestorePath, + zPostAppsByAppIdWorkflowsByWorkflowIdRestoreResponse, + zPostAppsByAppIdWorkflowsDraftBody, + zPostAppsByAppIdWorkflowsDraftConversationVariablesBody, + zPostAppsByAppIdWorkflowsDraftConversationVariablesPath, + zPostAppsByAppIdWorkflowsDraftConversationVariablesResponse, + zPostAppsByAppIdWorkflowsDraftEnvironmentVariablesBody, + zPostAppsByAppIdWorkflowsDraftEnvironmentVariablesPath, + 
zPostAppsByAppIdWorkflowsDraftEnvironmentVariablesResponse, + zPostAppsByAppIdWorkflowsDraftFeaturesBody, + zPostAppsByAppIdWorkflowsDraftFeaturesPath, + zPostAppsByAppIdWorkflowsDraftFeaturesResponse, + zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestBody, + zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestPath, + zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestResponse, + zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewBody, + zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewPath, + zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponse, + zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunBody, + zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunPath, + zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunResponse, + zPostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunBody, + zPostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunPath, + zPostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunResponse, + zPostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunBody, + zPostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunPath, + zPostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunResponse, + zPostAppsByAppIdWorkflowsDraftNodesByNodeIdRunBody, + zPostAppsByAppIdWorkflowsDraftNodesByNodeIdRunPath, + zPostAppsByAppIdWorkflowsDraftNodesByNodeIdRunResponse, + zPostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunPath, + zPostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunResponse, + zPostAppsByAppIdWorkflowsDraftPath, + zPostAppsByAppIdWorkflowsDraftResponse, + zPostAppsByAppIdWorkflowsDraftRunBody, + zPostAppsByAppIdWorkflowsDraftRunPath, + zPostAppsByAppIdWorkflowsDraftRunResponse, + zPostAppsByAppIdWorkflowsDraftTriggerRunAllBody, + zPostAppsByAppIdWorkflowsDraftTriggerRunAllPath, + zPostAppsByAppIdWorkflowsDraftTriggerRunAllResponse, + zPostAppsByAppIdWorkflowsDraftTriggerRunBody, + zPostAppsByAppIdWorkflowsDraftTriggerRunPath, + 
zPostAppsByAppIdWorkflowsDraftTriggerRunResponse, + zPostAppsByAppIdWorkflowsPublishBody, + zPostAppsByAppIdWorkflowsPublishPath, + zPostAppsByAppIdWorkflowsPublishResponse, + zPostAppsByResourceIdApiKeysPath, + zPostAppsByResourceIdApiKeysResponse, + zPostAppsImportsBody, + zPostAppsImportsByImportIdConfirmPath, + zPostAppsImportsByImportIdConfirmResponse, + zPostAppsImportsResponse, + zPostAppsResponse, + zPutAppsByAppIdBody, + zPutAppsByAppIdPath, + zPutAppsByAppIdResponse, + zPutAppsByAppIdServerBody, + zPutAppsByAppIdServerPath, + zPutAppsByAppIdServerResponse, + zPutAppsByAppIdWorkflowCommentsByCommentIdBody, + zPutAppsByAppIdWorkflowCommentsByCommentIdPath, + zPutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdBody, + zPutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdPath, + zPutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponse, + zPutAppsByAppIdWorkflowCommentsByCommentIdResponse, + zPutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetPath, + zPutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetResponse, +} from './zod.gen' + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsImportsByAppIdCheckDependencies', + path: '/apps/imports/{app_id}/check-dependencies', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsImportsByAppIdCheckDependenciesPath })) + .output(zGetAppsImportsByAppIdCheckDependenciesResponse) + +export const checkDependencies = { + get, +} + +export const byAppId = { + checkDependencies, +} + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsImportsByImportIdConfirm', + path: '/apps/imports/{import_id}/confirm', + tags: ['console'], + }) + .input(z.object({ params: zPostAppsImportsByImportIdConfirmPath })) + .output(zPostAppsImportsByImportIdConfirmResponse) + +export const confirm = { + post, +} + +export const byImportId = { + confirm, +} + +export const post2 = oc + .route({ + 
inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsImports', + path: '/apps/imports', + tags: ['console'], + }) + .input(z.object({ body: zPostAppsImportsBody })) + .output(zPostAppsImportsResponse) + +export const imports = { + post: post2, + byAppId, + byImportId, +} + +/** + * Get workflow online users + */ +export const get2 = oc + .route({ + description: 'Get workflow online users', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsWorkflowsOnlineUsers', + path: '/apps/workflows/online-users', + tags: ['console'], + }) + .input(z.object({ query: zGetAppsWorkflowsOnlineUsersQuery })) + .output(zGetAppsWorkflowsOnlineUsersResponse) + +export const onlineUsers = { + get: get2, +} + +export const workflows = { + onlineUsers, +} + +/** + * Get advanced chat workflow runs count statistics + * + * Get advanced chat workflow runs count statistics + */ +export const get3 = oc + .route({ + description: 'Get advanced chat workflow runs count statistics', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdAdvancedChatWorkflowRunsCount', + path: '/apps/{app_id}/advanced-chat/workflow-runs/count', + summary: 'Get advanced chat workflow runs count statistics', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdAdvancedChatWorkflowRunsCountPath, + query: zGetAppsByAppIdAdvancedChatWorkflowRunsCountQuery.optional(), + }), + ) + .output(zGetAppsByAppIdAdvancedChatWorkflowRunsCountResponse) + +export const count = { + get: get3, +} + +/** + * Get advanced chat app workflow run list + * + * Get advanced chat workflow run list + */ +export const get4 = oc + .route({ + description: 'Get advanced chat workflow run list', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdAdvancedChatWorkflowRuns', + path: '/apps/{app_id}/advanced-chat/workflow-runs', + summary: 'Get advanced chat app workflow run list', + tags: ['console'], + }) + .input( + z.object({ + params: 
zGetAppsByAppIdAdvancedChatWorkflowRunsPath, + query: zGetAppsByAppIdAdvancedChatWorkflowRunsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdAdvancedChatWorkflowRunsResponse) + +export const workflowRuns = { + get: get4, + count, +} + +/** + * Preview human input form content and placeholders + * + * Get human input form preview for advanced chat workflow + */ +export const post3 = oc + .route({ + description: 'Get human input form preview for advanced chat workflow', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreview', + path: '/apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/preview', + summary: 'Preview human input form content and placeholders', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewBody, + params: zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewPath, + }), + ) + .output(zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponse) + +export const preview = { + post: post3, +} + +/** + * Submit human input form preview + * + * Submit human input form preview for advanced chat workflow + */ +export const post4 = oc + .route({ + description: 'Submit human input form preview for advanced chat workflow', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRun', + path: '/apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/run', + summary: 'Submit human input form preview', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunBody, + params: zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunPath, + }), + ) + .output(zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunResponse) + 
+export const run = { + post: post4, +} + +export const form = { + preview, + run, +} + +export const byNodeId = { + form, +} + +export const nodes = { + byNodeId, +} + +export const humanInput = { + nodes, +} + +/** + * Run draft workflow iteration node + * + * Run draft workflow iteration node for advanced chat + */ +export const post5 = oc + .route({ + description: 'Run draft workflow iteration node for advanced chat', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRun', + path: '/apps/{app_id}/advanced-chat/workflows/draft/iteration/nodes/{node_id}/run', + summary: 'Run draft workflow iteration node', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunBody, + params: zPostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunPath, + }), + ) + .output(zPostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunResponse) + +export const run2 = { + post: post5, +} + +export const byNodeId2 = { + run: run2, +} + +export const nodes2 = { + byNodeId: byNodeId2, +} + +export const iteration = { + nodes: nodes2, +} + +/** + * Run draft workflow loop node + * + * Run draft workflow loop node for advanced chat + */ +export const post6 = oc + .route({ + description: 'Run draft workflow loop node for advanced chat', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRun', + path: '/apps/{app_id}/advanced-chat/workflows/draft/loop/nodes/{node_id}/run', + summary: 'Run draft workflow loop node', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunBody, + params: zPostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunPath, + }), + ) + .output(zPostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunResponse) + +export const run3 = { + post: post6, +} + +export 
const byNodeId3 = { + run: run3, +} + +export const nodes3 = { + byNodeId: byNodeId3, +} + +export const loop = { + nodes: nodes3, +} + +/** + * Run draft workflow + * + * Run draft workflow for advanced chat application + */ +export const post7 = oc + .route({ + description: 'Run draft workflow for advanced chat application', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdAdvancedChatWorkflowsDraftRun', + path: '/apps/{app_id}/advanced-chat/workflows/draft/run', + summary: 'Run draft workflow', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdAdvancedChatWorkflowsDraftRunBody, + params: zPostAppsByAppIdAdvancedChatWorkflowsDraftRunPath, + }), + ) + .output(zPostAppsByAppIdAdvancedChatWorkflowsDraftRunResponse) + +export const run4 = { + post: post7, +} + +export const draft = { + humanInput, + iteration, + loop, + run: run4, +} + +export const workflows2 = { + draft, +} + +export const advancedChat = { + workflowRuns, + workflows: workflows2, +} + +/** + * Get agent logs + * + * Get agent execution logs for an application + */ +export const get5 = oc + .route({ + description: 'Get agent execution logs for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdAgentLogs', + path: '/apps/{app_id}/agent/logs', + summary: 'Get agent logs', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdAgentLogsPath, query: zGetAppsByAppIdAgentLogsQuery })) + .output(zGetAppsByAppIdAgentLogsResponse) + +export const logs = { + get: get5, +} + +export const agent = { + logs, +} + +/** + * Get status of annotation reply action job + */ +export const get6 = oc + .route({ + description: 'Get status of annotation reply action job', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdAnnotationReplyByActionStatusByJobId', + path: '/apps/{app_id}/annotation-reply/{action}/status/{job_id}', + tags: ['console'], + }) + .input(z.object({ params: 
zGetAppsByAppIdAnnotationReplyByActionStatusByJobIdPath })) + .output(zGetAppsByAppIdAnnotationReplyByActionStatusByJobIdResponse) + +export const byJobId = { + get: get6, +} + +export const status = { + byJobId, +} + +/** + * Enable or disable annotation reply for an app + */ +export const post8 = oc + .route({ + description: 'Enable or disable annotation reply for an app', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdAnnotationReplyByAction', + path: '/apps/{app_id}/annotation-reply/{action}', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdAnnotationReplyByActionBody, + params: zPostAppsByAppIdAnnotationReplyByActionPath, + }), + ) + .output(zPostAppsByAppIdAnnotationReplyByActionResponse) + +export const byAction = { + post: post8, + status, +} + +export const annotationReply = { + byAction, +} + +/** + * Get annotation settings for an app + */ +export const get7 = oc + .route({ + description: 'Get annotation settings for an app', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdAnnotationSetting', + path: '/apps/{app_id}/annotation-setting', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdAnnotationSettingPath })) + .output(zGetAppsByAppIdAnnotationSettingResponse) + +export const annotationSetting = { + get: get7, +} + +/** + * Update annotation settings for an app + */ +export const post9 = oc + .route({ + description: 'Update annotation settings for an app', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdAnnotationSettingsByAnnotationSettingId', + path: '/apps/{app_id}/annotation-settings/{annotation_setting_id}', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdAnnotationSettingsByAnnotationSettingIdBody, + params: zPostAppsByAppIdAnnotationSettingsByAnnotationSettingIdPath, + }), + ) + .output(zPostAppsByAppIdAnnotationSettingsByAnnotationSettingIdResponse) + +export const byAnnotationSettingId 
= { + post: post9, +} + +export const annotationSettings = { + byAnnotationSettingId, +} + +/** + * Batch import annotations from CSV file with rate limiting and security checks + */ +export const post10 = oc + .route({ + description: 'Batch import annotations from CSV file with rate limiting and security checks', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdAnnotationsBatchImport', + path: '/apps/{app_id}/annotations/batch-import', + tags: ['console'], + }) + .input(z.object({ params: zPostAppsByAppIdAnnotationsBatchImportPath })) + .output(zPostAppsByAppIdAnnotationsBatchImportResponse) + +export const batchImport = { + post: post10, +} + +/** + * Get status of batch import job + */ +export const get8 = oc + .route({ + description: 'Get status of batch import job', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdAnnotationsBatchImportStatusByJobId', + path: '/apps/{app_id}/annotations/batch-import-status/{job_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdAnnotationsBatchImportStatusByJobIdPath })) + .output(zGetAppsByAppIdAnnotationsBatchImportStatusByJobIdResponse) + +export const byJobId2 = { + get: get8, +} + +export const batchImportStatus = { + byJobId: byJobId2, +} + +/** + * Get count of message annotations for the app + */ +export const get9 = oc + .route({ + description: 'Get count of message annotations for the app', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdAnnotationsCount', + path: '/apps/{app_id}/annotations/count', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdAnnotationsCountPath })) + .output(zGetAppsByAppIdAnnotationsCountResponse) + +export const count2 = { + get: get9, +} + +/** + * Export all annotations for an app with CSV injection protection + */ +export const get10 = oc + .route({ + description: 'Export all annotations for an app with CSV injection protection', + inputStructure: 'detailed', 
+ method: 'GET', + operationId: 'getAppsByAppIdAnnotationsExport', + path: '/apps/{app_id}/annotations/export', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdAnnotationsExportPath })) + .output(zGetAppsByAppIdAnnotationsExportResponse) + +export const export_ = { + get: get10, +} + +/** + * Get hit histories for an annotation + */ +export const get11 = oc + .route({ + description: 'Get hit histories for an annotation', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdAnnotationsByAnnotationIdHitHistories', + path: '/apps/{app_id}/annotations/{annotation_id}/hit-histories', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesPath, + query: zGetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesQuery.optional(), + }), + ) + .output(zGetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesResponse) + +export const hitHistories = { + get: get11, +} + +export const delete_ = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsByAppIdAnnotationsByAnnotationId', + path: '/apps/{app_id}/annotations/{annotation_id}', + tags: ['console'], + }) + .input(z.object({ params: zDeleteAppsByAppIdAnnotationsByAnnotationIdPath })) + .output(zDeleteAppsByAppIdAnnotationsByAnnotationIdResponse) + +/** + * Update or delete an annotation + */ +export const post11 = oc + .route({ + description: 'Update or delete an annotation', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdAnnotationsByAnnotationId', + path: '/apps/{app_id}/annotations/{annotation_id}', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdAnnotationsByAnnotationIdBody, + params: zPostAppsByAppIdAnnotationsByAnnotationIdPath, + }), + ) + .output(zPostAppsByAppIdAnnotationsByAnnotationIdResponse) + +export const byAnnotationId = { + delete: delete_, + post: post11, + hitHistories, +} + +export const delete2 = oc + .route({ + 
inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsByAppIdAnnotations', + path: '/apps/{app_id}/annotations', + tags: ['console'], + }) + .input(z.object({ params: zDeleteAppsByAppIdAnnotationsPath })) + .output(zDeleteAppsByAppIdAnnotationsResponse) + +/** + * Get annotations for an app with pagination + */ +export const get12 = oc + .route({ + description: 'Get annotations for an app with pagination', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdAnnotations', + path: '/apps/{app_id}/annotations', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdAnnotationsPath, + query: zGetAppsByAppIdAnnotationsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdAnnotationsResponse) + +/** + * Create a new annotation for an app + */ +export const post12 = oc + .route({ + description: 'Create a new annotation for an app', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdAnnotations', + path: '/apps/{app_id}/annotations', + successStatus: 201, + tags: ['console'], + }) + .input( + z.object({ body: zPostAppsByAppIdAnnotationsBody, params: zPostAppsByAppIdAnnotationsPath }), + ) + .output(zPostAppsByAppIdAnnotationsResponse) + +export const annotations = { + delete: delete2, + get: get12, + post: post12, + batchImport, + batchImportStatus, + count: count2, + export: export_, + byAnnotationId, +} + +/** + * Enable or disable app API + */ +export const post13 = oc + .route({ + description: 'Enable or disable app API', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdApiEnable', + path: '/apps/{app_id}/api-enable', + tags: ['console'], + }) + .input(z.object({ body: zPostAppsByAppIdApiEnableBody, params: zPostAppsByAppIdApiEnablePath })) + .output(zPostAppsByAppIdApiEnableResponse) + +export const apiEnable = { + post: post13, +} + +/** + * Transcript audio to text for chat messages + */ +export const post14 = oc + .route({ + description: 
'Transcript audio to text for chat messages', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdAudioToText', + path: '/apps/{app_id}/audio-to-text', + tags: ['console'], + }) + .input(z.object({ params: zPostAppsByAppIdAudioToTextPath })) + .output(zPostAppsByAppIdAudioToTextResponse) + +export const audioToText = { + post: post14, +} + +/** + * Delete a chat conversation + */ +export const delete3 = oc + .route({ + description: 'Delete a chat conversation', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsByAppIdChatConversationsByConversationId', + path: '/apps/{app_id}/chat-conversations/{conversation_id}', + successStatus: 204, + tags: ['console'], + }) + .input(z.object({ params: zDeleteAppsByAppIdChatConversationsByConversationIdPath })) + .output(zDeleteAppsByAppIdChatConversationsByConversationIdResponse) + +/** + * Get chat conversation details + */ +export const get13 = oc + .route({ + description: 'Get chat conversation details', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdChatConversationsByConversationId', + path: '/apps/{app_id}/chat-conversations/{conversation_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdChatConversationsByConversationIdPath })) + .output(zGetAppsByAppIdChatConversationsByConversationIdResponse) + +export const byConversationId = { + delete: delete3, + get: get13, +} + +/** + * Get chat conversations with pagination, filtering and summary + */ +export const get14 = oc + .route({ + description: 'Get chat conversations with pagination, filtering and summary', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdChatConversations', + path: '/apps/{app_id}/chat-conversations', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdChatConversationsPath, + query: zGetAppsByAppIdChatConversationsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdChatConversationsResponse) + 
+export const chatConversations = { + get: get14, + byConversationId, +} + +/** + * Get suggested questions for a message + */ +export const get15 = oc + .route({ + description: 'Get suggested questions for a message', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdChatMessagesByMessageIdSuggestedQuestions', + path: '/apps/{app_id}/chat-messages/{message_id}/suggested-questions', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsPath })) + .output(zGetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsResponse) + +export const suggestedQuestions = { + get: get15, +} + +export const byMessageId = { + suggestedQuestions, +} + +/** + * Stop a running chat message generation + */ +export const post15 = oc + .route({ + description: 'Stop a running chat message generation', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdChatMessagesByTaskIdStop', + path: '/apps/{app_id}/chat-messages/{task_id}/stop', + tags: ['console'], + }) + .input(z.object({ params: zPostAppsByAppIdChatMessagesByTaskIdStopPath })) + .output(zPostAppsByAppIdChatMessagesByTaskIdStopResponse) + +export const stop = { + post: post15, +} + +export const byTaskId = { + stop, +} + +/** + * Get chat messages for a conversation with pagination + */ +export const get16 = oc + .route({ + description: 'Get chat messages for a conversation with pagination', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdChatMessages', + path: '/apps/{app_id}/chat-messages', + tags: ['console'], + }) + .input( + z.object({ params: zGetAppsByAppIdChatMessagesPath, query: zGetAppsByAppIdChatMessagesQuery }), + ) + .output(zGetAppsByAppIdChatMessagesResponse) + +export const chatMessages = { + get: get16, + byMessageId, + byTaskId, +} + +/** + * Delete a completion conversation + */ +export const delete4 = oc + .route({ + description: 'Delete a completion conversation', + 
inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsByAppIdCompletionConversationsByConversationId', + path: '/apps/{app_id}/completion-conversations/{conversation_id}', + successStatus: 204, + tags: ['console'], + }) + .input(z.object({ params: zDeleteAppsByAppIdCompletionConversationsByConversationIdPath })) + .output(zDeleteAppsByAppIdCompletionConversationsByConversationIdResponse) + +/** + * Get completion conversation details with messages + */ +export const get17 = oc + .route({ + description: 'Get completion conversation details with messages', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdCompletionConversationsByConversationId', + path: '/apps/{app_id}/completion-conversations/{conversation_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdCompletionConversationsByConversationIdPath })) + .output(zGetAppsByAppIdCompletionConversationsByConversationIdResponse) + +export const byConversationId2 = { + delete: delete4, + get: get17, +} + +/** + * Get completion conversations with pagination and filtering + */ +export const get18 = oc + .route({ + description: 'Get completion conversations with pagination and filtering', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdCompletionConversations', + path: '/apps/{app_id}/completion-conversations', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdCompletionConversationsPath, + query: zGetAppsByAppIdCompletionConversationsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdCompletionConversationsResponse) + +export const completionConversations = { + get: get18, + byConversationId: byConversationId2, +} + +/** + * Stop a running completion message generation + */ +export const post16 = oc + .route({ + description: 'Stop a running completion message generation', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdCompletionMessagesByTaskIdStop', + path: 
'/apps/{app_id}/completion-messages/{task_id}/stop', + tags: ['console'], + }) + .input(z.object({ params: zPostAppsByAppIdCompletionMessagesByTaskIdStopPath })) + .output(zPostAppsByAppIdCompletionMessagesByTaskIdStopResponse) + +export const stop2 = { + post: post16, +} + +export const byTaskId2 = { + stop: stop2, +} + +/** + * Generate completion message for debugging + */ +export const post17 = oc + .route({ + description: 'Generate completion message for debugging', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdCompletionMessages', + path: '/apps/{app_id}/completion-messages', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdCompletionMessagesBody, + params: zPostAppsByAppIdCompletionMessagesPath, + }), + ) + .output(zPostAppsByAppIdCompletionMessagesResponse) + +export const completionMessages = { + post: post17, + byTaskId: byTaskId2, +} + +/** + * Get conversation variables for an application + */ +export const get19 = oc + .route({ + description: 'Get conversation variables for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdConversationVariables', + path: '/apps/{app_id}/conversation-variables', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdConversationVariablesPath, + query: zGetAppsByAppIdConversationVariablesQuery, + }), + ) + .output(zGetAppsByAppIdConversationVariablesResponse) + +export const conversationVariables = { + get: get19, +} + +/** + * Convert basic mode of chatbot app to workflow mode + * + * Convert application to workflow mode + * Convert expert mode of chatbot app to workflow mode + * Convert Completion App to Workflow App + */ +export const post18 = oc + .route({ + description: + 'Convert application to workflow mode\nConvert expert mode of chatbot app to workflow mode\nConvert Completion App to Workflow App', + inputStructure: 'detailed', + method: 'POST', + operationId: 
'postAppsByAppIdConvertToWorkflow', + path: '/apps/{app_id}/convert-to-workflow', + summary: 'Convert basic mode of chatbot app to workflow mode', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdConvertToWorkflowBody, + params: zPostAppsByAppIdConvertToWorkflowPath, + }), + ) + .output(zPostAppsByAppIdConvertToWorkflowResponse) + +export const convertToWorkflow = { + post: post18, +} + +/** + * Copy app + * + * Create a copy of an existing application + */ +export const post19 = oc + .route({ + description: 'Create a copy of an existing application', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdCopy', + path: '/apps/{app_id}/copy', + successStatus: 201, + summary: 'Copy app', + tags: ['console'], + }) + .input(z.object({ body: zPostAppsByAppIdCopyBody, params: zPostAppsByAppIdCopyPath })) + .output(zPostAppsByAppIdCopyResponse) + +export const copy = { + post: post19, +} + +/** + * Export app + * + * Export application configuration as DSL + */ +export const get20 = oc + .route({ + description: 'Export application configuration as DSL', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdExport', + path: '/apps/{app_id}/export', + summary: 'Export app', + tags: ['console'], + }) + .input( + z.object({ params: zGetAppsByAppIdExportPath, query: zGetAppsByAppIdExportQuery.optional() }), + ) + .output(zGetAppsByAppIdExportResponse) + +export const export2 = { + get: get20, +} + +/** + * Export user feedback data for Google Sheets + */ +export const get21 = oc + .route({ + description: 'Export user feedback data for Google Sheets', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdFeedbacksExport', + path: '/apps/{app_id}/feedbacks/export', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdFeedbacksExportPath, + query: zGetAppsByAppIdFeedbacksExportQuery.optional(), + }), + ) + .output(zGetAppsByAppIdFeedbacksExportResponse) + +export 
const export3 = { + get: get21, +} + +/** + * Create or update message feedback (like/dislike) + */ +export const post20 = oc + .route({ + description: 'Create or update message feedback (like/dislike)', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdFeedbacks', + path: '/apps/{app_id}/feedbacks', + tags: ['console'], + }) + .input(z.object({ body: zPostAppsByAppIdFeedbacksBody, params: zPostAppsByAppIdFeedbacksPath })) + .output(zPostAppsByAppIdFeedbacksResponse) + +export const feedbacks = { + post: post20, + export: export3, +} + +/** + * Update application icon + */ +export const post21 = oc + .route({ + description: 'Update application icon', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdIcon', + path: '/apps/{app_id}/icon', + tags: ['console'], + }) + .input(z.object({ body: zPostAppsByAppIdIconBody, params: zPostAppsByAppIdIconPath })) + .output(zPostAppsByAppIdIconResponse) + +export const icon = { + post: post21, +} + +/** + * Get message details by ID + */ +export const get22 = oc + .route({ + description: 'Get message details by ID', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdMessagesByMessageId', + path: '/apps/{app_id}/messages/{message_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdMessagesByMessageIdPath })) + .output(zGetAppsByAppIdMessagesByMessageIdResponse) + +export const byMessageId2 = { + get: get22, +} + +export const messages = { + byMessageId: byMessageId2, +} + +/** + * Modify app model config + * + * Update application model configuration + */ +export const post22 = oc + .route({ + description: 'Update application model configuration', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdModelConfig', + path: '/apps/{app_id}/model-config', + summary: 'Modify app model config', + tags: ['console'], + }) + .input( + z.object({ body: zPostAppsByAppIdModelConfigBody, params: 
zPostAppsByAppIdModelConfigPath }), + ) + .output(zPostAppsByAppIdModelConfigResponse) + +export const modelConfig = { + post: post22, +} + +/** + * Check if app name is available + */ +export const post23 = oc + .route({ + description: 'Check if app name is available', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdName', + path: '/apps/{app_id}/name', + tags: ['console'], + }) + .input(z.object({ body: zPostAppsByAppIdNameBody, params: zPostAppsByAppIdNamePath })) + .output(zPostAppsByAppIdNameResponse) + +export const name = { + post: post23, +} + +/** + * Publish app to Creators Platform + */ +export const post24 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdPublishToCreatorsPlatform', + path: '/apps/{app_id}/publish-to-creators-platform', + summary: 'Publish app to Creators Platform', + tags: ['console'], + }) + .input(z.object({ params: zPostAppsByAppIdPublishToCreatorsPlatformPath })) + .output(zPostAppsByAppIdPublishToCreatorsPlatformResponse) + +export const publishToCreatorsPlatform = { + post: post24, +} + +/** + * Get MCP server configuration for an application + */ +export const get23 = oc + .route({ + description: 'Get MCP server configuration for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdServer', + path: '/apps/{app_id}/server', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdServerPath })) + .output(zGetAppsByAppIdServerResponse) + +/** + * Create MCP server configuration for an application + */ +export const post25 = oc + .route({ + description: 'Create MCP server configuration for an application', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdServer', + path: '/apps/{app_id}/server', + successStatus: 201, + tags: ['console'], + }) + .input(z.object({ body: zPostAppsByAppIdServerBody, params: zPostAppsByAppIdServerPath })) + .output(zPostAppsByAppIdServerResponse) 
+ +/** + * Update MCP server configuration for an application + */ +export const put = oc + .route({ + description: 'Update MCP server configuration for an application', + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putAppsByAppIdServer', + path: '/apps/{app_id}/server', + tags: ['console'], + }) + .input(z.object({ body: zPutAppsByAppIdServerBody, params: zPutAppsByAppIdServerPath })) + .output(zPutAppsByAppIdServerResponse) + +export const server = { + get: get23, + post: post25, + put, +} + +/** + * Reset access token for application site + */ +export const post26 = oc + .route({ + description: 'Reset access token for application site', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdSiteAccessTokenReset', + path: '/apps/{app_id}/site/access-token-reset', + tags: ['console'], + }) + .input(z.object({ params: zPostAppsByAppIdSiteAccessTokenResetPath })) + .output(zPostAppsByAppIdSiteAccessTokenResetResponse) + +export const accessTokenReset = { + post: post26, +} + +/** + * Update application site configuration + */ +export const post27 = oc + .route({ + description: 'Update application site configuration', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdSite', + path: '/apps/{app_id}/site', + tags: ['console'], + }) + .input(z.object({ body: zPostAppsByAppIdSiteBody, params: zPostAppsByAppIdSitePath })) + .output(zPostAppsByAppIdSiteResponse) + +export const site = { + post: post27, + accessTokenReset, +} + +/** + * Enable or disable app site + */ +export const post28 = oc + .route({ + description: 'Enable or disable app site', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdSiteEnable', + path: '/apps/{app_id}/site-enable', + tags: ['console'], + }) + .input(z.object({ body: zPostAppsByAppIdSiteEnableBody, params: zPostAppsByAppIdSiteEnablePath })) + .output(zPostAppsByAppIdSiteEnableResponse) + +export const siteEnable = { + post: post28, +} + +/** + * 
Get average response time statistics for an application + */ +export const get24 = oc + .route({ + description: 'Get average response time statistics for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdStatisticsAverageResponseTime', + path: '/apps/{app_id}/statistics/average-response-time', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdStatisticsAverageResponseTimePath, + query: zGetAppsByAppIdStatisticsAverageResponseTimeQuery.optional(), + }), + ) + .output(zGetAppsByAppIdStatisticsAverageResponseTimeResponse) + +export const averageResponseTime = { + get: get24, +} + +/** + * Get average session interaction statistics for an application + */ +export const get25 = oc + .route({ + description: 'Get average session interaction statistics for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdStatisticsAverageSessionInteractions', + path: '/apps/{app_id}/statistics/average-session-interactions', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdStatisticsAverageSessionInteractionsPath, + query: zGetAppsByAppIdStatisticsAverageSessionInteractionsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdStatisticsAverageSessionInteractionsResponse) + +export const averageSessionInteractions = { + get: get25, +} + +/** + * Get daily conversation statistics for an application + */ +export const get26 = oc + .route({ + description: 'Get daily conversation statistics for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdStatisticsDailyConversations', + path: '/apps/{app_id}/statistics/daily-conversations', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdStatisticsDailyConversationsPath, + query: zGetAppsByAppIdStatisticsDailyConversationsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdStatisticsDailyConversationsResponse) + +export const dailyConversations = { + 
get: get26, +} + +/** + * Get daily terminal/end-user statistics for an application + */ +export const get27 = oc + .route({ + description: 'Get daily terminal/end-user statistics for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdStatisticsDailyEndUsers', + path: '/apps/{app_id}/statistics/daily-end-users', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdStatisticsDailyEndUsersPath, + query: zGetAppsByAppIdStatisticsDailyEndUsersQuery.optional(), + }), + ) + .output(zGetAppsByAppIdStatisticsDailyEndUsersResponse) + +export const dailyEndUsers = { + get: get27, +} + +/** + * Get daily message statistics for an application + */ +export const get28 = oc + .route({ + description: 'Get daily message statistics for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdStatisticsDailyMessages', + path: '/apps/{app_id}/statistics/daily-messages', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdStatisticsDailyMessagesPath, + query: zGetAppsByAppIdStatisticsDailyMessagesQuery.optional(), + }), + ) + .output(zGetAppsByAppIdStatisticsDailyMessagesResponse) + +export const dailyMessages = { + get: get28, +} + +/** + * Get daily token cost statistics for an application + */ +export const get29 = oc + .route({ + description: 'Get daily token cost statistics for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdStatisticsTokenCosts', + path: '/apps/{app_id}/statistics/token-costs', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdStatisticsTokenCostsPath, + query: zGetAppsByAppIdStatisticsTokenCostsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdStatisticsTokenCostsResponse) + +export const tokenCosts = { + get: get29, +} + +/** + * Get tokens per second statistics for an application + */ +export const get30 = oc + .route({ + description: 'Get tokens per second 
statistics for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdStatisticsTokensPerSecond', + path: '/apps/{app_id}/statistics/tokens-per-second', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdStatisticsTokensPerSecondPath, + query: zGetAppsByAppIdStatisticsTokensPerSecondQuery.optional(), + }), + ) + .output(zGetAppsByAppIdStatisticsTokensPerSecondResponse) + +export const tokensPerSecond = { + get: get30, +} + +/** + * Get user satisfaction rate statistics for an application + */ +export const get31 = oc + .route({ + description: 'Get user satisfaction rate statistics for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdStatisticsUserSatisfactionRate', + path: '/apps/{app_id}/statistics/user-satisfaction-rate', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdStatisticsUserSatisfactionRatePath, + query: zGetAppsByAppIdStatisticsUserSatisfactionRateQuery.optional(), + }), + ) + .output(zGetAppsByAppIdStatisticsUserSatisfactionRateResponse) + +export const userSatisfactionRate = { + get: get31, +} + +export const statistics = { + averageResponseTime, + averageSessionInteractions, + dailyConversations, + dailyEndUsers, + dailyMessages, + tokenCosts, + tokensPerSecond, + userSatisfactionRate, +} + +/** + * Get available TTS voices for a specific language + */ +export const get32 = oc + .route({ + description: 'Get available TTS voices for a specific language', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdTextToAudioVoices', + path: '/apps/{app_id}/text-to-audio/voices', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdTextToAudioVoicesPath, + query: zGetAppsByAppIdTextToAudioVoicesQuery, + }), + ) + .output(zGetAppsByAppIdTextToAudioVoicesResponse) + +export const voices = { + get: get32, +} + +/** + * Convert text to speech for chat messages + */ +export const post29 = 
oc + .route({ + description: 'Convert text to speech for chat messages', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdTextToAudio', + path: '/apps/{app_id}/text-to-audio', + tags: ['console'], + }) + .input( + z.object({ body: zPostAppsByAppIdTextToAudioBody, params: zPostAppsByAppIdTextToAudioPath }), + ) + .output(zPostAppsByAppIdTextToAudioResponse) + +export const textToAudio = { + post: post29, + voices, +} + +/** + * Get app trace + * + * Get app tracing configuration + */ +export const get33 = oc + .route({ + description: 'Get app tracing configuration', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdTrace', + path: '/apps/{app_id}/trace', + summary: 'Get app trace', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdTracePath })) + .output(zGetAppsByAppIdTraceResponse) + +/** + * Update app tracing configuration + */ +export const post30 = oc + .route({ + description: 'Update app tracing configuration', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdTrace', + path: '/apps/{app_id}/trace', + tags: ['console'], + }) + .input(z.object({ body: zPostAppsByAppIdTraceBody, params: zPostAppsByAppIdTracePath })) + .output(zPostAppsByAppIdTraceResponse) + +export const trace = { + get: get33, + post: post30, +} + +/** + * Delete an existing trace app configuration + * + * Delete an existing tracing configuration for an application + */ +export const delete5 = oc + .route({ + description: 'Delete an existing tracing configuration for an application', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsByAppIdTraceConfig', + path: '/apps/{app_id}/trace-config', + successStatus: 204, + summary: 'Delete an existing trace app configuration', + tags: ['console'], + }) + .input( + z.object({ + body: zDeleteAppsByAppIdTraceConfigBody, + params: zDeleteAppsByAppIdTraceConfigPath, + }), + ) + .output(zDeleteAppsByAppIdTraceConfigResponse) + 
+/** + * Get tracing configuration for an application + */ +export const get34 = oc + .route({ + description: 'Get tracing configuration for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdTraceConfig', + path: '/apps/{app_id}/trace-config', + tags: ['console'], + }) + .input( + z.object({ params: zGetAppsByAppIdTraceConfigPath, query: zGetAppsByAppIdTraceConfigQuery }), + ) + .output(zGetAppsByAppIdTraceConfigResponse) + +/** + * Update an existing trace app configuration + * + * Update an existing tracing configuration for an application + */ +export const patch = oc + .route({ + description: 'Update an existing tracing configuration for an application', + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchAppsByAppIdTraceConfig', + path: '/apps/{app_id}/trace-config', + summary: 'Update an existing trace app configuration', + tags: ['console'], + }) + .input( + z.object({ body: zPatchAppsByAppIdTraceConfigBody, params: zPatchAppsByAppIdTraceConfigPath }), + ) + .output(zPatchAppsByAppIdTraceConfigResponse) + +/** + * Create a new trace app configuration + * + * Create a new tracing configuration for an application + */ +export const post31 = oc + .route({ + description: 'Create a new tracing configuration for an application', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdTraceConfig', + path: '/apps/{app_id}/trace-config', + successStatus: 201, + summary: 'Create a new trace app configuration', + tags: ['console'], + }) + .input( + z.object({ body: zPostAppsByAppIdTraceConfigBody, params: zPostAppsByAppIdTraceConfigPath }), + ) + .output(zPostAppsByAppIdTraceConfigResponse) + +export const traceConfig = { + delete: delete5, + get: get34, + patch, + post: post31, +} + +/** + * Update app trigger (enable/disable) + */ +export const post32 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdTriggerEnable', + path: 
'/apps/{app_id}/trigger-enable', + summary: 'Update app trigger (enable/disable)', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdTriggerEnableBody, + params: zPostAppsByAppIdTriggerEnablePath, + }), + ) + .output(zPostAppsByAppIdTriggerEnableResponse) + +export const triggerEnable = { + post: post32, +} + +/** + * Get app triggers list + */ +export const get35 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdTriggers', + path: '/apps/{app_id}/triggers', + summary: 'Get app triggers list', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdTriggersPath })) + .output(zGetAppsByAppIdTriggersResponse) + +export const triggers = { + get: get35, +} + +/** + * Get workflow app logs + * + * Get workflow application execution logs + */ +export const get36 = oc + .route({ + description: 'Get workflow application execution logs', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowAppLogs', + path: '/apps/{app_id}/workflow-app-logs', + summary: 'Get workflow app logs', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdWorkflowAppLogsPath, + query: zGetAppsByAppIdWorkflowAppLogsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdWorkflowAppLogsResponse) + +export const workflowAppLogs = { + get: get36, +} + +/** + * Get workflow archived logs + * + * Get workflow archived execution logs + */ +export const get37 = oc + .route({ + description: 'Get workflow archived execution logs', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowArchivedLogs', + path: '/apps/{app_id}/workflow-archived-logs', + summary: 'Get workflow archived logs', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdWorkflowArchivedLogsPath, + query: zGetAppsByAppIdWorkflowArchivedLogsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdWorkflowArchivedLogsResponse) + +export const workflowArchivedLogs = { + 
get: get37, +} + +/** + * Get workflow runs count statistics + * + * Get workflow runs count statistics + */ +export const get38 = oc + .route({ + description: 'Get workflow runs count statistics', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowRunsCount', + path: '/apps/{app_id}/workflow-runs/count', + summary: 'Get workflow runs count statistics', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdWorkflowRunsCountPath, + query: zGetAppsByAppIdWorkflowRunsCountQuery.optional(), + }), + ) + .output(zGetAppsByAppIdWorkflowRunsCountResponse) + +export const count3 = { + get: get38, +} + +/** + * Stop workflow task + * + * Stop running workflow task + */ +export const post33 = oc + .route({ + description: 'Stop running workflow task', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowRunsTasksByTaskIdStop', + path: '/apps/{app_id}/workflow-runs/tasks/{task_id}/stop', + summary: 'Stop workflow task', + tags: ['console'], + }) + .input(z.object({ params: zPostAppsByAppIdWorkflowRunsTasksByTaskIdStopPath })) + .output(zPostAppsByAppIdWorkflowRunsTasksByTaskIdStopResponse) + +export const stop3 = { + post: post33, +} + +export const byTaskId3 = { + stop: stop3, +} + +export const tasks = { + byTaskId: byTaskId3, +} + +/** + * Generate a download URL for an archived workflow run. 
+ */ +export const get39 = oc + .route({ + description: 'Generate a download URL for an archived workflow run.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowRunsByRunIdExport', + path: '/apps/{app_id}/workflow-runs/{run_id}/export', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowRunsByRunIdExportPath })) + .output(zGetAppsByAppIdWorkflowRunsByRunIdExportResponse) + +export const export4 = { + get: get39, +} + +/** + * Get workflow run node execution list + * + * Get workflow run node execution list + */ +export const get40 = oc + .route({ + description: 'Get workflow run node execution list', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowRunsByRunIdNodeExecutions', + path: '/apps/{app_id}/workflow-runs/{run_id}/node-executions', + summary: 'Get workflow run node execution list', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsPath })) + .output(zGetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsResponse) + +export const nodeExecutions = { + get: get40, +} + +/** + * Get workflow run detail + * + * Get workflow run detail + */ +export const get41 = oc + .route({ + description: 'Get workflow run detail', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowRunsByRunId', + path: '/apps/{app_id}/workflow-runs/{run_id}', + summary: 'Get workflow run detail', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowRunsByRunIdPath })) + .output(zGetAppsByAppIdWorkflowRunsByRunIdResponse) + +export const byRunId = { + get: get41, + export: export4, + nodeExecutions, +} + +/** + * Get workflow run list + * + * Get workflow run list + */ +export const get42 = oc + .route({ + description: 'Get workflow run list', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowRuns', + path: '/apps/{app_id}/workflow-runs', + summary: 'Get 
workflow run list', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdWorkflowRunsPath, + query: zGetAppsByAppIdWorkflowRunsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdWorkflowRunsResponse) + +export const workflowRuns2 = { + get: get42, + count: count3, + tasks, + byRunId, +} + +/** + * Get all users in current tenant for mentions + * + * Get all users in current tenant for mentions + */ +export const get43 = oc + .route({ + description: 'Get all users in current tenant for mentions', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowCommentsMentionUsers', + path: '/apps/{app_id}/workflow/comments/mention-users', + summary: 'Get all users in current tenant for mentions', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowCommentsMentionUsersPath })) + .output(zGetAppsByAppIdWorkflowCommentsMentionUsersResponse) + +export const mentionUsers = { + get: get43, +} + +/** + * Delete a comment reply + * + * Delete a comment reply + */ +export const delete6 = oc + .route({ + description: 'Delete a comment reply', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyId', + path: '/apps/{app_id}/workflow/comments/{comment_id}/replies/{reply_id}', + successStatus: 204, + summary: 'Delete a comment reply', + tags: ['console'], + }) + .input(z.object({ params: zDeleteAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdPath })) + .output(zDeleteAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponse) + +/** + * Update a comment reply + * + * Update a comment reply + */ +export const put2 = oc + .route({ + description: 'Update a comment reply', + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyId', + path: '/apps/{app_id}/workflow/comments/{comment_id}/replies/{reply_id}', + summary: 'Update a comment reply', + tags: ['console'], + }) + 
.input( + z.object({ + body: zPutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdBody, + params: zPutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdPath, + }), + ) + .output(zPutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponse) + +export const byReplyId = { + delete: delete6, + put: put2, +} + +/** + * Add a reply to a workflow comment + * + * Add a reply to a workflow comment + */ +export const post34 = oc + .route({ + description: 'Add a reply to a workflow comment', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowCommentsByCommentIdReplies', + path: '/apps/{app_id}/workflow/comments/{comment_id}/replies', + successStatus: 201, + summary: 'Add a reply to a workflow comment', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowCommentsByCommentIdRepliesBody, + params: zPostAppsByAppIdWorkflowCommentsByCommentIdRepliesPath, + }), + ) + .output(zPostAppsByAppIdWorkflowCommentsByCommentIdRepliesResponse) + +export const replies = { + post: post34, + byReplyId, +} + +/** + * Resolve a workflow comment + * + * Resolve a workflow comment + */ +export const post35 = oc + .route({ + description: 'Resolve a workflow comment', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowCommentsByCommentIdResolve', + path: '/apps/{app_id}/workflow/comments/{comment_id}/resolve', + summary: 'Resolve a workflow comment', + tags: ['console'], + }) + .input(z.object({ params: zPostAppsByAppIdWorkflowCommentsByCommentIdResolvePath })) + .output(zPostAppsByAppIdWorkflowCommentsByCommentIdResolveResponse) + +export const resolve = { + post: post35, +} + +/** + * Delete a workflow comment + * + * Delete a workflow comment + */ +export const delete7 = oc + .route({ + description: 'Delete a workflow comment', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsByAppIdWorkflowCommentsByCommentId', + path: 
'/apps/{app_id}/workflow/comments/{comment_id}', + successStatus: 204, + summary: 'Delete a workflow comment', + tags: ['console'], + }) + .input(z.object({ params: zDeleteAppsByAppIdWorkflowCommentsByCommentIdPath })) + .output(zDeleteAppsByAppIdWorkflowCommentsByCommentIdResponse) + +/** + * Get a specific workflow comment + * + * Get a specific workflow comment + */ +export const get44 = oc + .route({ + description: 'Get a specific workflow comment', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowCommentsByCommentId', + path: '/apps/{app_id}/workflow/comments/{comment_id}', + summary: 'Get a specific workflow comment', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowCommentsByCommentIdPath })) + .output(zGetAppsByAppIdWorkflowCommentsByCommentIdResponse) + +/** + * Update a workflow comment + * + * Update a workflow comment + */ +export const put3 = oc + .route({ + description: 'Update a workflow comment', + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putAppsByAppIdWorkflowCommentsByCommentId', + path: '/apps/{app_id}/workflow/comments/{comment_id}', + summary: 'Update a workflow comment', + tags: ['console'], + }) + .input( + z.object({ + body: zPutAppsByAppIdWorkflowCommentsByCommentIdBody, + params: zPutAppsByAppIdWorkflowCommentsByCommentIdPath, + }), + ) + .output(zPutAppsByAppIdWorkflowCommentsByCommentIdResponse) + +export const byCommentId = { + delete: delete7, + get: get44, + put: put3, + replies, + resolve, +} + +/** + * Get all comments for a workflow + * + * Get all comments for a workflow + */ +export const get45 = oc + .route({ + description: 'Get all comments for a workflow', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowComments', + path: '/apps/{app_id}/workflow/comments', + summary: 'Get all comments for a workflow', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowCommentsPath })) + 
.output(zGetAppsByAppIdWorkflowCommentsResponse) + +/** + * Create a new workflow comment + * + * Create a new workflow comment + */ +export const post36 = oc + .route({ + description: 'Create a new workflow comment', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowComments', + path: '/apps/{app_id}/workflow/comments', + successStatus: 201, + summary: 'Create a new workflow comment', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowCommentsBody, + params: zPostAppsByAppIdWorkflowCommentsPath, + }), + ) + .output(zPostAppsByAppIdWorkflowCommentsResponse) + +export const comments = { + get: get45, + post: post36, + mentionUsers, + byCommentId, +} + +/** + * Get workflow average app interaction statistics + */ +export const get46 = oc + .route({ + description: 'Get workflow average app interaction statistics', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowStatisticsAverageAppInteractions', + path: '/apps/{app_id}/workflow/statistics/average-app-interactions', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdWorkflowStatisticsAverageAppInteractionsPath, + query: zGetAppsByAppIdWorkflowStatisticsAverageAppInteractionsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdWorkflowStatisticsAverageAppInteractionsResponse) + +export const averageAppInteractions = { + get: get46, +} + +/** + * Get workflow daily runs statistics + */ +export const get47 = oc + .route({ + description: 'Get workflow daily runs statistics', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowStatisticsDailyConversations', + path: '/apps/{app_id}/workflow/statistics/daily-conversations', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdWorkflowStatisticsDailyConversationsPath, + query: zGetAppsByAppIdWorkflowStatisticsDailyConversationsQuery.optional(), + }), + ) + 
.output(zGetAppsByAppIdWorkflowStatisticsDailyConversationsResponse) + +export const dailyConversations2 = { + get: get47, +} + +/** + * Get workflow daily terminals statistics + */ +export const get48 = oc + .route({ + description: 'Get workflow daily terminals statistics', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowStatisticsDailyTerminals', + path: '/apps/{app_id}/workflow/statistics/daily-terminals', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdWorkflowStatisticsDailyTerminalsPath, + query: zGetAppsByAppIdWorkflowStatisticsDailyTerminalsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdWorkflowStatisticsDailyTerminalsResponse) + +export const dailyTerminals = { + get: get48, +} + +/** + * Get workflow daily token cost statistics + */ +export const get49 = oc + .route({ + description: 'Get workflow daily token cost statistics', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowStatisticsTokenCosts', + path: '/apps/{app_id}/workflow/statistics/token-costs', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdWorkflowStatisticsTokenCostsPath, + query: zGetAppsByAppIdWorkflowStatisticsTokenCostsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdWorkflowStatisticsTokenCostsResponse) + +export const tokenCosts2 = { + get: get49, +} + +export const statistics2 = { + averageAppInteractions, + dailyConversations: dailyConversations2, + dailyTerminals, + tokenCosts: tokenCosts2, +} + +export const workflow = { + comments, + statistics: statistics2, +} + +/** + * Get default block config + * + * Get default block configuration by type + */ +export const get50 = oc + .route({ + description: 'Get default block configuration by type', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockType', + path: '/apps/{app_id}/workflows/default-workflow-block-configs/{block_type}', + 
summary: 'Get default block config', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypePath, + query: zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeQuery.optional(), + }), + ) + .output(zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponse) + +export const byBlockType = { + get: get50, +} + +/** + * Get default block config + * + * Get default block configurations for workflow + */ +export const get51 = oc + .route({ + description: 'Get default block configurations for workflow', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowsDefaultWorkflowBlockConfigs', + path: '/apps/{app_id}/workflows/default-workflow-block-configs', + summary: 'Get default block config', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsPath })) + .output(zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsResponse) + +export const defaultWorkflowBlockConfigs = { + get: get51, + byBlockType, +} + +/** + * Get conversation variables for workflow + */ +export const get52 = oc + .route({ + description: 'Get conversation variables for workflow', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowsDraftConversationVariables', + path: '/apps/{app_id}/workflows/draft/conversation-variables', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowsDraftConversationVariablesPath })) + .output(zGetAppsByAppIdWorkflowsDraftConversationVariablesResponse) + +/** + * Update conversation variables for workflow draft + */ +export const post37 = oc + .route({ + description: 'Update conversation variables for workflow draft', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsDraftConversationVariables', + path: '/apps/{app_id}/workflows/draft/conversation-variables', + tags: ['console'], + }) + .input( + z.object({ + body: 
zPostAppsByAppIdWorkflowsDraftConversationVariablesBody, + params: zPostAppsByAppIdWorkflowsDraftConversationVariablesPath, + }), + ) + .output(zPostAppsByAppIdWorkflowsDraftConversationVariablesResponse) + +export const conversationVariables2 = { + get: get52, + post: post37, +} + +/** + * Get draft workflow + * + * Get environment variables for workflow + */ +export const get53 = oc + .route({ + description: 'Get environment variables for workflow', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowsDraftEnvironmentVariables', + path: '/apps/{app_id}/workflows/draft/environment-variables', + summary: 'Get draft workflow', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowsDraftEnvironmentVariablesPath })) + .output(zGetAppsByAppIdWorkflowsDraftEnvironmentVariablesResponse) + +/** + * Update environment variables for workflow draft + */ +export const post38 = oc + .route({ + description: 'Update environment variables for workflow draft', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsDraftEnvironmentVariables', + path: '/apps/{app_id}/workflows/draft/environment-variables', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowsDraftEnvironmentVariablesBody, + params: zPostAppsByAppIdWorkflowsDraftEnvironmentVariablesPath, + }), + ) + .output(zPostAppsByAppIdWorkflowsDraftEnvironmentVariablesResponse) + +export const environmentVariables = { + get: get53, + post: post38, +} + +/** + * Update draft workflow features + */ +export const post39 = oc + .route({ + description: 'Update draft workflow features', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsDraftFeatures', + path: '/apps/{app_id}/workflows/draft/features', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowsDraftFeaturesBody, + params: zPostAppsByAppIdWorkflowsDraftFeaturesPath, + }), + ) + 
.output(zPostAppsByAppIdWorkflowsDraftFeaturesResponse) + +export const features = { + post: post39, +} + +/** + * Test human input delivery + * + * Test human input delivery for workflow + */ +export const post40 = oc + .route({ + description: 'Test human input delivery for workflow', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTest', + path: '/apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/delivery-test', + summary: 'Test human input delivery', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestBody, + params: zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestPath, + }), + ) + .output(zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestResponse) + +export const deliveryTest = { + post: post40, +} + +/** + * Preview human input form content and placeholders + * + * Get human input form preview for workflow + */ +export const post41 = oc + .route({ + description: 'Get human input form preview for workflow', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreview', + path: '/apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/preview', + summary: 'Preview human input form content and placeholders', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewBody, + params: zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewPath, + }), + ) + .output(zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponse) + +export const preview2 = { + post: post41, +} + +/** + * Submit human input form preview + * + * Submit human input form preview for workflow + */ +export const post42 = oc + .route({ + description: 'Submit human input form preview for workflow', + inputStructure: 'detailed', + method: 'POST', + 
operationId: 'postAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRun', + path: '/apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/run', + summary: 'Submit human input form preview', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunBody, + params: zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunPath, + }), + ) + .output(zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunResponse) + +export const run5 = { + post: post42, +} + +export const form2 = { + preview: preview2, + run: run5, +} + +export const byNodeId4 = { + deliveryTest, + form: form2, +} + +export const nodes4 = { + byNodeId: byNodeId4, +} + +export const humanInput2 = { + nodes: nodes4, +} + +/** + * Run draft workflow iteration node + * + * Run draft workflow iteration node + */ +export const post43 = oc + .route({ + description: 'Run draft workflow iteration node', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRun', + path: '/apps/{app_id}/workflows/draft/iteration/nodes/{node_id}/run', + summary: 'Run draft workflow iteration node', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunBody, + params: zPostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunPath, + }), + ) + .output(zPostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunResponse) + +export const run6 = { + post: post43, +} + +export const byNodeId5 = { + run: run6, +} + +export const nodes5 = { + byNodeId: byNodeId5, +} + +export const iteration2 = { + nodes: nodes5, +} + +/** + * Run draft workflow loop node + * + * Run draft workflow loop node + */ +export const post44 = oc + .route({ + description: 'Run draft workflow loop node', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRun', + path: 
'/apps/{app_id}/workflows/draft/loop/nodes/{node_id}/run', + summary: 'Run draft workflow loop node', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunBody, + params: zPostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunPath, + }), + ) + .output(zPostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunResponse) + +export const run7 = { + post: post44, +} + +export const byNodeId6 = { + run: run7, +} + +export const nodes6 = { + byNodeId: byNodeId6, +} + +export const loop2 = { + nodes: nodes6, +} + +/** + * Get last run result for draft workflow node + */ +export const get54 = oc + .route({ + description: 'Get last run result for draft workflow node', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowsDraftNodesByNodeIdLastRun', + path: '/apps/{app_id}/workflows/draft/nodes/{node_id}/last-run', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunPath })) + .output(zGetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunResponse) + +export const lastRun = { + get: get54, +} + +/** + * Run draft workflow node + * + * Run draft workflow node + */ +export const post45 = oc + .route({ + description: 'Run draft workflow node', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsDraftNodesByNodeIdRun', + path: '/apps/{app_id}/workflows/draft/nodes/{node_id}/run', + summary: 'Run draft workflow node', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowsDraftNodesByNodeIdRunBody, + params: zPostAppsByAppIdWorkflowsDraftNodesByNodeIdRunPath, + }), + ) + .output(zPostAppsByAppIdWorkflowsDraftNodesByNodeIdRunResponse) + +export const run8 = { + post: post45, +} + +/** + * Poll for trigger events and execute single node when event arrives + * + * Poll for trigger events and execute single node when event arrives + */ +export const post46 = oc + .route({ + description: 'Poll for trigger 
events and execute single node when event arrives', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRun', + path: '/apps/{app_id}/workflows/draft/nodes/{node_id}/trigger/run', + summary: 'Poll for trigger events and execute single node when event arrives', + tags: ['console'], + }) + .input(z.object({ params: zPostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunPath })) + .output(zPostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunResponse) + +export const run9 = { + post: post46, +} + +export const trigger = { + run: run9, +} + +/** + * Delete all variables for a specific node + */ +export const delete8 = oc + .route({ + description: 'Delete all variables for a specific node', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsByAppIdWorkflowsDraftNodesByNodeIdVariables', + path: '/apps/{app_id}/workflows/draft/nodes/{node_id}/variables', + successStatus: 204, + tags: ['console'], + }) + .input(z.object({ params: zDeleteAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesPath })) + .output(zDeleteAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponse) + +/** + * Get variables for a specific node + */ +export const get55 = oc + .route({ + description: 'Get variables for a specific node', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowsDraftNodesByNodeIdVariables', + path: '/apps/{app_id}/workflows/draft/nodes/{node_id}/variables', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesPath })) + .output(zGetAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponse) + +export const variables = { + delete: delete8, + get: get55, +} + +export const byNodeId7 = { + lastRun, + run: run8, + trigger, + variables, +} + +export const nodes7 = { + byNodeId: byNodeId7, +} + +/** + * Run draft workflow + * + * Run draft workflow + */ +export const post47 = oc + .route({ + description: 'Run draft workflow', 
+ inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsDraftRun', + path: '/apps/{app_id}/workflows/draft/run', + summary: 'Run draft workflow', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowsDraftRunBody, + params: zPostAppsByAppIdWorkflowsDraftRunPath, + }), + ) + .output(zPostAppsByAppIdWorkflowsDraftRunResponse) + +export const run10 = { + post: post47, +} + +/** + * Get system variables for workflow + */ +export const get56 = oc + .route({ + description: 'Get system variables for workflow', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowsDraftSystemVariables', + path: '/apps/{app_id}/workflows/draft/system-variables', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowsDraftSystemVariablesPath })) + .output(zGetAppsByAppIdWorkflowsDraftSystemVariablesResponse) + +export const systemVariables = { + get: get56, +} + +/** + * Poll for trigger events and execute full workflow when event arrives + * + * Poll for trigger events and execute full workflow when event arrives + */ +export const post48 = oc + .route({ + description: 'Poll for trigger events and execute full workflow when event arrives', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsDraftTriggerRun', + path: '/apps/{app_id}/workflows/draft/trigger/run', + summary: 'Poll for trigger events and execute full workflow when event arrives', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowsDraftTriggerRunBody, + params: zPostAppsByAppIdWorkflowsDraftTriggerRunPath, + }), + ) + .output(zPostAppsByAppIdWorkflowsDraftTriggerRunResponse) + +export const run11 = { + post: post48, +} + +/** + * Full workflow debug when the start node is a trigger + * + * Full workflow debug when the start node is a trigger + */ +export const post49 = oc + .route({ + description: 'Full workflow debug when the start node is a 
trigger', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsDraftTriggerRunAll', + path: '/apps/{app_id}/workflows/draft/trigger/run-all', + summary: 'Full workflow debug when the start node is a trigger', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowsDraftTriggerRunAllBody, + params: zPostAppsByAppIdWorkflowsDraftTriggerRunAllPath, + }), + ) + .output(zPostAppsByAppIdWorkflowsDraftTriggerRunAllResponse) + +export const runAll = { + post: post49, +} + +export const trigger2 = { + run: run11, + runAll, +} + +/** + * Reset a workflow variable to its default value + */ +export const put4 = oc + .route({ + description: 'Reset a workflow variable to its default value', + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putAppsByAppIdWorkflowsDraftVariablesByVariableIdReset', + path: '/apps/{app_id}/workflows/draft/variables/{variable_id}/reset', + tags: ['console'], + }) + .input(z.object({ params: zPutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetPath })) + .output(zPutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetResponse) + +export const reset = { + put: put4, +} + +/** + * Delete a workflow variable + */ +export const delete9 = oc + .route({ + description: 'Delete a workflow variable', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsByAppIdWorkflowsDraftVariablesByVariableId', + path: '/apps/{app_id}/workflows/draft/variables/{variable_id}', + successStatus: 204, + tags: ['console'], + }) + .input(z.object({ params: zDeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdPath })) + .output(zDeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdResponse) + +/** + * Get a specific workflow variable + */ +export const get57 = oc + .route({ + description: 'Get a specific workflow variable', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowsDraftVariablesByVariableId', + path: 
'/apps/{app_id}/workflows/draft/variables/{variable_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowsDraftVariablesByVariableIdPath })) + .output(zGetAppsByAppIdWorkflowsDraftVariablesByVariableIdResponse) + +/** + * Update a workflow variable + */ +export const patch2 = oc + .route({ + description: 'Update a workflow variable', + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchAppsByAppIdWorkflowsDraftVariablesByVariableId', + path: '/apps/{app_id}/workflows/draft/variables/{variable_id}', + tags: ['console'], + }) + .input( + z.object({ + body: zPatchAppsByAppIdWorkflowsDraftVariablesByVariableIdBody, + params: zPatchAppsByAppIdWorkflowsDraftVariablesByVariableIdPath, + }), + ) + .output(zPatchAppsByAppIdWorkflowsDraftVariablesByVariableIdResponse) + +export const byVariableId = { + delete: delete9, + get: get57, + patch: patch2, + reset, +} + +/** + * Delete all draft workflow variables + */ +export const delete10 = oc + .route({ + description: 'Delete all draft workflow variables', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsByAppIdWorkflowsDraftVariables', + path: '/apps/{app_id}/workflows/draft/variables', + successStatus: 204, + tags: ['console'], + }) + .input(z.object({ params: zDeleteAppsByAppIdWorkflowsDraftVariablesPath })) + .output(zDeleteAppsByAppIdWorkflowsDraftVariablesResponse) + +/** + * Get draft workflow + * + * Get draft workflow variables + */ +export const get58 = oc + .route({ + description: 'Get draft workflow variables', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowsDraftVariables', + path: '/apps/{app_id}/workflows/draft/variables', + summary: 'Get draft workflow', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdWorkflowsDraftVariablesPath, + query: zGetAppsByAppIdWorkflowsDraftVariablesQuery.optional(), + }), + ) + .output(zGetAppsByAppIdWorkflowsDraftVariablesResponse) + +export const 
variables2 = { + delete: delete10, + get: get58, + byVariableId, +} + +/** + * Get draft workflow + * + * Get draft workflow for an application + */ +export const get59 = oc + .route({ + description: 'Get draft workflow for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowsDraft', + path: '/apps/{app_id}/workflows/draft', + summary: 'Get draft workflow', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowsDraftPath })) + .output(zGetAppsByAppIdWorkflowsDraftResponse) + +/** + * Sync draft workflow + * + * Sync draft workflow configuration + */ +export const post50 = oc + .route({ + description: 'Sync draft workflow configuration', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsDraft', + path: '/apps/{app_id}/workflows/draft', + summary: 'Sync draft workflow', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowsDraftBody, + params: zPostAppsByAppIdWorkflowsDraftPath, + }), + ) + .output(zPostAppsByAppIdWorkflowsDraftResponse) + +export const draft2 = { + get: get59, + post: post50, + conversationVariables: conversationVariables2, + environmentVariables, + features, + humanInput: humanInput2, + iteration: iteration2, + loop: loop2, + nodes: nodes7, + run: run10, + systemVariables, + trigger: trigger2, + variables: variables2, +} + +/** + * Get published workflow + * + * Get published workflow for an application + */ +export const get60 = oc + .route({ + description: 'Get published workflow for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowsPublish', + path: '/apps/{app_id}/workflows/publish', + summary: 'Get published workflow', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdWorkflowsPublishPath })) + .output(zGetAppsByAppIdWorkflowsPublishResponse) + +/** + * Publish workflow + */ +export const post51 = oc + .route({ + inputStructure: 
'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsPublish', + path: '/apps/{app_id}/workflows/publish', + summary: 'Publish workflow', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAppsByAppIdWorkflowsPublishBody, + params: zPostAppsByAppIdWorkflowsPublishPath, + }), + ) + .output(zPostAppsByAppIdWorkflowsPublishResponse) + +export const publish = { + get: get60, + post: post51, +} + +/** + * Get webhook trigger for a node + */ +export const get61 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflowsTriggersWebhook', + path: '/apps/{app_id}/workflows/triggers/webhook', + summary: 'Get webhook trigger for a node', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdWorkflowsTriggersWebhookPath, + query: zGetAppsByAppIdWorkflowsTriggersWebhookQuery, + }), + ) + .output(zGetAppsByAppIdWorkflowsTriggersWebhookResponse) + +export const webhook = { + get: get61, +} + +export const triggers2 = { + webhook, +} + +/** + * Restore a published workflow version into the draft workflow + */ +export const post52 = oc + .route({ + description: 'Restore a published workflow version into the draft workflow', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByAppIdWorkflowsByWorkflowIdRestore', + path: '/apps/{app_id}/workflows/{workflow_id}/restore', + tags: ['console'], + }) + .input(z.object({ params: zPostAppsByAppIdWorkflowsByWorkflowIdRestorePath })) + .output(zPostAppsByAppIdWorkflowsByWorkflowIdRestoreResponse) + +export const restore = { + post: post52, +} + +/** + * Delete workflow + */ +export const delete11 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsByAppIdWorkflowsByWorkflowId', + path: '/apps/{app_id}/workflows/{workflow_id}', + summary: 'Delete workflow', + tags: ['console'], + }) + .input(z.object({ params: zDeleteAppsByAppIdWorkflowsByWorkflowIdPath })) + 
.output(zDeleteAppsByAppIdWorkflowsByWorkflowIdResponse) + +/** + * Update workflow attributes + * + * Update workflow by ID + */ +export const patch3 = oc + .route({ + description: 'Update workflow by ID', + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchAppsByAppIdWorkflowsByWorkflowId', + path: '/apps/{app_id}/workflows/{workflow_id}', + summary: 'Update workflow attributes', + tags: ['console'], + }) + .input( + z.object({ + body: zPatchAppsByAppIdWorkflowsByWorkflowIdBody, + params: zPatchAppsByAppIdWorkflowsByWorkflowIdPath, + }), + ) + .output(zPatchAppsByAppIdWorkflowsByWorkflowIdResponse) + +export const byWorkflowId = { + delete: delete11, + patch: patch3, + restore, +} + +/** + * Get published workflows + * + * Get all published workflows for an application + */ +export const get62 = oc + .route({ + description: 'Get all published workflows for an application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByAppIdWorkflows', + path: '/apps/{app_id}/workflows', + summary: 'Get published workflows', + tags: ['console'], + }) + .input( + z.object({ + params: zGetAppsByAppIdWorkflowsPath, + query: zGetAppsByAppIdWorkflowsQuery.optional(), + }), + ) + .output(zGetAppsByAppIdWorkflowsResponse) + +export const workflows3 = { + get: get62, + defaultWorkflowBlockConfigs, + draft: draft2, + publish, + triggers: triggers2, + byWorkflowId, +} + +/** + * Delete app + * + * Delete application + */ +export const delete12 = oc + .route({ + description: 'Delete application', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsByAppId', + path: '/apps/{app_id}', + successStatus: 204, + summary: 'Delete app', + tags: ['console'], + }) + .input(z.object({ params: zDeleteAppsByAppIdPath })) + .output(zDeleteAppsByAppIdResponse) + +/** + * Get app detail + * + * Get application details + */ +export const get63 = oc + .route({ + description: 'Get application details', + inputStructure: 'detailed', + method: 
'GET', + operationId: 'getAppsByAppId', + path: '/apps/{app_id}', + summary: 'Get app detail', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByAppIdPath })) + .output(zGetAppsByAppIdResponse) + +/** + * Update app + * + * Update application details + */ +export const put5 = oc + .route({ + description: 'Update application details', + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putAppsByAppId', + path: '/apps/{app_id}', + summary: 'Update app', + tags: ['console'], + }) + .input(z.object({ body: zPutAppsByAppIdBody, params: zPutAppsByAppIdPath })) + .output(zPutAppsByAppIdResponse) + +export const byAppId2 = { + delete: delete12, + get: get63, + put: put5, + advancedChat, + agent, + annotationReply, + annotationSetting, + annotationSettings, + annotations, + apiEnable, + audioToText, + chatConversations, + chatMessages, + completionConversations, + completionMessages, + conversationVariables, + convertToWorkflow, + copy, + export: export2, + feedbacks, + icon, + messages, + modelConfig, + name, + publishToCreatorsPlatform, + server, + site, + siteEnable, + statistics, + textToAudio, + trace, + traceConfig, + triggerEnable, + triggers, + workflowAppLogs, + workflowArchivedLogs, + workflowRuns: workflowRuns2, + workflow, + workflows: workflows3, +} + +/** + * Delete an API key for an app + * + * Delete an API key for an app + */ +export const delete13 = oc + .route({ + description: 'Delete an API key for an app', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsByResourceIdApiKeysByApiKeyId', + path: '/apps/{resource_id}/api-keys/{api_key_id}', + successStatus: 204, + summary: 'Delete an API key for an app', + tags: ['console'], + }) + .input(z.object({ params: zDeleteAppsByResourceIdApiKeysByApiKeyIdPath })) + .output(zDeleteAppsByResourceIdApiKeysByApiKeyIdResponse) + +export const byApiKeyId = { + delete: delete13, +} + +/** + * Get all API keys for an app + * + * Get all API keys for an app + */ +export 
const get64 = oc + .route({ + description: 'Get all API keys for an app', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByResourceIdApiKeys', + path: '/apps/{resource_id}/api-keys', + summary: 'Get all API keys for an app', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByResourceIdApiKeysPath })) + .output(zGetAppsByResourceIdApiKeysResponse) + +/** + * Create a new API key for an app + * + * Create a new API key for an app + */ +export const post53 = oc + .route({ + description: 'Create a new API key for an app', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsByResourceIdApiKeys', + path: '/apps/{resource_id}/api-keys', + successStatus: 201, + summary: 'Create a new API key for an app', + tags: ['console'], + }) + .input(z.object({ params: zPostAppsByResourceIdApiKeysPath })) + .output(zPostAppsByResourceIdApiKeysResponse) + +export const apiKeys = { + get: get64, + post: post53, + byApiKeyId, +} + +export const byResourceId = { + apiKeys, +} + +/** + * Refresh MCP server configuration and regenerate server code + */ +export const get65 = oc + .route({ + description: 'Refresh MCP server configuration and regenerate server code', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsByServerIdServerRefresh', + path: '/apps/{server_id}/server/refresh', + tags: ['console'], + }) + .input(z.object({ params: zGetAppsByServerIdServerRefreshPath })) + .output(zGetAppsByServerIdServerRefreshResponse) + +export const refresh = { + get: get65, +} + +export const server2 = { + refresh, +} + +export const byServerId = { + server: server2, +} + +/** + * Get app list + * + * Get list of applications with pagination and filtering + */ +export const get66 = oc + .route({ + description: 'Get list of applications with pagination and filtering', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getApps', + path: '/apps', + summary: 'Get app list', + tags: ['console'], + }) + 
.input(z.object({ query: zGetAppsQuery.optional() })) + .output(zGetAppsResponse) + +/** + * Create app + * + * Create a new application + */ +export const post54 = oc + .route({ + description: 'Create a new application', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postApps', + path: '/apps', + successStatus: 201, + summary: 'Create app', + tags: ['console'], + }) + .input(z.object({ body: zPostAppsBody })) + .output(zPostAppsResponse) + +export const apps = { + get: get66, + post: post54, + imports, + workflows, + byAppId: byAppId2, + byResourceId, + byServerId, +} + +export const contract = { + apps, +} diff --git a/packages/contracts/generated/api/console/apps/types.gen.ts b/packages/contracts/generated/api/console/apps/types.gen.ts new file mode 100644 index 0000000000..4a4742adcf --- /dev/null +++ b/packages/contracts/generated/api/console/apps/types.gen.ts @@ -0,0 +1,4493 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type AppPagination = { + has_next: boolean + items: Array + page: number + per_page: number + total: number +} + +export type CreateAppPayload = { + description?: string | null + icon?: string | null + icon_background?: string | null + icon_type?: IconType + mode: 'chat' | 'agent-chat' | 'advanced-chat' | 'workflow' | 'completion' + name: string +} + +export type AppDetail = { + access_mode?: string | null + app_model_config?: ModelConfig + created_at?: number | null + created_by?: string | null + description?: string | null + enable_api: boolean + enable_site: boolean + icon?: string | null + icon_background?: string | null + id: string + mode_compatible_with_agent: string + name: string + tags?: Array + tracing?: JsonValue + updated_at?: number | null + updated_by?: string | null + use_icon_as_answer_icon?: boolean | null + workflow?: WorkflowPartial +} + +export type AppImportPayload = { + app_id?: string 
| null + description?: string | null + icon?: string | null + icon_background?: string | null + icon_type?: string | null + mode: string + name?: string | null + yaml_content?: string | null + yaml_url?: string | null +} + +export type Import = { + app_id?: string | null + app_mode?: string | null + current_dsl_version?: string + error?: string + id: string + imported_dsl_version?: string + status: ImportStatus +} + +export type CheckDependenciesResult = { + leaked_dependencies?: Array +} + +export type AppDetailWithSite = { + access_mode?: string | null + api_base_url?: string | null + app_model_config?: ModelConfig + created_at?: number | null + created_by?: string | null + deleted_tools?: Array + description?: string | null + enable_api: boolean + enable_site: boolean + icon?: string | null + icon_background?: string | null + icon_type?: string | null + id: string + max_active_requests?: number | null + mode_compatible_with_agent: string + name: string + site?: Site + tags?: Array + tracing?: JsonValue + updated_at?: number | null + updated_by?: string | null + use_icon_as_answer_icon?: boolean | null + workflow?: WorkflowPartial +} + +export type UpdateAppPayload = { + description?: string | null + icon?: string | null + icon_background?: string | null + icon_type?: IconType + max_active_requests?: number | null + name: string + use_icon_as_answer_icon?: boolean | null +} + +export type AdvancedChatWorkflowRunPagination = { + [key: string]: unknown +} + +export type WorkflowRunCount = { + [key: string]: unknown +} + +export type HumanInputFormPreviewPayload = { + inputs?: { + [key: string]: unknown + } +} + +export type HumanInputFormSubmitPayload = { + action: string + form_inputs: { + [key: string]: unknown + } + inputs: { + [key: string]: unknown + } +} + +export type IterationNodeRunPayload = { + inputs?: { + [key: string]: unknown + } | null +} + +export type LoopNodeRunPayload = { + inputs?: { + [key: string]: unknown + } | null +} + +export type 
AdvancedChatWorkflowRunPayload = { + conversation_id?: string | null + files?: Array<{ + [key: string]: unknown + }> | null + inputs?: { + [key: string]: unknown + } | null + parent_message_id?: string | null + query?: string +} + +export type AnnotationReplyPayload = { + embedding_model_name: string + embedding_provider_name: string + score_threshold: number +} + +export type AnnotationSettingUpdatePayload = { + score_threshold: number +} + +export type CreateAnnotationPayload = { + annotation_reply?: { + [key: string]: unknown + } | null + answer?: string | null + content?: string | null + message_id?: string | null + question?: string | null +} + +export type Annotation = { + content?: string | null + created_at?: number | null + hit_count?: number | null + id: string + question?: string | null +} + +export type AnnotationCountResponse = { + count: number +} + +export type AnnotationExportList = { + data: Array +} + +export type UpdateAnnotationPayload = { + annotation_reply?: { + [key: string]: unknown + } | null + answer?: string | null + content?: string | null + question?: string | null +} + +export type AnnotationHitHistoryList = { + data: Array + has_more: boolean + limit: number + page: number + total: number +} + +export type AppApiStatusPayload = { + enable_api: boolean +} + +export type AudioTranscriptResponse = { + text: string +} + +export type ConversationWithSummaryPagination = { + has_next: boolean + items: Array + page: number + per_page: number + total: number +} + +export type ConversationDetail = { + admin_feedback_stats?: FeedbackStat + annotated: boolean + created_at?: number | null + from_account_id?: string | null + from_end_user_id?: string | null + from_source: string + id: string + introduction?: string | null + message_count: number + model_config?: ModelConfig + status: string + updated_at?: number | null + user_feedback_stats?: FeedbackStat +} + +export type MessageInfiniteScrollPaginationResponse = { + data: Array + has_more: 
boolean + limit: number +} + +export type SuggestedQuestionsResponse = { + data: Array +} + +export type ConversationPagination = { + has_next: boolean + items: Array + page: number + per_page: number + total: number +} + +export type ConversationMessageDetail = { + created_at?: number | null + first_message?: MessageDetail + from_account_id?: string | null + from_end_user_id?: string | null + from_source: string + id: string + model_config?: ModelConfig + status: string +} + +export type CompletionMessagePayload = { + files?: Array | null + inputs: { + [key: string]: unknown + } + model_config: { + [key: string]: unknown + } + query?: string + response_mode?: 'blocking' | 'streaming' + retriever_from?: string +} + +export type PaginatedConversationVariableResponse = { + data: Array + has_more: boolean + limit: number + page: number + total: number +} + +export type ConvertToWorkflowPayload = { + icon?: string | null + icon_background?: string | null + icon_type?: string | null + name?: string | null +} + +export type CopyAppPayload = { + description?: string | null + icon?: string | null + icon_background?: string | null + icon_type?: IconType + name?: string | null +} + +export type AppExportResponse = { + data: string +} + +export type MessageFeedbackPayload = { + content?: string | null + message_id: string + rating?: 'like' | 'dislike' | null +} + +export type AppIconPayload = { + icon?: string | null + icon_background?: string | null + icon_type?: IconType +} + +export type MessageDetailResponse = { + agent_thoughts?: Array + annotation?: ConversationAnnotation + annotation_hit_history?: ConversationAnnotationHitHistory + answer_tokens?: number | null + conversation_id: string + created_at?: number | null + error?: string | null + extra_contents?: Array + feedbacks?: Array + from_account_id?: string | null + from_end_user_id?: string | null + from_source: string + id: string + inputs: { + [key: string]: JsonValue + } + message?: JsonValue + message_files?: 
Array + message_metadata_dict?: JsonValue + message_tokens?: number | null + parent_message_id?: string | null + provider_response_latency?: number | null + query: string + re_sign_file_url_answer: string + status: string + workflow_run_id?: string | null +} + +export type ModelConfigRequest = { + agent_mode?: { + [key: string]: unknown + } | null + configs?: { + [key: string]: unknown + } | null + dataset_configs?: { + [key: string]: unknown + } | null + model?: string | null + more_like_this?: { + [key: string]: unknown + } | null + opening_statement?: string | null + provider?: string | null + retrieval_model?: { + [key: string]: unknown + } | null + speech_to_text?: { + [key: string]: unknown + } | null + suggested_questions?: Array | null + text_to_speech?: { + [key: string]: unknown + } | null + tools?: Array<{ + [key: string]: unknown + }> | null +} + +export type AppNamePayload = { + name: string +} + +export type AppMcpServerResponse = { + created_at?: number | null + description: string + id: string + name: string + parameters: unknown + server_code: string + status: AppMcpServerStatus + updated_at?: number | null +} + +export type McpServerCreatePayload = { + description?: string | null + parameters: { + [key: string]: unknown + } +} + +export type McpServerUpdatePayload = { + description?: string | null + id: string + parameters: { + [key: string]: unknown + } + status?: string | null +} + +export type AppSiteUpdatePayload = { + chat_color_theme?: string | null + chat_color_theme_inverted?: boolean | null + copyright?: string | null + custom_disclaimer?: string | null + customize_domain?: string | null + customize_token_strategy?: 'must' | 'allow' | 'not_allow' | null + default_language?: string | null + description?: string | null + icon?: string | null + icon_background?: string | null + icon_type?: string | null + privacy_policy?: string | null + prompt_public?: boolean | null + show_workflow_steps?: boolean | null + title?: string | null + 
use_icon_as_answer_icon?: boolean | null +} + +export type AppSiteResponse = { + app_id: string + code?: string | null + copyright?: string | null + custom_disclaimer?: string | null + customize_domain?: string | null + customize_token_strategy: string + default_language: string + description?: string | null + icon?: string | null + icon_background?: string | null + privacy_policy?: string | null + prompt_public: boolean + show_workflow_steps: boolean + title: string + use_icon_as_answer_icon: boolean +} + +export type AppSiteStatusPayload = { + enable_site: boolean +} + +export type TextToSpeechPayload = { + message_id?: string | null + streaming?: boolean | null + text: string + voice?: string | null +} + +export type AppTracePayload = { + enabled: boolean + tracing_provider?: string | null +} + +export type TraceProviderQuery = { + tracing_provider: string +} + +export type TraceConfigPayload = { + tracing_config: { + [key: string]: unknown + } + tracing_provider: string +} + +export type ParserEnable = { + enable_trigger: boolean + trigger_id: string +} + +export type WorkflowTriggerResponse = { + created_at?: string | null + icon: string + id: string + node_id: string + provider_name: string + status: string + title: string + trigger_type: string + updated_at?: string | null +} + +export type WorkflowTriggerListResponse = { + data: Array +} + +export type WorkflowAppLogPaginationResponse = { + data: Array + has_more: boolean + limit: number + page: number + total: number +} + +export type WorkflowArchivedLogPaginationResponse = { + data: Array + has_more: boolean + limit: number + page: number + total: number +} + +export type WorkflowRunPagination = { + [key: string]: unknown +} + +export type WorkflowRunDetail = { + [key: string]: unknown +} + +export type WorkflowRunExport = { + [key: string]: unknown +} + +export type WorkflowRunNodeExecutionList = { + [key: string]: unknown +} + +export type WorkflowCommentBasic = { + [key: string]: unknown +} + +export 
type WorkflowCommentCreatePayload = { + content: string + mentioned_user_ids?: Array + position_x: number + position_y: number +} + +export type WorkflowCommentCreate = { + [key: string]: unknown +} + +export type WorkflowCommentMentionUsersPayload = { + users: Array +} + +export type WorkflowCommentDetail = { + [key: string]: unknown +} + +export type WorkflowCommentUpdatePayload = { + content: string + mentioned_user_ids?: Array | null + position_x?: number | null + position_y?: number | null +} + +export type WorkflowCommentUpdate = { + [key: string]: unknown +} + +export type WorkflowCommentReplyPayload = { + content: string + mentioned_user_ids?: Array +} + +export type WorkflowCommentReplyCreate = { + [key: string]: unknown +} + +export type WorkflowCommentReplyUpdate = { + [key: string]: unknown +} + +export type WorkflowCommentResolve = { + [key: string]: unknown +} + +export type WorkflowPagination = { + [key: string]: unknown +} + +export type Workflow = { + [key: string]: unknown +} + +export type SyncDraftWorkflowPayload = { + conversation_variables?: Array<{ + [key: string]: unknown + }> + environment_variables?: Array<{ + [key: string]: unknown + }> + features: { + [key: string]: unknown + } + graph: { + [key: string]: unknown + } + hash?: string | null +} + +export type SyncDraftWorkflowResponse = { + [key: string]: unknown +} + +export type WorkflowDraftVariableList = { + [key: string]: unknown +} + +export type ConversationVariableUpdatePayload = { + conversation_variables: Array<{ + [key: string]: unknown + }> +} + +export type EnvironmentVariableUpdatePayload = { + environment_variables: Array<{ + [key: string]: unknown + }> +} + +export type WorkflowFeaturesPayload = { + features: { + [key: string]: unknown + } +} + +export type HumanInputDeliveryTestPayload = { + delivery_method_id: string + inputs?: { + [key: string]: unknown + } +} + +export type WorkflowRunNodeExecution = { + [key: string]: unknown +} + +export type 
DraftWorkflowNodeRunPayload = { + files?: Array<{ + [key: string]: unknown + }> | null + inputs: { + [key: string]: unknown + } + query?: string +} + +export type DraftWorkflowRunPayload = { + datasource_info_list: Array<{ + [key: string]: unknown + }> + datasource_type: string + inputs: { + [key: string]: unknown + } + start_node_id: string +} + +export type DraftWorkflowTriggerRunRequest = { + [key: string]: unknown +} + +export type DraftWorkflowTriggerRunAllPayload = { + node_ids: Array +} + +export type WorkflowDraftVariableListWithoutValue = { + [key: string]: unknown +} + +export type WorkflowDraftVariable = { + [key: string]: unknown +} + +export type WorkflowDraftVariableUpdatePayload = { + name?: string | null + value?: unknown +} + +export type PublishWorkflowPayload = { + marked_comment?: string | null + marked_name?: string | null +} + +export type WebhookTriggerResponse = { + created_at?: string | null + id: string + node_id: string + webhook_debug_url: string + webhook_id: string + webhook_url: string +} + +export type WorkflowUpdatePayload = { + marked_comment?: string | null + marked_name?: string | null +} + +export type ApiKeyList = { + data: Array +} + +export type ApiKeyItem = { + created_at?: number | null + id: string + last_used_at?: number | null + token: string + type: string +} + +export type AppPartial = { + access_mode?: string | null + app_model_config?: ModelConfigPartial + author_name?: string | null + create_user_name?: string | null + created_at?: number | null + created_by?: string | null + desc_or_prompt?: string | null + has_draft_trigger?: boolean | null + icon?: string | null + icon_background?: string | null + icon_type?: string | null + id: string + max_active_requests?: number | null + mode_compatible_with_agent: string + name: string + tags?: Array + updated_at?: number | null + updated_by?: string | null + use_icon_as_answer_icon?: boolean | null + workflow?: WorkflowPartial +} + +export type IconType = 'image' | 'emoji' 
| 'link' + +export type ModelConfig = { + agent_mode_dict?: JsonValue + annotation_reply_dict?: JsonValue + chat_prompt_config_dict?: JsonValue + completion_prompt_config_dict?: JsonValue + created_at?: number | null + created_by?: string | null + dataset_configs_dict?: JsonValue + dataset_query_variable?: string | null + external_data_tools_list?: JsonValue + file_upload_dict?: JsonValue + model_dict?: JsonValue + more_like_this_dict?: JsonValue + opening_statement?: string | null + pre_prompt?: string | null + prompt_type?: string | null + retriever_resource_dict?: JsonValue + sensitive_word_avoidance_dict?: JsonValue + speech_to_text_dict?: JsonValue + suggested_questions_after_answer_dict?: JsonValue + suggested_questions_list?: JsonValue + text_to_speech_dict?: JsonValue + updated_at?: number | null + updated_by?: string | null + user_input_form_list?: JsonValue +} + +export type Tag = { + id: string + name: string + type: string +} + +export type JsonValue = unknown + +export type WorkflowPartial = { + created_at?: number | null + created_by?: string | null + id: string + updated_at?: number | null + updated_by?: string | null +} + +export type ImportStatus = 'completed' | 'completed-with-warnings' | 'pending' | 'failed' + +export type PluginDependency = { + current_identifier?: string | null + type: Type + value: unknown +} + +export type DeletedTool = { + provider_id: string + tool_name: string + type: string +} + +export type Site = { + app_base_url?: string | null + chat_color_theme?: string | null + chat_color_theme_inverted?: boolean | null + code?: string | null + copyright?: string | null + created_at?: number | null + created_by?: string | null + custom_disclaimer?: string | null + customize_domain?: string | null + customize_token_strategy?: string | null + default_language?: string | null + description?: string | null + icon?: string | null + icon_background?: string | null + icon_type?: unknown + privacy_policy?: string | null + prompt_public?: 
boolean | null + show_workflow_steps?: boolean | null + title?: string | null + updated_at?: number | null + updated_by?: string | null + use_icon_as_answer_icon?: boolean | null +} + +export type AnnotationHitHistory = { + annotation_content?: string | null + annotation_question?: string | null + created_at?: number | null + id: string + question?: string | null + score?: number | null + source?: string | null +} + +export type ConversationWithSummary = { + admin_feedback_stats?: FeedbackStat + annotated: boolean + created_at?: number | null + from_account_id?: string | null + from_account_name?: string | null + from_end_user_id?: string | null + from_end_user_session_id?: string | null + from_source: string + id: string + message_count: number + model_config?: SimpleModelConfig + name: string + read_at?: number | null + status: string + status_count?: StatusCount + summary_or_query: string + updated_at?: number | null + user_feedback_stats?: FeedbackStat +} + +export type FeedbackStat = { + dislike: number + like: number +} + +export type Conversation = { + admin_feedback_stats?: FeedbackStat + annotation?: ConversationAnnotation + created_at?: number | null + first_message?: SimpleMessageDetail + from_account_id?: string | null + from_account_name?: string | null + from_end_user_id?: string | null + from_end_user_session_id?: string | null + from_source: string + id: string + model_config?: SimpleModelConfig + read_at?: number | null + status: string + updated_at?: number | null + user_feedback_stats?: FeedbackStat +} + +export type MessageDetail = { + agent_thoughts: Array + annotation?: ConversationAnnotation + annotation_hit_history?: ConversationAnnotationHitHistory + answer_tokens: number + conversation_id: string + created_at?: number | null + error?: string | null + feedbacks: Array + from_account_id?: string | null + from_end_user_id?: string | null + from_source: string + id: string + inputs: { + [key: string]: JsonValue + } + message: JsonValue + 
message_files: Array + message_metadata_dict: JsonValue + message_tokens: number + parent_message_id?: string | null + provider_response_latency: number + query: string + re_sign_file_url_answer: string + status: string + workflow_run_id?: string | null +} + +export type ConversationVariableResponse = { + created_at?: number | null + description?: string | null + id: string + name: string + updated_at?: number | null + value?: string | null + value_type: string +} + +export type AgentThought = { + chain_id?: string | null + created_at?: number | null + files: Array + id: string + message_chain_id?: string | null + message_id: string + observation?: string | null + position: number + thought?: string | null + tool?: string | null + tool_input?: string | null + tool_labels: JsonValue +} + +export type ConversationAnnotation = { + account?: SimpleAccount + content: string + created_at?: number | null + id: string + question?: string | null +} + +export type ConversationAnnotationHitHistory = { + annotation_create_account?: SimpleAccount + created_at?: number | null + id: string +} + +export type HumanInputContent = { + form_definition?: HumanInputFormDefinition + form_submission_data?: HumanInputFormSubmissionData + submitted: boolean + type?: ExecutionContentType + workflow_run_id: string +} + +export type Feedback = { + content?: string | null + from_account?: SimpleAccount + from_end_user_id?: string | null + from_source: string + rating: string +} + +export type MessageFile = { + belongs_to?: string | null + filename: string + id: string + mime_type?: string | null + size?: number | null + transfer_method: string + type: string + upload_file_id?: string | null + url?: string | null +} + +export type AppMcpServerStatus = 'normal' | 'active' | 'inactive' + +export type WorkflowAppLogPartialResponse = { + created_at?: number | null + created_by_account?: SimpleAccount + created_by_end_user?: SimpleEndUser + created_by_role?: string | null + created_from?: string | 
null + details?: unknown + id: string + workflow_run?: WorkflowRunForLogResponse +} + +export type WorkflowArchivedLogPartialResponse = { + created_at?: number | null + created_by_account?: SimpleAccount + created_by_end_user?: SimpleEndUser + id: string + trigger_metadata?: unknown + workflow_run?: WorkflowRunForArchivedLogResponse +} + +export type AccountWithRole = { + avatar?: string | null + created_at?: number | null + email: string + id: string + last_active_at?: number | null + last_login_at?: number | null + name: string + role: string + status: string +} + +export type ModelConfigPartial = { + created_at?: number | null + created_by?: string | null + model_dict?: JsonValue + pre_prompt?: string | null + updated_at?: number | null + updated_by?: string | null +} + +export type Type = 'github' | 'marketplace' | 'package' + +export type Github = { + github_plugin_unique_identifier: string + package: string + repo: string + version: string +} + +export type Marketplace = { + marketplace_plugin_unique_identifier: string + version?: string | null +} + +export type Package = { + plugin_unique_identifier: string + version?: string | null +} + +export type SimpleModelConfig = { + model_dict?: JsonValue + pre_prompt?: string | null +} + +export type StatusCount = { + failed: number + partial_success: number + paused: number + success: number +} + +export type SimpleMessageDetail = { + answer: string + inputs: { + [key: string]: JsonValue + } + message: string + query: string +} + +export type SimpleAccount = { + email: string + id: string + name: string +} + +export type HumanInputFormDefinition = { + actions?: Array + display_in_ui?: boolean + expiration_time: number + form_content: string + form_id: string + form_token?: string | null + inputs?: Array + node_id: string + node_title: string + resolved_default_values?: { + [key: string]: unknown + } +} + +export type HumanInputFormSubmissionData = { + action_id: string + action_text: string + node_id: string + 
node_title: string + rendered_content: string +} + +export type ExecutionContentType = 'human_input' + +export type SimpleEndUser = { + id: string + is_anonymous: boolean + session_id?: string | null + type: string +} + +export type WorkflowRunForLogResponse = { + created_at?: number | null + elapsed_time?: number | null + error?: string | null + exceptions_count?: number | null + finished_at?: number | null + id: string + status?: string | null + total_steps?: number | null + total_tokens?: number | null + triggered_from?: string | null + version?: string | null +} + +export type WorkflowRunForArchivedLogResponse = { + elapsed_time?: number | null + id: string + status?: string | null + total_tokens?: number | null + triggered_from?: string | null +} + +export type UserAction = { + button_style?: ButtonStyle + id: string + title: string +} + +export type FormInput = { + default?: FormInputDefault + output_variable_name: string + type: FormInputType +} + +export type ButtonStyle = 'primary' | 'default' | 'accent' | 'ghost' + +export type FormInputDefault = { + selector?: Array + type: PlaceholderType + value?: string +} + +export type FormInputType = 'text_input' | 'paragraph' + +export type PlaceholderType = 'variable' | 'constant' + +export type GetAppsData = { + body?: never + path?: never + query?: { + is_created_by_me?: boolean | null + limit?: number + mode?: 'completion' | 'chat' | 'advanced-chat' | 'workflow' | 'agent-chat' | 'channel' | 'all' + name?: string | null + page?: number + tag_ids?: Array | null + } + url: '/apps' +} + +export type GetAppsResponses = { + 200: AppPagination +} + +export type GetAppsResponse = GetAppsResponses[keyof GetAppsResponses] + +export type PostAppsData = { + body: CreateAppPayload + path?: never + query?: never + url: '/apps' +} + +export type PostAppsErrors = { + 400: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } +} + +export type PostAppsError = PostAppsErrors[keyof PostAppsErrors] + +export type 
PostAppsResponses = { + 201: AppDetail +} + +export type PostAppsResponse = PostAppsResponses[keyof PostAppsResponses] + +export type PostAppsImportsData = { + body: AppImportPayload + path?: never + query?: never + url: '/apps/imports' +} + +export type PostAppsImportsErrors = { + 400: Import +} + +export type PostAppsImportsError = PostAppsImportsErrors[keyof PostAppsImportsErrors] + +export type PostAppsImportsResponses = { + 200: Import + 202: Import +} + +export type PostAppsImportsResponse = PostAppsImportsResponses[keyof PostAppsImportsResponses] + +export type GetAppsImportsByAppIdCheckDependenciesData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/imports/{app_id}/check-dependencies' +} + +export type GetAppsImportsByAppIdCheckDependenciesResponses = { + 200: CheckDependenciesResult +} + +export type GetAppsImportsByAppIdCheckDependenciesResponse + = GetAppsImportsByAppIdCheckDependenciesResponses[keyof GetAppsImportsByAppIdCheckDependenciesResponses] + +export type PostAppsImportsByImportIdConfirmData = { + body?: never + path: { + import_id: string + } + query?: never + url: '/apps/imports/{import_id}/confirm' +} + +export type PostAppsImportsByImportIdConfirmErrors = { + 400: Import +} + +export type PostAppsImportsByImportIdConfirmError + = PostAppsImportsByImportIdConfirmErrors[keyof PostAppsImportsByImportIdConfirmErrors] + +export type PostAppsImportsByImportIdConfirmResponses = { + 200: Import +} + +export type PostAppsImportsByImportIdConfirmResponse + = PostAppsImportsByImportIdConfirmResponses[keyof PostAppsImportsByImportIdConfirmResponses] + +export type GetAppsWorkflowsOnlineUsersData = { + body?: never + path?: never + query: { + app_ids: string + } + url: '/apps/workflows/online-users' +} + +export type GetAppsWorkflowsOnlineUsersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsWorkflowsOnlineUsersResponse + = GetAppsWorkflowsOnlineUsersResponses[keyof 
GetAppsWorkflowsOnlineUsersResponses] + +export type DeleteAppsByAppIdData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}' +} + +export type DeleteAppsByAppIdErrors = { + 403: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdError = DeleteAppsByAppIdErrors[keyof DeleteAppsByAppIdErrors] + +export type DeleteAppsByAppIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdResponse = DeleteAppsByAppIdResponses[keyof DeleteAppsByAppIdResponses] + +export type GetAppsByAppIdData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}' +} + +export type GetAppsByAppIdResponses = { + 200: AppDetailWithSite +} + +export type GetAppsByAppIdResponse = GetAppsByAppIdResponses[keyof GetAppsByAppIdResponses] + +export type PutAppsByAppIdData = { + body: UpdateAppPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}' +} + +export type PutAppsByAppIdErrors = { + 400: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } +} + +export type PutAppsByAppIdError = PutAppsByAppIdErrors[keyof PutAppsByAppIdErrors] + +export type PutAppsByAppIdResponses = { + 200: AppDetailWithSite +} + +export type PutAppsByAppIdResponse = PutAppsByAppIdResponses[keyof PutAppsByAppIdResponses] + +export type GetAppsByAppIdAdvancedChatWorkflowRunsData = { + body?: never + path: { + app_id: string + } + query?: { + triggered_from?: 'debugging' | 'app-run' | null + status?: 'running' | 'succeeded' | 'failed' | 'stopped' | 'partial-succeeded' | null + last_id?: string | null + limit?: number + } + url: '/apps/{app_id}/advanced-chat/workflow-runs' +} + +export type GetAppsByAppIdAdvancedChatWorkflowRunsResponses = { + 200: AdvancedChatWorkflowRunPagination +} + +export type GetAppsByAppIdAdvancedChatWorkflowRunsResponse + = GetAppsByAppIdAdvancedChatWorkflowRunsResponses[keyof GetAppsByAppIdAdvancedChatWorkflowRunsResponses] + +export type 
GetAppsByAppIdAdvancedChatWorkflowRunsCountData = { + body?: never + path: { + app_id: string + } + query?: { + triggered_from?: 'debugging' | 'app-run' | null + time_range?: string | null + status?: 'running' | 'succeeded' | 'failed' | 'stopped' | 'partial-succeeded' | null + } + url: '/apps/{app_id}/advanced-chat/workflow-runs/count' +} + +export type GetAppsByAppIdAdvancedChatWorkflowRunsCountResponses = { + 200: WorkflowRunCount +} + +export type GetAppsByAppIdAdvancedChatWorkflowRunsCountResponse + = GetAppsByAppIdAdvancedChatWorkflowRunsCountResponses[keyof GetAppsByAppIdAdvancedChatWorkflowRunsCountResponses] + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewData = { + body: HumanInputFormPreviewPayload + path: { + app_id: string + node_id: string + } + query?: never + url: '/apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/preview' +} + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponse + = PostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponses[keyof PostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponses] + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunData = { + body: HumanInputFormSubmitPayload + path: { + app_id: string + node_id: string + } + query?: never + url: '/apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/run' +} + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunResponse + = PostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunResponses[keyof 
PostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunResponses] + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunData = { + body: IterationNodeRunPayload + path: { + app_id: string + node_id: string + } + query?: never + url: '/apps/{app_id}/advanced-chat/workflows/draft/iteration/nodes/{node_id}/run' +} + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunError + = PostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunErrors[keyof PostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunErrors] + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunResponse + = PostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunResponses[keyof PostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunResponses] + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunData = { + body: LoopNodeRunPayload + path: { + app_id: string + node_id: string + } + query?: never + url: '/apps/{app_id}/advanced-chat/workflows/draft/loop/nodes/{node_id}/run' +} + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunError + = PostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunErrors[keyof PostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunErrors] + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
PostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunResponse + = PostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunResponses[keyof PostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunResponses] + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftRunData = { + body: AdvancedChatWorkflowRunPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/advanced-chat/workflows/draft/run' +} + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftRunErrors = { + 400: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftRunError + = PostAppsByAppIdAdvancedChatWorkflowsDraftRunErrors[keyof PostAppsByAppIdAdvancedChatWorkflowsDraftRunErrors] + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAdvancedChatWorkflowsDraftRunResponse + = PostAppsByAppIdAdvancedChatWorkflowsDraftRunResponses[keyof PostAppsByAppIdAdvancedChatWorkflowsDraftRunResponses] + +export type GetAppsByAppIdAgentLogsData = { + body?: never + path: { + app_id: string + } + query: { + conversation_id: string + message_id: string + } + url: '/apps/{app_id}/agent/logs' +} + +export type GetAppsByAppIdAgentLogsErrors = { + 400: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdAgentLogsError + = GetAppsByAppIdAgentLogsErrors[keyof GetAppsByAppIdAgentLogsErrors] + +export type GetAppsByAppIdAgentLogsResponses = { + 200: Array<{ + [key: string]: unknown + }> +} + +export type GetAppsByAppIdAgentLogsResponse + = GetAppsByAppIdAgentLogsResponses[keyof GetAppsByAppIdAgentLogsResponses] + +export type PostAppsByAppIdAnnotationReplyByActionData = { + body: AnnotationReplyPayload + path: { + app_id: string + action: string + } + query?: never + url: '/apps/{app_id}/annotation-reply/{action}' +} + +export type PostAppsByAppIdAnnotationReplyByActionErrors = { + 403: { + [key: string]: unknown + } 
+} + +export type PostAppsByAppIdAnnotationReplyByActionError + = PostAppsByAppIdAnnotationReplyByActionErrors[keyof PostAppsByAppIdAnnotationReplyByActionErrors] + +export type PostAppsByAppIdAnnotationReplyByActionResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAnnotationReplyByActionResponse + = PostAppsByAppIdAnnotationReplyByActionResponses[keyof PostAppsByAppIdAnnotationReplyByActionResponses] + +export type GetAppsByAppIdAnnotationReplyByActionStatusByJobIdData = { + body?: never + path: { + app_id: string + action: string + job_id: string + } + query?: never + url: '/apps/{app_id}/annotation-reply/{action}/status/{job_id}' +} + +export type GetAppsByAppIdAnnotationReplyByActionStatusByJobIdErrors = { + 403: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdAnnotationReplyByActionStatusByJobIdError + = GetAppsByAppIdAnnotationReplyByActionStatusByJobIdErrors[keyof GetAppsByAppIdAnnotationReplyByActionStatusByJobIdErrors] + +export type GetAppsByAppIdAnnotationReplyByActionStatusByJobIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdAnnotationReplyByActionStatusByJobIdResponse + = GetAppsByAppIdAnnotationReplyByActionStatusByJobIdResponses[keyof GetAppsByAppIdAnnotationReplyByActionStatusByJobIdResponses] + +export type GetAppsByAppIdAnnotationSettingData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/annotation-setting' +} + +export type GetAppsByAppIdAnnotationSettingErrors = { + 403: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdAnnotationSettingError + = GetAppsByAppIdAnnotationSettingErrors[keyof GetAppsByAppIdAnnotationSettingErrors] + +export type GetAppsByAppIdAnnotationSettingResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdAnnotationSettingResponse + = GetAppsByAppIdAnnotationSettingResponses[keyof GetAppsByAppIdAnnotationSettingResponses] + +export type 
PostAppsByAppIdAnnotationSettingsByAnnotationSettingIdData = { + body: AnnotationSettingUpdatePayload + path: { + app_id: string + annotation_setting_id: string + } + query?: never + url: '/apps/{app_id}/annotation-settings/{annotation_setting_id}' +} + +export type PostAppsByAppIdAnnotationSettingsByAnnotationSettingIdErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAnnotationSettingsByAnnotationSettingIdError + = PostAppsByAppIdAnnotationSettingsByAnnotationSettingIdErrors[keyof PostAppsByAppIdAnnotationSettingsByAnnotationSettingIdErrors] + +export type PostAppsByAppIdAnnotationSettingsByAnnotationSettingIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAnnotationSettingsByAnnotationSettingIdResponse + = PostAppsByAppIdAnnotationSettingsByAnnotationSettingIdResponses[keyof PostAppsByAppIdAnnotationSettingsByAnnotationSettingIdResponses] + +export type DeleteAppsByAppIdAnnotationsData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/annotations' +} + +export type DeleteAppsByAppIdAnnotationsResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdAnnotationsResponse + = DeleteAppsByAppIdAnnotationsResponses[keyof DeleteAppsByAppIdAnnotationsResponses] + +export type GetAppsByAppIdAnnotationsData = { + body?: never + path: { + app_id: string + } + query?: { + keyword?: string + limit?: number + page?: number + } + url: '/apps/{app_id}/annotations' +} + +export type GetAppsByAppIdAnnotationsErrors = { + 403: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdAnnotationsError + = GetAppsByAppIdAnnotationsErrors[keyof GetAppsByAppIdAnnotationsErrors] + +export type GetAppsByAppIdAnnotationsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdAnnotationsResponse + = GetAppsByAppIdAnnotationsResponses[keyof GetAppsByAppIdAnnotationsResponses] + +export type PostAppsByAppIdAnnotationsData = 
{ + body: CreateAnnotationPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/annotations' +} + +export type PostAppsByAppIdAnnotationsErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAnnotationsError + = PostAppsByAppIdAnnotationsErrors[keyof PostAppsByAppIdAnnotationsErrors] + +export type PostAppsByAppIdAnnotationsResponses = { + 201: Annotation +} + +export type PostAppsByAppIdAnnotationsResponse + = PostAppsByAppIdAnnotationsResponses[keyof PostAppsByAppIdAnnotationsResponses] + +export type PostAppsByAppIdAnnotationsBatchImportData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/annotations/batch-import' +} + +export type PostAppsByAppIdAnnotationsBatchImportErrors = { + 400: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 413: { + [key: string]: unknown + } + 429: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAnnotationsBatchImportError + = PostAppsByAppIdAnnotationsBatchImportErrors[keyof PostAppsByAppIdAnnotationsBatchImportErrors] + +export type PostAppsByAppIdAnnotationsBatchImportResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAnnotationsBatchImportResponse + = PostAppsByAppIdAnnotationsBatchImportResponses[keyof PostAppsByAppIdAnnotationsBatchImportResponses] + +export type GetAppsByAppIdAnnotationsBatchImportStatusByJobIdData = { + body?: never + path: { + app_id: string + job_id: string + } + query?: never + url: '/apps/{app_id}/annotations/batch-import-status/{job_id}' +} + +export type GetAppsByAppIdAnnotationsBatchImportStatusByJobIdErrors = { + 403: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdAnnotationsBatchImportStatusByJobIdError + = GetAppsByAppIdAnnotationsBatchImportStatusByJobIdErrors[keyof GetAppsByAppIdAnnotationsBatchImportStatusByJobIdErrors] + +export type GetAppsByAppIdAnnotationsBatchImportStatusByJobIdResponses = { + 200: { + [key: 
string]: unknown + } +} + +export type GetAppsByAppIdAnnotationsBatchImportStatusByJobIdResponse + = GetAppsByAppIdAnnotationsBatchImportStatusByJobIdResponses[keyof GetAppsByAppIdAnnotationsBatchImportStatusByJobIdResponses] + +export type GetAppsByAppIdAnnotationsCountData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/annotations/count' +} + +export type GetAppsByAppIdAnnotationsCountResponses = { + 200: AnnotationCountResponse +} + +export type GetAppsByAppIdAnnotationsCountResponse + = GetAppsByAppIdAnnotationsCountResponses[keyof GetAppsByAppIdAnnotationsCountResponses] + +export type GetAppsByAppIdAnnotationsExportData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/annotations/export' +} + +export type GetAppsByAppIdAnnotationsExportErrors = { + 403: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdAnnotationsExportError + = GetAppsByAppIdAnnotationsExportErrors[keyof GetAppsByAppIdAnnotationsExportErrors] + +export type GetAppsByAppIdAnnotationsExportResponses = { + 200: AnnotationExportList +} + +export type GetAppsByAppIdAnnotationsExportResponse + = GetAppsByAppIdAnnotationsExportResponses[keyof GetAppsByAppIdAnnotationsExportResponses] + +export type DeleteAppsByAppIdAnnotationsByAnnotationIdData = { + body?: never + path: { + annotation_id: string + app_id: string + } + query?: never + url: '/apps/{app_id}/annotations/{annotation_id}' +} + +export type DeleteAppsByAppIdAnnotationsByAnnotationIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdAnnotationsByAnnotationIdResponse + = DeleteAppsByAppIdAnnotationsByAnnotationIdResponses[keyof DeleteAppsByAppIdAnnotationsByAnnotationIdResponses] + +export type PostAppsByAppIdAnnotationsByAnnotationIdData = { + body: UpdateAnnotationPayload + path: { + app_id: string + annotation_id: string + } + query?: never + url: '/apps/{app_id}/annotations/{annotation_id}' +} + +export 
type PostAppsByAppIdAnnotationsByAnnotationIdErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAnnotationsByAnnotationIdError + = PostAppsByAppIdAnnotationsByAnnotationIdErrors[keyof PostAppsByAppIdAnnotationsByAnnotationIdErrors] + +export type PostAppsByAppIdAnnotationsByAnnotationIdResponses = { + 200: Annotation + 204: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAnnotationsByAnnotationIdResponse + = PostAppsByAppIdAnnotationsByAnnotationIdResponses[keyof PostAppsByAppIdAnnotationsByAnnotationIdResponses] + +export type GetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesData = { + body?: never + path: { + app_id: string + annotation_id: string + } + query?: { + page?: number + limit?: number + } + url: '/apps/{app_id}/annotations/{annotation_id}/hit-histories' +} + +export type GetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesErrors = { + 403: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesError + = GetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesErrors[keyof GetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesErrors] + +export type GetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesResponses = { + 200: AnnotationHitHistoryList +} + +export type GetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesResponse + = GetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesResponses[keyof GetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesResponses] + +export type PostAppsByAppIdApiEnableData = { + body: AppApiStatusPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/api-enable' +} + +export type PostAppsByAppIdApiEnableErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdApiEnableError + = PostAppsByAppIdApiEnableErrors[keyof PostAppsByAppIdApiEnableErrors] + +export type PostAppsByAppIdApiEnableResponses = { + 200: AppDetail +} + +export type PostAppsByAppIdApiEnableResponse + = 
PostAppsByAppIdApiEnableResponses[keyof PostAppsByAppIdApiEnableResponses] + +export type PostAppsByAppIdAudioToTextData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/audio-to-text' +} + +export type PostAppsByAppIdAudioToTextErrors = { + 400: { + [key: string]: unknown + } + 413: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdAudioToTextError + = PostAppsByAppIdAudioToTextErrors[keyof PostAppsByAppIdAudioToTextErrors] + +export type PostAppsByAppIdAudioToTextResponses = { + 200: AudioTranscriptResponse +} + +export type PostAppsByAppIdAudioToTextResponse + = PostAppsByAppIdAudioToTextResponses[keyof PostAppsByAppIdAudioToTextResponses] + +export type GetAppsByAppIdChatConversationsData = { + body?: never + path: { + app_id: string + } + query?: { + annotation_status?: 'annotated' | 'not_annotated' | 'all' + end?: string | null + keyword?: string | null + limit?: number + page?: number + sort_by?: 'created_at' | '-created_at' | 'updated_at' | '-updated_at' + start?: string | null + } + url: '/apps/{app_id}/chat-conversations' +} + +export type GetAppsByAppIdChatConversationsErrors = { + 403: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdChatConversationsError + = GetAppsByAppIdChatConversationsErrors[keyof GetAppsByAppIdChatConversationsErrors] + +export type GetAppsByAppIdChatConversationsResponses = { + 200: ConversationWithSummaryPagination +} + +export type GetAppsByAppIdChatConversationsResponse + = GetAppsByAppIdChatConversationsResponses[keyof GetAppsByAppIdChatConversationsResponses] + +export type DeleteAppsByAppIdChatConversationsByConversationIdData = { + body?: never + path: { + app_id: string + conversation_id: string + } + query?: never + url: '/apps/{app_id}/chat-conversations/{conversation_id}' +} + +export type DeleteAppsByAppIdChatConversationsByConversationIdErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type 
DeleteAppsByAppIdChatConversationsByConversationIdError + = DeleteAppsByAppIdChatConversationsByConversationIdErrors[keyof DeleteAppsByAppIdChatConversationsByConversationIdErrors] + +export type DeleteAppsByAppIdChatConversationsByConversationIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdChatConversationsByConversationIdResponse + = DeleteAppsByAppIdChatConversationsByConversationIdResponses[keyof DeleteAppsByAppIdChatConversationsByConversationIdResponses] + +export type GetAppsByAppIdChatConversationsByConversationIdData = { + body?: never + path: { + app_id: string + conversation_id: string + } + query?: never + url: '/apps/{app_id}/chat-conversations/{conversation_id}' +} + +export type GetAppsByAppIdChatConversationsByConversationIdErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdChatConversationsByConversationIdError + = GetAppsByAppIdChatConversationsByConversationIdErrors[keyof GetAppsByAppIdChatConversationsByConversationIdErrors] + +export type GetAppsByAppIdChatConversationsByConversationIdResponses = { + 200: ConversationDetail +} + +export type GetAppsByAppIdChatConversationsByConversationIdResponse + = GetAppsByAppIdChatConversationsByConversationIdResponses[keyof GetAppsByAppIdChatConversationsByConversationIdResponses] + +export type GetAppsByAppIdChatMessagesData = { + body?: never + path: { + app_id: string + } + query: { + conversation_id: string + first_id?: string | null + limit?: number + } + url: '/apps/{app_id}/chat-messages' +} + +export type GetAppsByAppIdChatMessagesErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdChatMessagesError + = GetAppsByAppIdChatMessagesErrors[keyof GetAppsByAppIdChatMessagesErrors] + +export type GetAppsByAppIdChatMessagesResponses = { + 200: MessageInfiniteScrollPaginationResponse +} + +export type GetAppsByAppIdChatMessagesResponse + = 
GetAppsByAppIdChatMessagesResponses[keyof GetAppsByAppIdChatMessagesResponses] + +export type GetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsData = { + body?: never + path: { + app_id: string + message_id: string + } + query?: never + url: '/apps/{app_id}/chat-messages/{message_id}/suggested-questions' +} + +export type GetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsError + = GetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsErrors[keyof GetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsErrors] + +export type GetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsResponses = { + 200: SuggestedQuestionsResponse +} + +export type GetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsResponse + = GetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsResponses[keyof GetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsResponses] + +export type PostAppsByAppIdChatMessagesByTaskIdStopData = { + body?: never + path: { + app_id: string + task_id: string + } + query?: never + url: '/apps/{app_id}/chat-messages/{task_id}/stop' +} + +export type PostAppsByAppIdChatMessagesByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdChatMessagesByTaskIdStopResponse + = PostAppsByAppIdChatMessagesByTaskIdStopResponses[keyof PostAppsByAppIdChatMessagesByTaskIdStopResponses] + +export type GetAppsByAppIdCompletionConversationsData = { + body?: never + path: { + app_id: string + } + query?: { + annotation_status?: 'annotated' | 'not_annotated' | 'all' + end?: string | null + keyword?: string | null + limit?: number + page?: number + start?: string | null + } + url: '/apps/{app_id}/completion-conversations' +} + +export type GetAppsByAppIdCompletionConversationsErrors = { + 403: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdCompletionConversationsError + = 
GetAppsByAppIdCompletionConversationsErrors[keyof GetAppsByAppIdCompletionConversationsErrors] + +export type GetAppsByAppIdCompletionConversationsResponses = { + 200: ConversationPagination +} + +export type GetAppsByAppIdCompletionConversationsResponse + = GetAppsByAppIdCompletionConversationsResponses[keyof GetAppsByAppIdCompletionConversationsResponses] + +export type DeleteAppsByAppIdCompletionConversationsByConversationIdData = { + body?: never + path: { + app_id: string + conversation_id: string + } + query?: never + url: '/apps/{app_id}/completion-conversations/{conversation_id}' +} + +export type DeleteAppsByAppIdCompletionConversationsByConversationIdErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdCompletionConversationsByConversationIdError + = DeleteAppsByAppIdCompletionConversationsByConversationIdErrors[keyof DeleteAppsByAppIdCompletionConversationsByConversationIdErrors] + +export type DeleteAppsByAppIdCompletionConversationsByConversationIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdCompletionConversationsByConversationIdResponse + = DeleteAppsByAppIdCompletionConversationsByConversationIdResponses[keyof DeleteAppsByAppIdCompletionConversationsByConversationIdResponses] + +export type GetAppsByAppIdCompletionConversationsByConversationIdData = { + body?: never + path: { + app_id: string + conversation_id: string + } + query?: never + url: '/apps/{app_id}/completion-conversations/{conversation_id}' +} + +export type GetAppsByAppIdCompletionConversationsByConversationIdErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdCompletionConversationsByConversationIdError + = GetAppsByAppIdCompletionConversationsByConversationIdErrors[keyof GetAppsByAppIdCompletionConversationsByConversationIdErrors] + +export type GetAppsByAppIdCompletionConversationsByConversationIdResponses = 
{ + 200: ConversationMessageDetail +} + +export type GetAppsByAppIdCompletionConversationsByConversationIdResponse + = GetAppsByAppIdCompletionConversationsByConversationIdResponses[keyof GetAppsByAppIdCompletionConversationsByConversationIdResponses] + +export type PostAppsByAppIdCompletionMessagesData = { + body: CompletionMessagePayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/completion-messages' +} + +export type PostAppsByAppIdCompletionMessagesErrors = { + 400: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdCompletionMessagesError + = PostAppsByAppIdCompletionMessagesErrors[keyof PostAppsByAppIdCompletionMessagesErrors] + +export type PostAppsByAppIdCompletionMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdCompletionMessagesResponse + = PostAppsByAppIdCompletionMessagesResponses[keyof PostAppsByAppIdCompletionMessagesResponses] + +export type PostAppsByAppIdCompletionMessagesByTaskIdStopData = { + body?: never + path: { + app_id: string + task_id: string + } + query?: never + url: '/apps/{app_id}/completion-messages/{task_id}/stop' +} + +export type PostAppsByAppIdCompletionMessagesByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdCompletionMessagesByTaskIdStopResponse + = PostAppsByAppIdCompletionMessagesByTaskIdStopResponses[keyof PostAppsByAppIdCompletionMessagesByTaskIdStopResponses] + +export type GetAppsByAppIdConversationVariablesData = { + body?: never + path: { + app_id: string + } + query: { + conversation_id: string + } + url: '/apps/{app_id}/conversation-variables' +} + +export type GetAppsByAppIdConversationVariablesResponses = { + 200: PaginatedConversationVariableResponse +} + +export type GetAppsByAppIdConversationVariablesResponse + = GetAppsByAppIdConversationVariablesResponses[keyof GetAppsByAppIdConversationVariablesResponses] + +export type 
PostAppsByAppIdConvertToWorkflowData = { + body: ConvertToWorkflowPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/convert-to-workflow' +} + +export type PostAppsByAppIdConvertToWorkflowErrors = { + 400: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdConvertToWorkflowError + = PostAppsByAppIdConvertToWorkflowErrors[keyof PostAppsByAppIdConvertToWorkflowErrors] + +export type PostAppsByAppIdConvertToWorkflowResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdConvertToWorkflowResponse + = PostAppsByAppIdConvertToWorkflowResponses[keyof PostAppsByAppIdConvertToWorkflowResponses] + +export type PostAppsByAppIdCopyData = { + body: CopyAppPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/copy' +} + +export type PostAppsByAppIdCopyErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdCopyError = PostAppsByAppIdCopyErrors[keyof PostAppsByAppIdCopyErrors] + +export type PostAppsByAppIdCopyResponses = { + 201: AppDetailWithSite +} + +export type PostAppsByAppIdCopyResponse + = PostAppsByAppIdCopyResponses[keyof PostAppsByAppIdCopyResponses] + +export type GetAppsByAppIdExportData = { + body?: never + path: { + app_id: string + } + query?: { + include_secret?: boolean + workflow_id?: string | null + } + url: '/apps/{app_id}/export' +} + +export type GetAppsByAppIdExportErrors = { + 403: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdExportError = GetAppsByAppIdExportErrors[keyof GetAppsByAppIdExportErrors] + +export type GetAppsByAppIdExportResponses = { + 200: AppExportResponse +} + +export type GetAppsByAppIdExportResponse + = GetAppsByAppIdExportResponses[keyof GetAppsByAppIdExportResponses] + +export type PostAppsByAppIdFeedbacksData = { + body: MessageFeedbackPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/feedbacks' +} + +export type 
PostAppsByAppIdFeedbacksErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdFeedbacksError + = PostAppsByAppIdFeedbacksErrors[keyof PostAppsByAppIdFeedbacksErrors] + +export type PostAppsByAppIdFeedbacksResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdFeedbacksResponse + = PostAppsByAppIdFeedbacksResponses[keyof PostAppsByAppIdFeedbacksResponses] + +export type GetAppsByAppIdFeedbacksExportData = { + body?: never + path: { + app_id: string + } + query?: { + end_date?: string | null + format?: 'csv' | 'json' + from_source?: 'user' | 'admin' | null + has_comment?: boolean | null + rating?: 'like' | 'dislike' | null + start_date?: string | null + } + url: '/apps/{app_id}/feedbacks/export' +} + +export type GetAppsByAppIdFeedbacksExportErrors = { + 400: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdFeedbacksExportError + = GetAppsByAppIdFeedbacksExportErrors[keyof GetAppsByAppIdFeedbacksExportErrors] + +export type GetAppsByAppIdFeedbacksExportResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdFeedbacksExportResponse + = GetAppsByAppIdFeedbacksExportResponses[keyof GetAppsByAppIdFeedbacksExportResponses] + +export type PostAppsByAppIdIconData = { + body: AppIconPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/icon' +} + +export type PostAppsByAppIdIconErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdIconError = PostAppsByAppIdIconErrors[keyof PostAppsByAppIdIconErrors] + +export type PostAppsByAppIdIconResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdIconResponse + = PostAppsByAppIdIconResponses[keyof PostAppsByAppIdIconResponses] + +export type GetAppsByAppIdMessagesByMessageIdData = { + body?: never + path: { + app_id: string + message_id: string + } + query?: never + url: 
'/apps/{app_id}/messages/{message_id}' +} + +export type GetAppsByAppIdMessagesByMessageIdErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdMessagesByMessageIdError + = GetAppsByAppIdMessagesByMessageIdErrors[keyof GetAppsByAppIdMessagesByMessageIdErrors] + +export type GetAppsByAppIdMessagesByMessageIdResponses = { + 200: MessageDetailResponse +} + +export type GetAppsByAppIdMessagesByMessageIdResponse + = GetAppsByAppIdMessagesByMessageIdResponses[keyof GetAppsByAppIdMessagesByMessageIdResponses] + +export type PostAppsByAppIdModelConfigData = { + body: ModelConfigRequest + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/model-config' +} + +export type PostAppsByAppIdModelConfigErrors = { + 400: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdModelConfigError + = PostAppsByAppIdModelConfigErrors[keyof PostAppsByAppIdModelConfigErrors] + +export type PostAppsByAppIdModelConfigResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdModelConfigResponse + = PostAppsByAppIdModelConfigResponses[keyof PostAppsByAppIdModelConfigResponses] + +export type PostAppsByAppIdNameData = { + body: AppNamePayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/name' +} + +export type PostAppsByAppIdNameResponses = { + 200: AppDetail +} + +export type PostAppsByAppIdNameResponse + = PostAppsByAppIdNameResponses[keyof PostAppsByAppIdNameResponses] + +export type PostAppsByAppIdPublishToCreatorsPlatformData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/publish-to-creators-platform' +} + +export type PostAppsByAppIdPublishToCreatorsPlatformResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdPublishToCreatorsPlatformResponse + = PostAppsByAppIdPublishToCreatorsPlatformResponses[keyof PostAppsByAppIdPublishToCreatorsPlatformResponses] + +export type 
GetAppsByAppIdServerData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/server' +} + +export type GetAppsByAppIdServerResponses = { + 200: AppMcpServerResponse +} + +export type GetAppsByAppIdServerResponse + = GetAppsByAppIdServerResponses[keyof GetAppsByAppIdServerResponses] + +export type PostAppsByAppIdServerData = { + body: McpServerCreatePayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/server' +} + +export type PostAppsByAppIdServerErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdServerError + = PostAppsByAppIdServerErrors[keyof PostAppsByAppIdServerErrors] + +export type PostAppsByAppIdServerResponses = { + 201: AppMcpServerResponse +} + +export type PostAppsByAppIdServerResponse + = PostAppsByAppIdServerResponses[keyof PostAppsByAppIdServerResponses] + +export type PutAppsByAppIdServerData = { + body: McpServerUpdatePayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/server' +} + +export type PutAppsByAppIdServerErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PutAppsByAppIdServerError = PutAppsByAppIdServerErrors[keyof PutAppsByAppIdServerErrors] + +export type PutAppsByAppIdServerResponses = { + 200: AppMcpServerResponse +} + +export type PutAppsByAppIdServerResponse + = PutAppsByAppIdServerResponses[keyof PutAppsByAppIdServerResponses] + +export type PostAppsByAppIdSiteData = { + body: AppSiteUpdatePayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/site' +} + +export type PostAppsByAppIdSiteErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdSiteError = PostAppsByAppIdSiteErrors[keyof PostAppsByAppIdSiteErrors] + +export type PostAppsByAppIdSiteResponses = { + 200: AppSiteResponse +} + +export type PostAppsByAppIdSiteResponse + = PostAppsByAppIdSiteResponses[keyof 
PostAppsByAppIdSiteResponses] + +export type PostAppsByAppIdSiteEnableData = { + body: AppSiteStatusPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/site-enable' +} + +export type PostAppsByAppIdSiteEnableErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdSiteEnableError + = PostAppsByAppIdSiteEnableErrors[keyof PostAppsByAppIdSiteEnableErrors] + +export type PostAppsByAppIdSiteEnableResponses = { + 200: AppDetail +} + +export type PostAppsByAppIdSiteEnableResponse + = PostAppsByAppIdSiteEnableResponses[keyof PostAppsByAppIdSiteEnableResponses] + +export type PostAppsByAppIdSiteAccessTokenResetData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/site/access-token-reset' +} + +export type PostAppsByAppIdSiteAccessTokenResetErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdSiteAccessTokenResetError + = PostAppsByAppIdSiteAccessTokenResetErrors[keyof PostAppsByAppIdSiteAccessTokenResetErrors] + +export type PostAppsByAppIdSiteAccessTokenResetResponses = { + 200: AppSiteResponse +} + +export type PostAppsByAppIdSiteAccessTokenResetResponse + = PostAppsByAppIdSiteAccessTokenResetResponses[keyof PostAppsByAppIdSiteAccessTokenResetResponses] + +export type GetAppsByAppIdStatisticsAverageResponseTimeData = { + body?: never + path: { + app_id: string + } + query?: { + end?: string | null + start?: string | null + } + url: '/apps/{app_id}/statistics/average-response-time' +} + +export type GetAppsByAppIdStatisticsAverageResponseTimeResponses = { + 200: Array<{ + [key: string]: unknown + }> +} + +export type GetAppsByAppIdStatisticsAverageResponseTimeResponse + = GetAppsByAppIdStatisticsAverageResponseTimeResponses[keyof GetAppsByAppIdStatisticsAverageResponseTimeResponses] + +export type GetAppsByAppIdStatisticsAverageSessionInteractionsData = { + body?: never + path: { + app_id: string + } + query?: { + 
end?: string | null + start?: string | null + } + url: '/apps/{app_id}/statistics/average-session-interactions' +} + +export type GetAppsByAppIdStatisticsAverageSessionInteractionsResponses = { + 200: Array<{ + [key: string]: unknown + }> +} + +export type GetAppsByAppIdStatisticsAverageSessionInteractionsResponse + = GetAppsByAppIdStatisticsAverageSessionInteractionsResponses[keyof GetAppsByAppIdStatisticsAverageSessionInteractionsResponses] + +export type GetAppsByAppIdStatisticsDailyConversationsData = { + body?: never + path: { + app_id: string + } + query?: { + end?: string | null + start?: string | null + } + url: '/apps/{app_id}/statistics/daily-conversations' +} + +export type GetAppsByAppIdStatisticsDailyConversationsResponses = { + 200: Array<{ + [key: string]: unknown + }> +} + +export type GetAppsByAppIdStatisticsDailyConversationsResponse + = GetAppsByAppIdStatisticsDailyConversationsResponses[keyof GetAppsByAppIdStatisticsDailyConversationsResponses] + +export type GetAppsByAppIdStatisticsDailyEndUsersData = { + body?: never + path: { + app_id: string + } + query?: { + end?: string | null + start?: string | null + } + url: '/apps/{app_id}/statistics/daily-end-users' +} + +export type GetAppsByAppIdStatisticsDailyEndUsersResponses = { + 200: Array<{ + [key: string]: unknown + }> +} + +export type GetAppsByAppIdStatisticsDailyEndUsersResponse + = GetAppsByAppIdStatisticsDailyEndUsersResponses[keyof GetAppsByAppIdStatisticsDailyEndUsersResponses] + +export type GetAppsByAppIdStatisticsDailyMessagesData = { + body?: never + path: { + app_id: string + } + query?: { + end?: string | null + start?: string | null + } + url: '/apps/{app_id}/statistics/daily-messages' +} + +export type GetAppsByAppIdStatisticsDailyMessagesResponses = { + 200: Array<{ + [key: string]: unknown + }> +} + +export type GetAppsByAppIdStatisticsDailyMessagesResponse + = GetAppsByAppIdStatisticsDailyMessagesResponses[keyof GetAppsByAppIdStatisticsDailyMessagesResponses] + +export type 
GetAppsByAppIdStatisticsTokenCostsData = { + body?: never + path: { + app_id: string + } + query?: { + end?: string | null + start?: string | null + } + url: '/apps/{app_id}/statistics/token-costs' +} + +export type GetAppsByAppIdStatisticsTokenCostsResponses = { + 200: Array<{ + [key: string]: unknown + }> +} + +export type GetAppsByAppIdStatisticsTokenCostsResponse + = GetAppsByAppIdStatisticsTokenCostsResponses[keyof GetAppsByAppIdStatisticsTokenCostsResponses] + +export type GetAppsByAppIdStatisticsTokensPerSecondData = { + body?: never + path: { + app_id: string + } + query?: { + end?: string | null + start?: string | null + } + url: '/apps/{app_id}/statistics/tokens-per-second' +} + +export type GetAppsByAppIdStatisticsTokensPerSecondResponses = { + 200: Array<{ + [key: string]: unknown + }> +} + +export type GetAppsByAppIdStatisticsTokensPerSecondResponse + = GetAppsByAppIdStatisticsTokensPerSecondResponses[keyof GetAppsByAppIdStatisticsTokensPerSecondResponses] + +export type GetAppsByAppIdStatisticsUserSatisfactionRateData = { + body?: never + path: { + app_id: string + } + query?: { + end?: string | null + start?: string | null + } + url: '/apps/{app_id}/statistics/user-satisfaction-rate' +} + +export type GetAppsByAppIdStatisticsUserSatisfactionRateResponses = { + 200: Array<{ + [key: string]: unknown + }> +} + +export type GetAppsByAppIdStatisticsUserSatisfactionRateResponse + = GetAppsByAppIdStatisticsUserSatisfactionRateResponses[keyof GetAppsByAppIdStatisticsUserSatisfactionRateResponses] + +export type PostAppsByAppIdTextToAudioData = { + body: TextToSpeechPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/text-to-audio' +} + +export type PostAppsByAppIdTextToAudioErrors = { + 400: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdTextToAudioError + = PostAppsByAppIdTextToAudioErrors[keyof PostAppsByAppIdTextToAudioErrors] + +export type PostAppsByAppIdTextToAudioResponses = { + 200: { + [key: string]: 
unknown + } +} + +export type PostAppsByAppIdTextToAudioResponse + = PostAppsByAppIdTextToAudioResponses[keyof PostAppsByAppIdTextToAudioResponses] + +export type GetAppsByAppIdTextToAudioVoicesData = { + body?: never + path: { + app_id: string + } + query: { + language: string + } + url: '/apps/{app_id}/text-to-audio/voices' +} + +export type GetAppsByAppIdTextToAudioVoicesErrors = { + 400: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdTextToAudioVoicesError + = GetAppsByAppIdTextToAudioVoicesErrors[keyof GetAppsByAppIdTextToAudioVoicesErrors] + +export type GetAppsByAppIdTextToAudioVoicesResponses = { + 200: Array<{ + [key: string]: unknown + }> +} + +export type GetAppsByAppIdTextToAudioVoicesResponse + = GetAppsByAppIdTextToAudioVoicesResponses[keyof GetAppsByAppIdTextToAudioVoicesResponses] + +export type GetAppsByAppIdTraceData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/trace' +} + +export type GetAppsByAppIdTraceResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdTraceResponse + = GetAppsByAppIdTraceResponses[keyof GetAppsByAppIdTraceResponses] + +export type PostAppsByAppIdTraceData = { + body: AppTracePayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/trace' +} + +export type PostAppsByAppIdTraceErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdTraceError = PostAppsByAppIdTraceErrors[keyof PostAppsByAppIdTraceErrors] + +export type PostAppsByAppIdTraceResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdTraceResponse + = PostAppsByAppIdTraceResponses[keyof PostAppsByAppIdTraceResponses] + +export type DeleteAppsByAppIdTraceConfigData = { + body: TraceProviderQuery + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/trace-config' +} + +export type DeleteAppsByAppIdTraceConfigErrors = { + 400: { + [key: string]: unknown + } +} + +export type 
DeleteAppsByAppIdTraceConfigError + = DeleteAppsByAppIdTraceConfigErrors[keyof DeleteAppsByAppIdTraceConfigErrors] + +export type DeleteAppsByAppIdTraceConfigResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdTraceConfigResponse + = DeleteAppsByAppIdTraceConfigResponses[keyof DeleteAppsByAppIdTraceConfigResponses] + +export type GetAppsByAppIdTraceConfigData = { + body?: never + path: { + app_id: string + } + query: { + tracing_provider: string + } + url: '/apps/{app_id}/trace-config' +} + +export type GetAppsByAppIdTraceConfigErrors = { + 400: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdTraceConfigError + = GetAppsByAppIdTraceConfigErrors[keyof GetAppsByAppIdTraceConfigErrors] + +export type GetAppsByAppIdTraceConfigResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdTraceConfigResponse + = GetAppsByAppIdTraceConfigResponses[keyof GetAppsByAppIdTraceConfigResponses] + +export type PatchAppsByAppIdTraceConfigData = { + body: TraceConfigPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/trace-config' +} + +export type PatchAppsByAppIdTraceConfigErrors = { + 400: { + [key: string]: unknown + } +} + +export type PatchAppsByAppIdTraceConfigError + = PatchAppsByAppIdTraceConfigErrors[keyof PatchAppsByAppIdTraceConfigErrors] + +export type PatchAppsByAppIdTraceConfigResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchAppsByAppIdTraceConfigResponse + = PatchAppsByAppIdTraceConfigResponses[keyof PatchAppsByAppIdTraceConfigResponses] + +export type PostAppsByAppIdTraceConfigData = { + body: TraceConfigPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/trace-config' +} + +export type PostAppsByAppIdTraceConfigErrors = { + 400: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdTraceConfigError + = PostAppsByAppIdTraceConfigErrors[keyof PostAppsByAppIdTraceConfigErrors] + +export type 
PostAppsByAppIdTraceConfigResponses = { + 201: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdTraceConfigResponse + = PostAppsByAppIdTraceConfigResponses[keyof PostAppsByAppIdTraceConfigResponses] + +export type PostAppsByAppIdTriggerEnableData = { + body: ParserEnable + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/trigger-enable' +} + +export type PostAppsByAppIdTriggerEnableResponses = { + 200: WorkflowTriggerResponse +} + +export type PostAppsByAppIdTriggerEnableResponse + = PostAppsByAppIdTriggerEnableResponses[keyof PostAppsByAppIdTriggerEnableResponses] + +export type GetAppsByAppIdTriggersData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/triggers' +} + +export type GetAppsByAppIdTriggersResponses = { + 200: WorkflowTriggerListResponse +} + +export type GetAppsByAppIdTriggersResponse + = GetAppsByAppIdTriggersResponses[keyof GetAppsByAppIdTriggersResponses] + +export type GetAppsByAppIdWorkflowAppLogsData = { + body?: never + path: { + app_id: string + } + query?: { + created_at__after?: string | null + created_at__before?: string | null + created_by_account?: string | null + created_by_end_user_session_id?: string | null + detail?: boolean + keyword?: string | null + limit?: number + page?: number + status?: string | null + } + url: '/apps/{app_id}/workflow-app-logs' +} + +export type GetAppsByAppIdWorkflowAppLogsResponses = { + 200: WorkflowAppLogPaginationResponse +} + +export type GetAppsByAppIdWorkflowAppLogsResponse + = GetAppsByAppIdWorkflowAppLogsResponses[keyof GetAppsByAppIdWorkflowAppLogsResponses] + +export type GetAppsByAppIdWorkflowArchivedLogsData = { + body?: never + path: { + app_id: string + } + query?: { + created_at__after?: string | null + created_at__before?: string | null + created_by_account?: string | null + created_by_end_user_session_id?: string | null + detail?: boolean + keyword?: string | null + limit?: number + page?: number + status?: string | 
null + } + url: '/apps/{app_id}/workflow-archived-logs' +} + +export type GetAppsByAppIdWorkflowArchivedLogsResponses = { + 200: WorkflowArchivedLogPaginationResponse +} + +export type GetAppsByAppIdWorkflowArchivedLogsResponse + = GetAppsByAppIdWorkflowArchivedLogsResponses[keyof GetAppsByAppIdWorkflowArchivedLogsResponses] + +export type GetAppsByAppIdWorkflowRunsData = { + body?: never + path: { + app_id: string + } + query?: { + triggered_from?: 'debugging' | 'app-run' | null + status?: 'running' | 'succeeded' | 'failed' | 'stopped' | 'partial-succeeded' | null + last_id?: string | null + limit?: number + } + url: '/apps/{app_id}/workflow-runs' +} + +export type GetAppsByAppIdWorkflowRunsResponses = { + 200: WorkflowRunPagination +} + +export type GetAppsByAppIdWorkflowRunsResponse + = GetAppsByAppIdWorkflowRunsResponses[keyof GetAppsByAppIdWorkflowRunsResponses] + +export type GetAppsByAppIdWorkflowRunsCountData = { + body?: never + path: { + app_id: string + } + query?: { + triggered_from?: 'debugging' | 'app-run' | null + time_range?: string | null + status?: 'running' | 'succeeded' | 'failed' | 'stopped' | 'partial-succeeded' | null + } + url: '/apps/{app_id}/workflow-runs/count' +} + +export type GetAppsByAppIdWorkflowRunsCountResponses = { + 200: WorkflowRunCount +} + +export type GetAppsByAppIdWorkflowRunsCountResponse + = GetAppsByAppIdWorkflowRunsCountResponses[keyof GetAppsByAppIdWorkflowRunsCountResponses] + +export type PostAppsByAppIdWorkflowRunsTasksByTaskIdStopData = { + body?: never + path: { + app_id: string + task_id: string + } + query?: never + url: '/apps/{app_id}/workflow-runs/tasks/{task_id}/stop' +} + +export type PostAppsByAppIdWorkflowRunsTasksByTaskIdStopErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowRunsTasksByTaskIdStopError + = PostAppsByAppIdWorkflowRunsTasksByTaskIdStopErrors[keyof PostAppsByAppIdWorkflowRunsTasksByTaskIdStopErrors] + +export type 
PostAppsByAppIdWorkflowRunsTasksByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowRunsTasksByTaskIdStopResponse + = PostAppsByAppIdWorkflowRunsTasksByTaskIdStopResponses[keyof PostAppsByAppIdWorkflowRunsTasksByTaskIdStopResponses] + +export type GetAppsByAppIdWorkflowRunsByRunIdData = { + body?: never + path: { + app_id: string + run_id: string + } + query?: never + url: '/apps/{app_id}/workflow-runs/{run_id}' +} + +export type GetAppsByAppIdWorkflowRunsByRunIdErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowRunsByRunIdError + = GetAppsByAppIdWorkflowRunsByRunIdErrors[keyof GetAppsByAppIdWorkflowRunsByRunIdErrors] + +export type GetAppsByAppIdWorkflowRunsByRunIdResponses = { + 200: WorkflowRunDetail +} + +export type GetAppsByAppIdWorkflowRunsByRunIdResponse + = GetAppsByAppIdWorkflowRunsByRunIdResponses[keyof GetAppsByAppIdWorkflowRunsByRunIdResponses] + +export type GetAppsByAppIdWorkflowRunsByRunIdExportData = { + body?: never + path: { + app_id: string + run_id: string + } + query?: never + url: '/apps/{app_id}/workflow-runs/{run_id}/export' +} + +export type GetAppsByAppIdWorkflowRunsByRunIdExportResponses = { + 200: WorkflowRunExport +} + +export type GetAppsByAppIdWorkflowRunsByRunIdExportResponse + = GetAppsByAppIdWorkflowRunsByRunIdExportResponses[keyof GetAppsByAppIdWorkflowRunsByRunIdExportResponses] + +export type GetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsData = { + body?: never + path: { + app_id: string + run_id: string + } + query?: never + url: '/apps/{app_id}/workflow-runs/{run_id}/node-executions' +} + +export type GetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsError + = GetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsErrors[keyof GetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsErrors] + +export type 
GetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsResponses = { + 200: WorkflowRunNodeExecutionList +} + +export type GetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsResponse + = GetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsResponses[keyof GetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsResponses] + +export type GetAppsByAppIdWorkflowCommentsData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflow/comments' +} + +export type GetAppsByAppIdWorkflowCommentsResponses = { + 200: WorkflowCommentBasic +} + +export type GetAppsByAppIdWorkflowCommentsResponse + = GetAppsByAppIdWorkflowCommentsResponses[keyof GetAppsByAppIdWorkflowCommentsResponses] + +export type PostAppsByAppIdWorkflowCommentsData = { + body: WorkflowCommentCreatePayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflow/comments' +} + +export type PostAppsByAppIdWorkflowCommentsResponses = { + 201: WorkflowCommentCreate +} + +export type PostAppsByAppIdWorkflowCommentsResponse + = PostAppsByAppIdWorkflowCommentsResponses[keyof PostAppsByAppIdWorkflowCommentsResponses] + +export type GetAppsByAppIdWorkflowCommentsMentionUsersData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflow/comments/mention-users' +} + +export type GetAppsByAppIdWorkflowCommentsMentionUsersResponses = { + 200: WorkflowCommentMentionUsersPayload +} + +export type GetAppsByAppIdWorkflowCommentsMentionUsersResponse + = GetAppsByAppIdWorkflowCommentsMentionUsersResponses[keyof GetAppsByAppIdWorkflowCommentsMentionUsersResponses] + +export type DeleteAppsByAppIdWorkflowCommentsByCommentIdData = { + body?: never + path: { + app_id: string + comment_id: string + } + query?: never + url: '/apps/{app_id}/workflow/comments/{comment_id}' +} + +export type DeleteAppsByAppIdWorkflowCommentsByCommentIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdWorkflowCommentsByCommentIdResponse + = 
DeleteAppsByAppIdWorkflowCommentsByCommentIdResponses[keyof DeleteAppsByAppIdWorkflowCommentsByCommentIdResponses] + +export type GetAppsByAppIdWorkflowCommentsByCommentIdData = { + body?: never + path: { + app_id: string + comment_id: string + } + query?: never + url: '/apps/{app_id}/workflow/comments/{comment_id}' +} + +export type GetAppsByAppIdWorkflowCommentsByCommentIdResponses = { + 200: WorkflowCommentDetail +} + +export type GetAppsByAppIdWorkflowCommentsByCommentIdResponse + = GetAppsByAppIdWorkflowCommentsByCommentIdResponses[keyof GetAppsByAppIdWorkflowCommentsByCommentIdResponses] + +export type PutAppsByAppIdWorkflowCommentsByCommentIdData = { + body: WorkflowCommentUpdatePayload + path: { + app_id: string + comment_id: string + } + query?: never + url: '/apps/{app_id}/workflow/comments/{comment_id}' +} + +export type PutAppsByAppIdWorkflowCommentsByCommentIdResponses = { + 200: WorkflowCommentUpdate +} + +export type PutAppsByAppIdWorkflowCommentsByCommentIdResponse + = PutAppsByAppIdWorkflowCommentsByCommentIdResponses[keyof PutAppsByAppIdWorkflowCommentsByCommentIdResponses] + +export type PostAppsByAppIdWorkflowCommentsByCommentIdRepliesData = { + body: WorkflowCommentReplyPayload + path: { + app_id: string + comment_id: string + } + query?: never + url: '/apps/{app_id}/workflow/comments/{comment_id}/replies' +} + +export type PostAppsByAppIdWorkflowCommentsByCommentIdRepliesResponses = { + 201: WorkflowCommentReplyCreate +} + +export type PostAppsByAppIdWorkflowCommentsByCommentIdRepliesResponse + = PostAppsByAppIdWorkflowCommentsByCommentIdRepliesResponses[keyof PostAppsByAppIdWorkflowCommentsByCommentIdRepliesResponses] + +export type DeleteAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdData = { + body?: never + path: { + app_id: string + comment_id: string + reply_id: string + } + query?: never + url: '/apps/{app_id}/workflow/comments/{comment_id}/replies/{reply_id}' +} + +export type 
DeleteAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponse + = DeleteAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponses[keyof DeleteAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponses] + +export type PutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdData = { + body: WorkflowCommentReplyPayload + path: { + app_id: string + comment_id: string + reply_id: string + } + query?: never + url: '/apps/{app_id}/workflow/comments/{comment_id}/replies/{reply_id}' +} + +export type PutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponses = { + 200: WorkflowCommentReplyUpdate +} + +export type PutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponse + = PutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponses[keyof PutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponses] + +export type PostAppsByAppIdWorkflowCommentsByCommentIdResolveData = { + body?: never + path: { + app_id: string + comment_id: string + } + query?: never + url: '/apps/{app_id}/workflow/comments/{comment_id}/resolve' +} + +export type PostAppsByAppIdWorkflowCommentsByCommentIdResolveResponses = { + 200: WorkflowCommentResolve +} + +export type PostAppsByAppIdWorkflowCommentsByCommentIdResolveResponse + = PostAppsByAppIdWorkflowCommentsByCommentIdResolveResponses[keyof PostAppsByAppIdWorkflowCommentsByCommentIdResolveResponses] + +export type GetAppsByAppIdWorkflowStatisticsAverageAppInteractionsData = { + body?: never + path: { + app_id: string + } + query?: { + end?: string | null + start?: string | null + } + url: '/apps/{app_id}/workflow/statistics/average-app-interactions' +} + +export type GetAppsByAppIdWorkflowStatisticsAverageAppInteractionsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowStatisticsAverageAppInteractionsResponse + = 
GetAppsByAppIdWorkflowStatisticsAverageAppInteractionsResponses[keyof GetAppsByAppIdWorkflowStatisticsAverageAppInteractionsResponses] + +export type GetAppsByAppIdWorkflowStatisticsDailyConversationsData = { + body?: never + path: { + app_id: string + } + query?: { + end?: string | null + start?: string | null + } + url: '/apps/{app_id}/workflow/statistics/daily-conversations' +} + +export type GetAppsByAppIdWorkflowStatisticsDailyConversationsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowStatisticsDailyConversationsResponse + = GetAppsByAppIdWorkflowStatisticsDailyConversationsResponses[keyof GetAppsByAppIdWorkflowStatisticsDailyConversationsResponses] + +export type GetAppsByAppIdWorkflowStatisticsDailyTerminalsData = { + body?: never + path: { + app_id: string + } + query?: { + end?: string | null + start?: string | null + } + url: '/apps/{app_id}/workflow/statistics/daily-terminals' +} + +export type GetAppsByAppIdWorkflowStatisticsDailyTerminalsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowStatisticsDailyTerminalsResponse + = GetAppsByAppIdWorkflowStatisticsDailyTerminalsResponses[keyof GetAppsByAppIdWorkflowStatisticsDailyTerminalsResponses] + +export type GetAppsByAppIdWorkflowStatisticsTokenCostsData = { + body?: never + path: { + app_id: string + } + query?: { + end?: string | null + start?: string | null + } + url: '/apps/{app_id}/workflow/statistics/token-costs' +} + +export type GetAppsByAppIdWorkflowStatisticsTokenCostsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowStatisticsTokenCostsResponse + = GetAppsByAppIdWorkflowStatisticsTokenCostsResponses[keyof GetAppsByAppIdWorkflowStatisticsTokenCostsResponses] + +export type GetAppsByAppIdWorkflowsData = { + body?: never + path: { + app_id: string + } + query?: { + limit?: number + named_only?: boolean + page?: number + user_id?: string | null + } + url: 
'/apps/{app_id}/workflows' +} + +export type GetAppsByAppIdWorkflowsResponses = { + 200: WorkflowPagination +} + +export type GetAppsByAppIdWorkflowsResponse + = GetAppsByAppIdWorkflowsResponses[keyof GetAppsByAppIdWorkflowsResponses] + +export type GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/default-workflow-block-configs' +} + +export type GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsResponse + = GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsResponses[keyof GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsResponses] + +export type GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeData = { + body?: never + path: { + app_id: string + block_type: string + } + query?: { + q?: string | null + } + url: '/apps/{app_id}/workflows/default-workflow-block-configs/{block_type}' +} + +export type GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeError + = GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeErrors[keyof GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeErrors] + +export type GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponse + = GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponses[keyof GetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponses] + +export type GetAppsByAppIdWorkflowsDraftData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft' +} + +export type GetAppsByAppIdWorkflowsDraftErrors = { + 404: { + [key: string]: 
unknown + } +} + +export type GetAppsByAppIdWorkflowsDraftError + = GetAppsByAppIdWorkflowsDraftErrors[keyof GetAppsByAppIdWorkflowsDraftErrors] + +export type GetAppsByAppIdWorkflowsDraftResponses = { + 200: Workflow +} + +export type GetAppsByAppIdWorkflowsDraftResponse + = GetAppsByAppIdWorkflowsDraftResponses[keyof GetAppsByAppIdWorkflowsDraftResponses] + +export type PostAppsByAppIdWorkflowsDraftData = { + body: SyncDraftWorkflowPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft' +} + +export type PostAppsByAppIdWorkflowsDraftErrors = { + 400: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftError + = PostAppsByAppIdWorkflowsDraftErrors[keyof PostAppsByAppIdWorkflowsDraftErrors] + +export type PostAppsByAppIdWorkflowsDraftResponses = { + 200: SyncDraftWorkflowResponse +} + +export type PostAppsByAppIdWorkflowsDraftResponse + = PostAppsByAppIdWorkflowsDraftResponses[keyof PostAppsByAppIdWorkflowsDraftResponses] + +export type GetAppsByAppIdWorkflowsDraftConversationVariablesData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/conversation-variables' +} + +export type GetAppsByAppIdWorkflowsDraftConversationVariablesErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowsDraftConversationVariablesError + = GetAppsByAppIdWorkflowsDraftConversationVariablesErrors[keyof GetAppsByAppIdWorkflowsDraftConversationVariablesErrors] + +export type GetAppsByAppIdWorkflowsDraftConversationVariablesResponses = { + 200: WorkflowDraftVariableList +} + +export type GetAppsByAppIdWorkflowsDraftConversationVariablesResponse + = GetAppsByAppIdWorkflowsDraftConversationVariablesResponses[keyof GetAppsByAppIdWorkflowsDraftConversationVariablesResponses] + +export type PostAppsByAppIdWorkflowsDraftConversationVariablesData = { + body: ConversationVariableUpdatePayload + path: { + 
app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/conversation-variables' +} + +export type PostAppsByAppIdWorkflowsDraftConversationVariablesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftConversationVariablesResponse + = PostAppsByAppIdWorkflowsDraftConversationVariablesResponses[keyof PostAppsByAppIdWorkflowsDraftConversationVariablesResponses] + +export type GetAppsByAppIdWorkflowsDraftEnvironmentVariablesData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/environment-variables' +} + +export type GetAppsByAppIdWorkflowsDraftEnvironmentVariablesErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowsDraftEnvironmentVariablesError + = GetAppsByAppIdWorkflowsDraftEnvironmentVariablesErrors[keyof GetAppsByAppIdWorkflowsDraftEnvironmentVariablesErrors] + +export type GetAppsByAppIdWorkflowsDraftEnvironmentVariablesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowsDraftEnvironmentVariablesResponse + = GetAppsByAppIdWorkflowsDraftEnvironmentVariablesResponses[keyof GetAppsByAppIdWorkflowsDraftEnvironmentVariablesResponses] + +export type PostAppsByAppIdWorkflowsDraftEnvironmentVariablesData = { + body: EnvironmentVariableUpdatePayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/environment-variables' +} + +export type PostAppsByAppIdWorkflowsDraftEnvironmentVariablesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftEnvironmentVariablesResponse + = PostAppsByAppIdWorkflowsDraftEnvironmentVariablesResponses[keyof PostAppsByAppIdWorkflowsDraftEnvironmentVariablesResponses] + +export type PostAppsByAppIdWorkflowsDraftFeaturesData = { + body: WorkflowFeaturesPayload + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/features' +} + +export 
type PostAppsByAppIdWorkflowsDraftFeaturesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftFeaturesResponse + = PostAppsByAppIdWorkflowsDraftFeaturesResponses[keyof PostAppsByAppIdWorkflowsDraftFeaturesResponses] + +export type PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestData = { + body: HumanInputDeliveryTestPayload + path: { + app_id: string + node_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/delivery-test' +} + +export type PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestResponse + = PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestResponses[keyof PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestResponses] + +export type PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewData = { + body: HumanInputFormPreviewPayload + path: { + app_id: string + node_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/preview' +} + +export type PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponse + = PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponses[keyof PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponses] + +export type PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunData = { + body: HumanInputFormSubmitPayload + path: { + app_id: string + node_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/run' +} + +export type PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunResponse + = PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunResponses[keyof PostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunResponses] + +export type PostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunData = { + body: IterationNodeRunPayload + path: { + app_id: string + node_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/iteration/nodes/{node_id}/run' +} + +export type PostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunError + = PostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunErrors[keyof PostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunErrors] + +export type PostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunResponse + = PostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunResponses[keyof PostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunResponses] + +export type PostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunData = { + body: LoopNodeRunPayload + path: { + app_id: string + node_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/loop/nodes/{node_id}/run' +} + +export type PostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunError + = PostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunErrors[keyof PostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunErrors] + +export type PostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunResponse + = 
PostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunResponses[keyof PostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunResponses] + +export type GetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunData = { + body?: never + path: { + app_id: string + node_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/nodes/{node_id}/last-run' +} + +export type GetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunError + = GetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunErrors[keyof GetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunErrors] + +export type GetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunResponses = { + 200: WorkflowRunNodeExecution +} + +export type GetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunResponse + = GetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunResponses[keyof GetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunResponses] + +export type PostAppsByAppIdWorkflowsDraftNodesByNodeIdRunData = { + body: DraftWorkflowNodeRunPayload + path: { + app_id: string + node_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/nodes/{node_id}/run' +} + +export type PostAppsByAppIdWorkflowsDraftNodesByNodeIdRunErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftNodesByNodeIdRunError + = PostAppsByAppIdWorkflowsDraftNodesByNodeIdRunErrors[keyof PostAppsByAppIdWorkflowsDraftNodesByNodeIdRunErrors] + +export type PostAppsByAppIdWorkflowsDraftNodesByNodeIdRunResponses = { + 200: WorkflowRunNodeExecution +} + +export type PostAppsByAppIdWorkflowsDraftNodesByNodeIdRunResponse + = PostAppsByAppIdWorkflowsDraftNodesByNodeIdRunResponses[keyof PostAppsByAppIdWorkflowsDraftNodesByNodeIdRunResponses] + +export type PostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunData = { + body?: never + path: { + app_id: string + node_id: string + 
} + query?: never + url: '/apps/{app_id}/workflows/draft/nodes/{node_id}/trigger/run' +} + +export type PostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunErrors = { + 403: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunError + = PostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunErrors[keyof PostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunErrors] + +export type PostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunResponse + = PostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunResponses[keyof PostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunResponses] + +export type DeleteAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesData = { + body?: never + path: { + node_id: string + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/nodes/{node_id}/variables' +} + +export type DeleteAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponse + = DeleteAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponses[keyof DeleteAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponses] + +export type GetAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesData = { + body?: never + path: { + app_id: string + node_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/nodes/{node_id}/variables' +} + +export type GetAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponses = { + 200: WorkflowDraftVariableList +} + +export type GetAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponse + = GetAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponses[keyof GetAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponses] + +export type PostAppsByAppIdWorkflowsDraftRunData = { + body: DraftWorkflowRunPayload + path: { + app_id: 
string + } + query?: never + url: '/apps/{app_id}/workflows/draft/run' +} + +export type PostAppsByAppIdWorkflowsDraftRunErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftRunError + = PostAppsByAppIdWorkflowsDraftRunErrors[keyof PostAppsByAppIdWorkflowsDraftRunErrors] + +export type PostAppsByAppIdWorkflowsDraftRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftRunResponse + = PostAppsByAppIdWorkflowsDraftRunResponses[keyof PostAppsByAppIdWorkflowsDraftRunResponses] + +export type GetAppsByAppIdWorkflowsDraftSystemVariablesData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/system-variables' +} + +export type GetAppsByAppIdWorkflowsDraftSystemVariablesResponses = { + 200: WorkflowDraftVariableList +} + +export type GetAppsByAppIdWorkflowsDraftSystemVariablesResponse + = GetAppsByAppIdWorkflowsDraftSystemVariablesResponses[keyof GetAppsByAppIdWorkflowsDraftSystemVariablesResponses] + +export type PostAppsByAppIdWorkflowsDraftTriggerRunData = { + body: DraftWorkflowTriggerRunRequest + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/trigger/run' +} + +export type PostAppsByAppIdWorkflowsDraftTriggerRunErrors = { + 403: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftTriggerRunError + = PostAppsByAppIdWorkflowsDraftTriggerRunErrors[keyof PostAppsByAppIdWorkflowsDraftTriggerRunErrors] + +export type PostAppsByAppIdWorkflowsDraftTriggerRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftTriggerRunResponse + = PostAppsByAppIdWorkflowsDraftTriggerRunResponses[keyof PostAppsByAppIdWorkflowsDraftTriggerRunResponses] + +export type PostAppsByAppIdWorkflowsDraftTriggerRunAllData = { + body: DraftWorkflowTriggerRunAllPayload + path: { + app_id: string + } + query?: never + 
url: '/apps/{app_id}/workflows/draft/trigger/run-all' +} + +export type PostAppsByAppIdWorkflowsDraftTriggerRunAllErrors = { + 403: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftTriggerRunAllError + = PostAppsByAppIdWorkflowsDraftTriggerRunAllErrors[keyof PostAppsByAppIdWorkflowsDraftTriggerRunAllErrors] + +export type PostAppsByAppIdWorkflowsDraftTriggerRunAllResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsDraftTriggerRunAllResponse + = PostAppsByAppIdWorkflowsDraftTriggerRunAllResponses[keyof PostAppsByAppIdWorkflowsDraftTriggerRunAllResponses] + +export type DeleteAppsByAppIdWorkflowsDraftVariablesData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/variables' +} + +export type DeleteAppsByAppIdWorkflowsDraftVariablesResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdWorkflowsDraftVariablesResponse + = DeleteAppsByAppIdWorkflowsDraftVariablesResponses[keyof DeleteAppsByAppIdWorkflowsDraftVariablesResponses] + +export type GetAppsByAppIdWorkflowsDraftVariablesData = { + body?: never + path: { + app_id: string + } + query?: { + page?: number + limit?: number + } + url: '/apps/{app_id}/workflows/draft/variables' +} + +export type GetAppsByAppIdWorkflowsDraftVariablesResponses = { + 200: WorkflowDraftVariableListWithoutValue +} + +export type GetAppsByAppIdWorkflowsDraftVariablesResponse + = GetAppsByAppIdWorkflowsDraftVariablesResponses[keyof GetAppsByAppIdWorkflowsDraftVariablesResponses] + +export type DeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdData = { + body?: never + path: { + variable_id: string + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/variables/{variable_id}' +} + +export type DeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdErrors = { + 404: { + [key: string]: unknown + } +} + +export type 
DeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdError + = DeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdErrors[keyof DeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdErrors] + +export type DeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdResponse + = DeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdResponses[keyof DeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdResponses] + +export type GetAppsByAppIdWorkflowsDraftVariablesByVariableIdData = { + body?: never + path: { + app_id: string + variable_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/variables/{variable_id}' +} + +export type GetAppsByAppIdWorkflowsDraftVariablesByVariableIdErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowsDraftVariablesByVariableIdError + = GetAppsByAppIdWorkflowsDraftVariablesByVariableIdErrors[keyof GetAppsByAppIdWorkflowsDraftVariablesByVariableIdErrors] + +export type GetAppsByAppIdWorkflowsDraftVariablesByVariableIdResponses = { + 200: WorkflowDraftVariable +} + +export type GetAppsByAppIdWorkflowsDraftVariablesByVariableIdResponse + = GetAppsByAppIdWorkflowsDraftVariablesByVariableIdResponses[keyof GetAppsByAppIdWorkflowsDraftVariablesByVariableIdResponses] + +export type PatchAppsByAppIdWorkflowsDraftVariablesByVariableIdData = { + body: WorkflowDraftVariableUpdatePayload + path: { + variable_id: string + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/variables/{variable_id}' +} + +export type PatchAppsByAppIdWorkflowsDraftVariablesByVariableIdErrors = { + 404: { + [key: string]: unknown + } +} + +export type PatchAppsByAppIdWorkflowsDraftVariablesByVariableIdError + = PatchAppsByAppIdWorkflowsDraftVariablesByVariableIdErrors[keyof PatchAppsByAppIdWorkflowsDraftVariablesByVariableIdErrors] + +export type 
PatchAppsByAppIdWorkflowsDraftVariablesByVariableIdResponses = { + 200: WorkflowDraftVariable +} + +export type PatchAppsByAppIdWorkflowsDraftVariablesByVariableIdResponse + = PatchAppsByAppIdWorkflowsDraftVariablesByVariableIdResponses[keyof PatchAppsByAppIdWorkflowsDraftVariablesByVariableIdResponses] + +export type PutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetData = { + body?: never + path: { + app_id: string + variable_id: string + } + query?: never + url: '/apps/{app_id}/workflows/draft/variables/{variable_id}/reset' +} + +export type PutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetErrors = { + 404: { + [key: string]: unknown + } +} + +export type PutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetError + = PutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetErrors[keyof PutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetErrors] + +export type PutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetResponses = { + 200: WorkflowDraftVariable + 204: { + [key: string]: unknown + } +} + +export type PutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetResponse + = PutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetResponses[keyof PutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetResponses] + +export type GetAppsByAppIdWorkflowsPublishData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/publish' +} + +export type GetAppsByAppIdWorkflowsPublishErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetAppsByAppIdWorkflowsPublishError + = GetAppsByAppIdWorkflowsPublishErrors[keyof GetAppsByAppIdWorkflowsPublishErrors] + +export type GetAppsByAppIdWorkflowsPublishResponses = { + 200: Workflow +} + +export type GetAppsByAppIdWorkflowsPublishResponse + = GetAppsByAppIdWorkflowsPublishResponses[keyof GetAppsByAppIdWorkflowsPublishResponses] + +export type PostAppsByAppIdWorkflowsPublishData = { + body: PublishWorkflowPayload + path: { + app_id: string + } + query?: never + url: 
'/apps/{app_id}/workflows/publish' +} + +export type PostAppsByAppIdWorkflowsPublishResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsPublishResponse + = PostAppsByAppIdWorkflowsPublishResponses[keyof PostAppsByAppIdWorkflowsPublishResponses] + +export type GetAppsByAppIdWorkflowsTriggersWebhookData = { + body?: never + path: { + app_id: string + } + query: { + credential_id?: string | null + datasource_type: string + inputs: string + } + url: '/apps/{app_id}/workflows/triggers/webhook' +} + +export type GetAppsByAppIdWorkflowsTriggersWebhookResponses = { + 200: WebhookTriggerResponse +} + +export type GetAppsByAppIdWorkflowsTriggersWebhookResponse + = GetAppsByAppIdWorkflowsTriggersWebhookResponses[keyof GetAppsByAppIdWorkflowsTriggersWebhookResponses] + +export type DeleteAppsByAppIdWorkflowsByWorkflowIdData = { + body?: never + path: { + workflow_id: string + app_id: string + } + query?: never + url: '/apps/{app_id}/workflows/{workflow_id}' +} + +export type DeleteAppsByAppIdWorkflowsByWorkflowIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteAppsByAppIdWorkflowsByWorkflowIdResponse + = DeleteAppsByAppIdWorkflowsByWorkflowIdResponses[keyof DeleteAppsByAppIdWorkflowsByWorkflowIdResponses] + +export type PatchAppsByAppIdWorkflowsByWorkflowIdData = { + body: WorkflowUpdatePayload + path: { + app_id: string + workflow_id: string + } + query?: never + url: '/apps/{app_id}/workflows/{workflow_id}' +} + +export type PatchAppsByAppIdWorkflowsByWorkflowIdErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PatchAppsByAppIdWorkflowsByWorkflowIdError + = PatchAppsByAppIdWorkflowsByWorkflowIdErrors[keyof PatchAppsByAppIdWorkflowsByWorkflowIdErrors] + +export type PatchAppsByAppIdWorkflowsByWorkflowIdResponses = { + 200: Workflow +} + +export type PatchAppsByAppIdWorkflowsByWorkflowIdResponse + = PatchAppsByAppIdWorkflowsByWorkflowIdResponses[keyof 
PatchAppsByAppIdWorkflowsByWorkflowIdResponses] + +export type PostAppsByAppIdWorkflowsByWorkflowIdRestoreData = { + body?: never + path: { + app_id: string + workflow_id: string + } + query?: never + url: '/apps/{app_id}/workflows/{workflow_id}/restore' +} + +export type PostAppsByAppIdWorkflowsByWorkflowIdRestoreErrors = { + 400: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsByWorkflowIdRestoreError + = PostAppsByAppIdWorkflowsByWorkflowIdRestoreErrors[keyof PostAppsByAppIdWorkflowsByWorkflowIdRestoreErrors] + +export type PostAppsByAppIdWorkflowsByWorkflowIdRestoreResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsByAppIdWorkflowsByWorkflowIdRestoreResponse + = PostAppsByAppIdWorkflowsByWorkflowIdRestoreResponses[keyof PostAppsByAppIdWorkflowsByWorkflowIdRestoreResponses] + +export type GetAppsByResourceIdApiKeysData = { + body?: never + path: { + resource_id: string + } + query?: never + url: '/apps/{resource_id}/api-keys' +} + +export type GetAppsByResourceIdApiKeysResponses = { + 200: ApiKeyList +} + +export type GetAppsByResourceIdApiKeysResponse + = GetAppsByResourceIdApiKeysResponses[keyof GetAppsByResourceIdApiKeysResponses] + +export type PostAppsByResourceIdApiKeysData = { + body?: never + path: { + resource_id: string + } + query?: never + url: '/apps/{resource_id}/api-keys' +} + +export type PostAppsByResourceIdApiKeysErrors = { + 400: { + [key: string]: unknown + } +} + +export type PostAppsByResourceIdApiKeysError + = PostAppsByResourceIdApiKeysErrors[keyof PostAppsByResourceIdApiKeysErrors] + +export type PostAppsByResourceIdApiKeysResponses = { + 201: ApiKeyItem +} + +export type PostAppsByResourceIdApiKeysResponse + = PostAppsByResourceIdApiKeysResponses[keyof PostAppsByResourceIdApiKeysResponses] + +export type DeleteAppsByResourceIdApiKeysByApiKeyIdData = { + body?: never + path: { + resource_id: string + api_key_id: string + } + query?: never + url: 
'/apps/{resource_id}/api-keys/{api_key_id}' +} + +export type DeleteAppsByResourceIdApiKeysByApiKeyIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteAppsByResourceIdApiKeysByApiKeyIdResponse + = DeleteAppsByResourceIdApiKeysByApiKeyIdResponses[keyof DeleteAppsByResourceIdApiKeysByApiKeyIdResponses] + +export type GetAppsByServerIdServerRefreshData = { + body?: never + path: { + server_id: string + } + query?: never + url: '/apps/{server_id}/server/refresh' +} + +export type GetAppsByServerIdServerRefreshErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetAppsByServerIdServerRefreshError + = GetAppsByServerIdServerRefreshErrors[keyof GetAppsByServerIdServerRefreshErrors] + +export type GetAppsByServerIdServerRefreshResponses = { + 200: AppMcpServerResponse +} + +export type GetAppsByServerIdServerRefreshResponse + = GetAppsByServerIdServerRefreshResponses[keyof GetAppsByServerIdServerRefreshResponses] diff --git a/packages/contracts/generated/api/console/apps/zod.gen.ts b/packages/contracts/generated/api/console/apps/zod.gen.ts new file mode 100644 index 0000000000..9798d22cc0 --- /dev/null +++ b/packages/contracts/generated/api/console/apps/zod.gen.ts @@ -0,0 +1,3133 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * AppImportPayload + */ +export const zAppImportPayload = z.object({ + app_id: z.string().nullish(), + description: z.string().nullish(), + icon: z.string().nullish(), + icon_background: z.string().nullish(), + icon_type: z.string().nullish(), + mode: z.string(), + name: z.string().nullish(), + yaml_content: z.string().nullish(), + yaml_url: z.string().nullish(), +}) + +export const zAdvancedChatWorkflowRunPagination = z.record(z.string(), z.unknown()) + +export const zWorkflowRunCount = z.record(z.string(), z.unknown()) + +/** + * HumanInputFormPreviewPayload + */ +export const zHumanInputFormPreviewPayload = z.object({ + 
inputs: z.record(z.string(), z.unknown()).optional(), +}) + +/** + * HumanInputFormSubmitPayload + */ +export const zHumanInputFormSubmitPayload = z.object({ + action: z.string(), + form_inputs: z.record(z.string(), z.unknown()), + inputs: z.record(z.string(), z.unknown()), +}) + +/** + * IterationNodeRunPayload + */ +export const zIterationNodeRunPayload = z.object({ + inputs: z.record(z.string(), z.unknown()).nullish(), +}) + +/** + * LoopNodeRunPayload + */ +export const zLoopNodeRunPayload = z.object({ + inputs: z.record(z.string(), z.unknown()).nullish(), +}) + +/** + * AdvancedChatWorkflowRunPayload + */ +export const zAdvancedChatWorkflowRunPayload = z.object({ + conversation_id: z.string().nullish(), + files: z.array(z.record(z.string(), z.unknown())).nullish(), + inputs: z.record(z.string(), z.unknown()).nullish(), + parent_message_id: z.string().nullish(), + query: z.string().optional().default(''), +}) + +/** + * AnnotationReplyPayload + */ +export const zAnnotationReplyPayload = z.object({ + embedding_model_name: z.string(), + embedding_provider_name: z.string(), + score_threshold: z.number(), +}) + +/** + * AnnotationSettingUpdatePayload + */ +export const zAnnotationSettingUpdatePayload = z.object({ + score_threshold: z.number(), +}) + +/** + * CreateAnnotationPayload + */ +export const zCreateAnnotationPayload = z.object({ + annotation_reply: z.record(z.string(), z.unknown()).nullish(), + answer: z.string().nullish(), + content: z.string().nullish(), + message_id: z.string().nullish(), + question: z.string().nullish(), +}) + +/** + * Annotation + */ +export const zAnnotation = z.object({ + content: z.string().nullish(), + created_at: z.int().nullish(), + hit_count: z.int().nullish(), + id: z.string(), + question: z.string().nullish(), +}) + +/** + * AnnotationCountResponse + */ +export const zAnnotationCountResponse = z.object({ + count: z.int(), +}) + +/** + * AnnotationExportList + */ +export const zAnnotationExportList = z.object({ + data: 
z.array(zAnnotation), +}) + +/** + * UpdateAnnotationPayload + */ +export const zUpdateAnnotationPayload = z.object({ + annotation_reply: z.record(z.string(), z.unknown()).nullish(), + answer: z.string().nullish(), + content: z.string().nullish(), + question: z.string().nullish(), +}) + +/** + * AppApiStatusPayload + */ +export const zAppApiStatusPayload = z.object({ + enable_api: z.boolean(), +}) + +/** + * AudioTranscriptResponse + */ +export const zAudioTranscriptResponse = z.object({ + text: z.string(), +}) + +/** + * SuggestedQuestionsResponse + */ +export const zSuggestedQuestionsResponse = z.object({ + data: z.array(z.string()), +}) + +/** + * CompletionMessagePayload + */ +export const zCompletionMessagePayload = z.object({ + files: z.array(z.unknown()).nullish(), + inputs: z.record(z.string(), z.unknown()), + model_config: z.record(z.string(), z.unknown()), + query: z.string().optional().default(''), + response_mode: z.enum(['blocking', 'streaming']).optional().default('blocking'), + retriever_from: z.string().optional().default('dev'), +}) + +/** + * ConvertToWorkflowPayload + */ +export const zConvertToWorkflowPayload = z.object({ + icon: z.string().nullish(), + icon_background: z.string().nullish(), + icon_type: z.string().nullish(), + name: z.string().nullish(), +}) + +/** + * AppExportResponse + */ +export const zAppExportResponse = z.object({ + data: z.string(), +}) + +/** + * MessageFeedbackPayload + */ +export const zMessageFeedbackPayload = z.object({ + content: z.string().nullish(), + message_id: z.string(), + rating: z.enum(['like', 'dislike']).nullish(), +}) + +/** + * ModelConfigRequest + */ +export const zModelConfigRequest = z.object({ + agent_mode: z.record(z.string(), z.unknown()).nullish(), + configs: z.record(z.string(), z.unknown()).nullish(), + dataset_configs: z.record(z.string(), z.unknown()).nullish(), + model: z.string().nullish(), + more_like_this: z.record(z.string(), z.unknown()).nullish(), + opening_statement: 
z.string().nullish(), + provider: z.string().nullish(), + retrieval_model: z.record(z.string(), z.unknown()).nullish(), + speech_to_text: z.record(z.string(), z.unknown()).nullish(), + suggested_questions: z.array(z.string()).nullish(), + text_to_speech: z.record(z.string(), z.unknown()).nullish(), + tools: z.array(z.record(z.string(), z.unknown())).nullish(), +}) + +/** + * AppNamePayload + */ +export const zAppNamePayload = z.object({ + name: z.string().min(1), +}) + +/** + * MCPServerCreatePayload + */ +export const zMcpServerCreatePayload = z.object({ + description: z.string().nullish(), + parameters: z.record(z.string(), z.unknown()), +}) + +/** + * MCPServerUpdatePayload + */ +export const zMcpServerUpdatePayload = z.object({ + description: z.string().nullish(), + id: z.string(), + parameters: z.record(z.string(), z.unknown()), + status: z.string().nullish(), +}) + +/** + * AppSiteUpdatePayload + */ +export const zAppSiteUpdatePayload = z.object({ + chat_color_theme: z.string().nullish(), + chat_color_theme_inverted: z.boolean().nullish(), + copyright: z.string().nullish(), + custom_disclaimer: z.string().nullish(), + customize_domain: z.string().nullish(), + customize_token_strategy: z.enum(['must', 'allow', 'not_allow']).nullish(), + default_language: z.string().nullish(), + description: z.string().nullish(), + icon: z.string().nullish(), + icon_background: z.string().nullish(), + icon_type: z.string().nullish(), + privacy_policy: z.string().nullish(), + prompt_public: z.boolean().nullish(), + show_workflow_steps: z.boolean().nullish(), + title: z.string().nullish(), + use_icon_as_answer_icon: z.boolean().nullish(), +}) + +/** + * AppSiteResponse + */ +export const zAppSiteResponse = z.object({ + app_id: z.string(), + code: z.string().nullish(), + copyright: z.string().nullish(), + custom_disclaimer: z.string().nullish(), + customize_domain: z.string().nullish(), + customize_token_strategy: z.string(), + default_language: z.string(), + description: 
z.string().nullish(), + icon: z.string().nullish(), + icon_background: z.string().nullish(), + privacy_policy: z.string().nullish(), + prompt_public: z.boolean(), + show_workflow_steps: z.boolean(), + title: z.string(), + use_icon_as_answer_icon: z.boolean(), +}) + +/** + * AppSiteStatusPayload + */ +export const zAppSiteStatusPayload = z.object({ + enable_site: z.boolean(), +}) + +/** + * TextToSpeechPayload + */ +export const zTextToSpeechPayload = z.object({ + message_id: z.string().nullish(), + streaming: z.boolean().nullish(), + text: z.string(), + voice: z.string().nullish(), +}) + +/** + * AppTracePayload + */ +export const zAppTracePayload = z.object({ + enabled: z.boolean(), + tracing_provider: z.string().nullish(), +}) + +/** + * TraceProviderQuery + */ +export const zTraceProviderQuery = z.object({ + tracing_provider: z.string(), +}) + +/** + * TraceConfigPayload + */ +export const zTraceConfigPayload = z.object({ + tracing_config: z.record(z.string(), z.unknown()), + tracing_provider: z.string(), +}) + +/** + * ParserEnable + */ +export const zParserEnable = z.object({ + enable_trigger: z.boolean(), + trigger_id: z.string(), +}) + +/** + * WorkflowTriggerResponse + */ +export const zWorkflowTriggerResponse = z.object({ + created_at: z.iso.datetime().nullish(), + icon: z.string(), + id: z.string(), + node_id: z.string(), + provider_name: z.string(), + status: z.string(), + title: z.string(), + trigger_type: z.string(), + updated_at: z.iso.datetime().nullish(), +}) + +/** + * WorkflowTriggerListResponse + */ +export const zWorkflowTriggerListResponse = z.object({ + data: z.array(zWorkflowTriggerResponse), +}) + +export const zWorkflowRunPagination = z.record(z.string(), z.unknown()) + +export const zWorkflowRunDetail = z.record(z.string(), z.unknown()) + +export const zWorkflowRunExport = z.record(z.string(), z.unknown()) + +export const zWorkflowRunNodeExecutionList = z.record(z.string(), z.unknown()) + +export const zWorkflowCommentBasic = 
z.record(z.string(), z.unknown()) + +/** + * WorkflowCommentCreatePayload + */ +export const zWorkflowCommentCreatePayload = z.object({ + content: z.string(), + mentioned_user_ids: z.array(z.string()).optional(), + position_x: z.number(), + position_y: z.number(), +}) + +export const zWorkflowCommentCreate = z.record(z.string(), z.unknown()) + +export const zWorkflowCommentDetail = z.record(z.string(), z.unknown()) + +/** + * WorkflowCommentUpdatePayload + */ +export const zWorkflowCommentUpdatePayload = z.object({ + content: z.string(), + mentioned_user_ids: z.array(z.string()).nullish(), + position_x: z.number().nullish(), + position_y: z.number().nullish(), +}) + +export const zWorkflowCommentUpdate = z.record(z.string(), z.unknown()) + +/** + * WorkflowCommentReplyPayload + */ +export const zWorkflowCommentReplyPayload = z.object({ + content: z.string(), + mentioned_user_ids: z.array(z.string()).optional(), +}) + +export const zWorkflowCommentReplyCreate = z.record(z.string(), z.unknown()) + +export const zWorkflowCommentReplyUpdate = z.record(z.string(), z.unknown()) + +export const zWorkflowCommentResolve = z.record(z.string(), z.unknown()) + +export const zWorkflowPagination = z.record(z.string(), z.unknown()) + +export const zWorkflow = z.record(z.string(), z.unknown()) + +/** + * SyncDraftWorkflowPayload + */ +export const zSyncDraftWorkflowPayload = z.object({ + conversation_variables: z.array(z.record(z.string(), z.unknown())).optional(), + environment_variables: z.array(z.record(z.string(), z.unknown())).optional(), + features: z.record(z.string(), z.unknown()), + graph: z.record(z.string(), z.unknown()), + hash: z.string().nullish(), +}) + +export const zSyncDraftWorkflowResponse = z.record(z.string(), z.unknown()) + +export const zWorkflowDraftVariableList = z.record(z.string(), z.unknown()) + +/** + * ConversationVariableUpdatePayload + */ +export const zConversationVariableUpdatePayload = z.object({ + conversation_variables: 
z.array(z.record(z.string(), z.unknown())), +}) + +/** + * EnvironmentVariableUpdatePayload + */ +export const zEnvironmentVariableUpdatePayload = z.object({ + environment_variables: z.array(z.record(z.string(), z.unknown())), +}) + +/** + * WorkflowFeaturesPayload + */ +export const zWorkflowFeaturesPayload = z.object({ + features: z.record(z.string(), z.unknown()), +}) + +/** + * HumanInputDeliveryTestPayload + */ +export const zHumanInputDeliveryTestPayload = z.object({ + delivery_method_id: z.string(), + inputs: z.record(z.string(), z.unknown()).optional(), +}) + +export const zWorkflowRunNodeExecution = z.record(z.string(), z.unknown()) + +/** + * DraftWorkflowNodeRunPayload + */ +export const zDraftWorkflowNodeRunPayload = z.object({ + files: z.array(z.record(z.string(), z.unknown())).nullish(), + inputs: z.record(z.string(), z.unknown()), + query: z.string().optional().default(''), +}) + +/** + * DraftWorkflowRunPayload + */ +export const zDraftWorkflowRunPayload = z.object({ + datasource_info_list: z.array(z.record(z.string(), z.unknown())), + datasource_type: z.string(), + inputs: z.record(z.string(), z.unknown()), + start_node_id: z.string(), +}) + +export const zDraftWorkflowTriggerRunRequest = z.record(z.string(), z.unknown()) + +/** + * DraftWorkflowTriggerRunAllPayload + */ +export const zDraftWorkflowTriggerRunAllPayload = z.object({ + node_ids: z.array(z.string()), +}) + +export const zWorkflowDraftVariableListWithoutValue = z.record(z.string(), z.unknown()) + +export const zWorkflowDraftVariable = z.record(z.string(), z.unknown()) + +/** + * WorkflowDraftVariableUpdatePayload + */ +export const zWorkflowDraftVariableUpdatePayload = z.object({ + name: z.string().nullish(), + value: z.unknown().optional(), +}) + +/** + * PublishWorkflowPayload + */ +export const zPublishWorkflowPayload = z.object({ + marked_comment: z.string().max(100).nullish(), + marked_name: z.string().max(20).nullish(), +}) + +/** + * WebhookTriggerResponse + */ +export const 
zWebhookTriggerResponse = z.object({ + created_at: z.iso.datetime().nullish(), + id: z.string(), + node_id: z.string(), + webhook_debug_url: z.string(), + webhook_id: z.string(), + webhook_url: z.string(), +}) + +/** + * WorkflowUpdatePayload + */ +export const zWorkflowUpdatePayload = z.object({ + marked_comment: z.string().max(100).nullish(), + marked_name: z.string().max(20).nullish(), +}) + +/** + * ApiKeyItem + */ +export const zApiKeyItem = z.object({ + created_at: z.int().nullish(), + id: z.string(), + last_used_at: z.int().nullish(), + token: z.string(), + type: z.string(), +}) + +/** + * ApiKeyList + */ +export const zApiKeyList = z.object({ + data: z.array(zApiKeyItem), +}) + +/** + * IconType + */ +export const zIconType = z.enum(['image', 'emoji', 'link']) + +/** + * CreateAppPayload + */ +export const zCreateAppPayload = z.object({ + description: z.string().max(400).nullish(), + icon: z.string().nullish(), + icon_background: z.string().nullish(), + icon_type: zIconType.optional(), + mode: z.enum(['chat', 'agent-chat', 'advanced-chat', 'workflow', 'completion']), + name: z.string().min(1), +}) + +/** + * UpdateAppPayload + */ +export const zUpdateAppPayload = z.object({ + description: z.string().max(400).nullish(), + icon: z.string().nullish(), + icon_background: z.string().nullish(), + icon_type: zIconType.optional(), + max_active_requests: z.int().nullish(), + name: z.string().min(1), + use_icon_as_answer_icon: z.boolean().nullish(), +}) + +/** + * CopyAppPayload + */ +export const zCopyAppPayload = z.object({ + description: z.string().max(400).nullish(), + icon: z.string().nullish(), + icon_background: z.string().nullish(), + icon_type: zIconType.optional(), + name: z.string().nullish(), +}) + +/** + * AppIconPayload + */ +export const zAppIconPayload = z.object({ + icon: z.string().nullish(), + icon_background: z.string().nullish(), + icon_type: zIconType.optional(), +}) + +/** + * Tag + */ +export const zTag = z.object({ + id: z.string(), + name: 
z.string(), + type: z.string(), +}) + +export const zJsonValue = z.unknown() + +/** + * ModelConfig + */ +export const zModelConfig = z.object({ + agent_mode_dict: zJsonValue.optional(), + annotation_reply_dict: zJsonValue.optional(), + chat_prompt_config_dict: zJsonValue.optional(), + completion_prompt_config_dict: zJsonValue.optional(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + dataset_configs_dict: zJsonValue.optional(), + dataset_query_variable: z.string().nullish(), + external_data_tools_list: zJsonValue.optional(), + file_upload_dict: zJsonValue.optional(), + model_dict: zJsonValue.optional(), + more_like_this_dict: zJsonValue.optional(), + opening_statement: z.string().nullish(), + pre_prompt: z.string().nullish(), + prompt_type: z.string().nullish(), + retriever_resource_dict: zJsonValue.optional(), + sensitive_word_avoidance_dict: zJsonValue.optional(), + speech_to_text_dict: zJsonValue.optional(), + suggested_questions_after_answer_dict: zJsonValue.optional(), + suggested_questions_list: zJsonValue.optional(), + text_to_speech_dict: zJsonValue.optional(), + updated_at: z.int().nullish(), + updated_by: z.string().nullish(), + user_input_form_list: zJsonValue.optional(), +}) + +/** + * WorkflowPartial + */ +export const zWorkflowPartial = z.object({ + created_at: z.int().nullish(), + created_by: z.string().nullish(), + id: z.string(), + updated_at: z.int().nullish(), + updated_by: z.string().nullish(), +}) + +/** + * AppDetail + */ +export const zAppDetail = z.object({ + access_mode: z.string().nullish(), + app_model_config: zModelConfig.optional(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + description: z.string().nullish(), + enable_api: z.boolean(), + enable_site: z.boolean(), + icon: z.string().nullish(), + icon_background: z.string().nullish(), + id: z.string(), + mode_compatible_with_agent: z.string(), + name: z.string(), + tags: z.array(zTag).optional(), + tracing: zJsonValue.optional(), + 
updated_at: z.int().nullish(), + updated_by: z.string().nullish(), + use_icon_as_answer_icon: z.boolean().nullish(), + workflow: zWorkflowPartial.optional(), +}) + +/** + * ImportStatus + */ +export const zImportStatus = z.enum(['completed', 'completed-with-warnings', 'pending', 'failed']) + +/** + * Import + */ +export const zImport = z.object({ + app_id: z.string().nullish(), + app_mode: z.string().nullish(), + current_dsl_version: z.string().optional().default('0.6.0'), + error: z.string().optional().default(''), + id: z.string(), + imported_dsl_version: z.string().optional().default(''), + status: zImportStatus, +}) + +/** + * DeletedTool + */ +export const zDeletedTool = z.object({ + provider_id: z.string(), + tool_name: z.string(), + type: z.string(), +}) + +/** + * Site + */ +export const zSite = z.object({ + app_base_url: z.string().nullish(), + chat_color_theme: z.string().nullish(), + chat_color_theme_inverted: z.boolean().nullish(), + code: z.string().nullish(), + copyright: z.string().nullish(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + custom_disclaimer: z.string().nullish(), + customize_domain: z.string().nullish(), + customize_token_strategy: z.string().nullish(), + default_language: z.string().nullish(), + description: z.string().nullish(), + icon: z.string().nullish(), + icon_background: z.string().nullish(), + icon_type: z.unknown().optional(), + privacy_policy: z.string().nullish(), + prompt_public: z.boolean().nullish(), + show_workflow_steps: z.boolean().nullish(), + title: z.string().nullish(), + updated_at: z.int().nullish(), + updated_by: z.string().nullish(), + use_icon_as_answer_icon: z.boolean().nullish(), +}) + +/** + * AppDetailWithSite + */ +export const zAppDetailWithSite = z.object({ + access_mode: z.string().nullish(), + api_base_url: z.string().nullish(), + app_model_config: zModelConfig.optional(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + deleted_tools: 
z.array(zDeletedTool).optional(), + description: z.string().nullish(), + enable_api: z.boolean(), + enable_site: z.boolean(), + icon: z.string().nullish(), + icon_background: z.string().nullish(), + icon_type: z.string().nullish(), + id: z.string(), + max_active_requests: z.int().nullish(), + mode_compatible_with_agent: z.string(), + name: z.string(), + site: zSite.optional(), + tags: z.array(zTag).optional(), + tracing: zJsonValue.optional(), + updated_at: z.int().nullish(), + updated_by: z.string().nullish(), + use_icon_as_answer_icon: z.boolean().nullish(), + workflow: zWorkflowPartial.optional(), +}) + +/** + * AnnotationHitHistory + */ +export const zAnnotationHitHistory = z.object({ + annotation_content: z.string().nullish(), + annotation_question: z.string().nullish(), + created_at: z.int().nullish(), + id: z.string(), + question: z.string().nullish(), + score: z.number().nullish(), + source: z.string().nullish(), +}) + +/** + * AnnotationHitHistoryList + */ +export const zAnnotationHitHistoryList = z.object({ + data: z.array(zAnnotationHitHistory), + has_more: z.boolean(), + limit: z.int(), + page: z.int(), + total: z.int(), +}) + +/** + * FeedbackStat + */ +export const zFeedbackStat = z.object({ + dislike: z.int(), + like: z.int(), +}) + +/** + * ConversationDetail + */ +export const zConversationDetail = z.object({ + admin_feedback_stats: zFeedbackStat.optional(), + annotated: z.boolean(), + created_at: z.int().nullish(), + from_account_id: z.string().nullish(), + from_end_user_id: z.string().nullish(), + from_source: z.string(), + id: z.string(), + introduction: z.string().nullish(), + message_count: z.int(), + model_config: zModelConfig.optional(), + status: z.string(), + updated_at: z.int().nullish(), + user_feedback_stats: zFeedbackStat.optional(), +}) + +/** + * ConversationVariableResponse + */ +export const zConversationVariableResponse = z.object({ + created_at: z.int().nullish(), + description: z.string().nullish(), + id: z.string(), + name: 
z.string(), + updated_at: z.int().nullish(), + value: z.string().nullish(), + value_type: z.string(), +}) + +/** + * PaginatedConversationVariableResponse + */ +export const zPaginatedConversationVariableResponse = z.object({ + data: z.array(zConversationVariableResponse), + has_more: z.boolean(), + limit: z.int(), + page: z.int(), + total: z.int(), +}) + +/** + * AgentThought + */ +export const zAgentThought = z.object({ + chain_id: z.string().nullish(), + created_at: z.int().nullish(), + files: z.array(z.string()), + id: z.string(), + message_chain_id: z.string().nullish(), + message_id: z.string(), + observation: z.string().nullish(), + position: z.int(), + thought: z.string().nullish(), + tool: z.string().nullish(), + tool_input: z.string().nullish(), + tool_labels: zJsonValue, +}) + +/** + * MessageFile + */ +export const zMessageFile = z.object({ + belongs_to: z.string().nullish(), + filename: z.string(), + id: z.string(), + mime_type: z.string().nullish(), + size: z.int().nullish(), + transfer_method: z.string(), + type: z.string(), + upload_file_id: z.string().nullish(), + url: z.string().nullish(), +}) + +/** + * AppMCPServerStatus + * + * AppMCPServer Status Enum + */ +export const zAppMcpServerStatus = z.enum(['normal', 'active', 'inactive']) + +/** + * AppMCPServerResponse + */ +export const zAppMcpServerResponse = z.object({ + created_at: z.int().nullish(), + description: z.string(), + id: z.string(), + name: z.string(), + parameters: z.unknown(), + server_code: z.string(), + status: zAppMcpServerStatus, + updated_at: z.int().nullish(), +}) + +/** + * AccountWithRole + */ +export const zAccountWithRole = z.object({ + avatar: z.string().nullish(), + created_at: z.int().nullish(), + email: z.string(), + id: z.string(), + last_active_at: z.int().nullish(), + last_login_at: z.int().nullish(), + name: z.string(), + role: z.string(), + status: z.string(), +}) + +/** + * WorkflowCommentMentionUsersPayload + */ +export const zWorkflowCommentMentionUsersPayload 
= z.object({ + users: z.array(zAccountWithRole), +}) + +/** + * ModelConfigPartial + */ +export const zModelConfigPartial = z.object({ + created_at: z.int().nullish(), + created_by: z.string().nullish(), + model_dict: zJsonValue.optional(), + pre_prompt: z.string().nullish(), + updated_at: z.int().nullish(), + updated_by: z.string().nullish(), +}) + +/** + * AppPartial + */ +export const zAppPartial = z.object({ + access_mode: z.string().nullish(), + app_model_config: zModelConfigPartial.optional(), + author_name: z.string().nullish(), + create_user_name: z.string().nullish(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + desc_or_prompt: z.string().nullish(), + has_draft_trigger: z.boolean().nullish(), + icon: z.string().nullish(), + icon_background: z.string().nullish(), + icon_type: z.string().nullish(), + id: z.string(), + max_active_requests: z.int().nullish(), + mode_compatible_with_agent: z.string(), + name: z.string(), + tags: z.array(zTag).optional(), + updated_at: z.int().nullish(), + updated_by: z.string().nullish(), + use_icon_as_answer_icon: z.boolean().nullish(), + workflow: zWorkflowPartial.optional(), +}) + +/** + * AppPagination + */ +export const zAppPagination = z.object({ + has_next: z.boolean(), + items: z.array(zAppPartial), + page: z.int(), + per_page: z.int(), + total: z.int(), +}) + +/** + * Type + */ +export const zType = z.enum(['github', 'marketplace', 'package']) + +/** + * PluginDependency + */ +export const zPluginDependency = z.object({ + current_identifier: z.string().nullish(), + type: zType, + value: z.unknown(), +}) + +/** + * CheckDependenciesResult + */ +export const zCheckDependenciesResult = z.object({ + leaked_dependencies: z.array(zPluginDependency).optional(), +}) + +/** + * Github + */ +export const zGithub = z.object({ + github_plugin_unique_identifier: z.string(), + package: z.string(), + repo: z.string(), + version: z.string(), +}) + +/** + * Marketplace + */ +export const zMarketplace = 
z.object({ + marketplace_plugin_unique_identifier: z.string(), + version: z.string().nullish(), +}) + +/** + * Package + */ +export const zPackage = z.object({ + plugin_unique_identifier: z.string(), + version: z.string().nullish(), +}) + +/** + * SimpleModelConfig + */ +export const zSimpleModelConfig = z.object({ + model_dict: zJsonValue.optional(), + pre_prompt: z.string().nullish(), +}) + +/** + * StatusCount + */ +export const zStatusCount = z.object({ + failed: z.int(), + partial_success: z.int(), + paused: z.int(), + success: z.int(), +}) + +/** + * ConversationWithSummary + */ +export const zConversationWithSummary = z.object({ + admin_feedback_stats: zFeedbackStat.optional(), + annotated: z.boolean(), + created_at: z.int().nullish(), + from_account_id: z.string().nullish(), + from_account_name: z.string().nullish(), + from_end_user_id: z.string().nullish(), + from_end_user_session_id: z.string().nullish(), + from_source: z.string(), + id: z.string(), + message_count: z.int(), + model_config: zSimpleModelConfig.optional(), + name: z.string(), + read_at: z.int().nullish(), + status: z.string(), + status_count: zStatusCount.optional(), + summary_or_query: z.string(), + updated_at: z.int().nullish(), + user_feedback_stats: zFeedbackStat.optional(), +}) + +/** + * ConversationWithSummaryPagination + */ +export const zConversationWithSummaryPagination = z.object({ + has_next: z.boolean(), + items: z.array(zConversationWithSummary), + page: z.int(), + per_page: z.int(), + total: z.int(), +}) + +/** + * SimpleMessageDetail + */ +export const zSimpleMessageDetail = z.object({ + answer: z.string(), + inputs: z.record(z.string(), zJsonValue), + message: z.string(), + query: z.string(), +}) + +/** + * SimpleAccount + */ +export const zSimpleAccount = z.object({ + email: z.string(), + id: z.string(), + name: z.string(), +}) + +/** + * ConversationAnnotation + */ +export const zConversationAnnotation = z.object({ + account: zSimpleAccount.optional(), + content: 
z.string(), + created_at: z.int().nullish(), + id: z.string(), + question: z.string().nullish(), +}) + +/** + * Conversation + */ +export const zConversation = z.object({ + admin_feedback_stats: zFeedbackStat.optional(), + annotation: zConversationAnnotation.optional(), + created_at: z.int().nullish(), + first_message: zSimpleMessageDetail.optional(), + from_account_id: z.string().nullish(), + from_account_name: z.string().nullish(), + from_end_user_id: z.string().nullish(), + from_end_user_session_id: z.string().nullish(), + from_source: z.string(), + id: z.string(), + model_config: zSimpleModelConfig.optional(), + read_at: z.int().nullish(), + status: z.string(), + updated_at: z.int().nullish(), + user_feedback_stats: zFeedbackStat.optional(), +}) + +/** + * ConversationPagination + */ +export const zConversationPagination = z.object({ + has_next: z.boolean(), + items: z.array(zConversation), + page: z.int(), + per_page: z.int(), + total: z.int(), +}) + +/** + * ConversationAnnotationHitHistory + */ +export const zConversationAnnotationHitHistory = z.object({ + annotation_create_account: zSimpleAccount.optional(), + created_at: z.int().nullish(), + id: z.string(), +}) + +/** + * Feedback + */ +export const zFeedback = z.object({ + content: z.string().nullish(), + from_account: zSimpleAccount.optional(), + from_end_user_id: z.string().nullish(), + from_source: z.string(), + rating: z.string(), +}) + +/** + * MessageDetail + */ +export const zMessageDetail = z.object({ + agent_thoughts: z.array(zAgentThought), + annotation: zConversationAnnotation.optional(), + annotation_hit_history: zConversationAnnotationHitHistory.optional(), + answer_tokens: z.int(), + conversation_id: z.string(), + created_at: z.int().nullish(), + error: z.string().nullish(), + feedbacks: z.array(zFeedback), + from_account_id: z.string().nullish(), + from_end_user_id: z.string().nullish(), + from_source: z.string(), + id: z.string(), + inputs: z.record(z.string(), zJsonValue), + message: 
zJsonValue, + message_files: z.array(zMessageFile), + message_metadata_dict: zJsonValue, + message_tokens: z.int(), + parent_message_id: z.string().nullish(), + provider_response_latency: z.number(), + query: z.string(), + re_sign_file_url_answer: z.string(), + status: z.string(), + workflow_run_id: z.string().nullish(), +}) + +/** + * ConversationMessageDetail + */ +export const zConversationMessageDetail = z.object({ + created_at: z.int().nullish(), + first_message: zMessageDetail.optional(), + from_account_id: z.string().nullish(), + from_end_user_id: z.string().nullish(), + from_source: z.string(), + id: z.string(), + model_config: zModelConfig.optional(), + status: z.string(), +}) + +/** + * HumanInputFormSubmissionData + */ +export const zHumanInputFormSubmissionData = z.object({ + action_id: z.string(), + action_text: z.string(), + node_id: z.string(), + node_title: z.string(), + rendered_content: z.string(), +}) + +/** + * ExecutionContentType + */ +export const zExecutionContentType = z.enum(['human_input']) + +/** + * SimpleEndUser + */ +export const zSimpleEndUser = z.object({ + id: z.string(), + is_anonymous: z.boolean(), + session_id: z.string().nullish(), + type: z.string(), +}) + +/** + * WorkflowRunForLogResponse + */ +export const zWorkflowRunForLogResponse = z.object({ + created_at: z.int().nullish(), + elapsed_time: z.number().nullish(), + error: z.string().nullish(), + exceptions_count: z.int().nullish(), + finished_at: z.int().nullish(), + id: z.string(), + status: z.string().nullish(), + total_steps: z.int().nullish(), + total_tokens: z.int().nullish(), + triggered_from: z.string().nullish(), + version: z.string().nullish(), +}) + +/** + * WorkflowAppLogPartialResponse + */ +export const zWorkflowAppLogPartialResponse = z.object({ + created_at: z.int().nullish(), + created_by_account: zSimpleAccount.optional(), + created_by_end_user: zSimpleEndUser.optional(), + created_by_role: z.string().nullish(), + created_from: z.string().nullish(), + 
details: z.unknown().optional(), + id: z.string(), + workflow_run: zWorkflowRunForLogResponse.optional(), +}) + +/** + * WorkflowAppLogPaginationResponse + */ +export const zWorkflowAppLogPaginationResponse = z.object({ + data: z.array(zWorkflowAppLogPartialResponse), + has_more: z.boolean(), + limit: z.int(), + page: z.int(), + total: z.int(), +}) + +/** + * WorkflowRunForArchivedLogResponse + */ +export const zWorkflowRunForArchivedLogResponse = z.object({ + elapsed_time: z.number().nullish(), + id: z.string(), + status: z.string().nullish(), + total_tokens: z.int().nullish(), + triggered_from: z.string().nullish(), +}) + +/** + * WorkflowArchivedLogPartialResponse + */ +export const zWorkflowArchivedLogPartialResponse = z.object({ + created_at: z.int().nullish(), + created_by_account: zSimpleAccount.optional(), + created_by_end_user: zSimpleEndUser.optional(), + id: z.string(), + trigger_metadata: z.unknown().optional(), + workflow_run: zWorkflowRunForArchivedLogResponse.optional(), +}) + +/** + * WorkflowArchivedLogPaginationResponse + */ +export const zWorkflowArchivedLogPaginationResponse = z.object({ + data: z.array(zWorkflowArchivedLogPartialResponse), + has_more: z.boolean(), + limit: z.int(), + page: z.int(), + total: z.int(), +}) + +/** + * ButtonStyle + * + * Button styles for user actions. + */ +export const zButtonStyle = z.enum(['primary', 'default', 'accent', 'ghost']) + +/** + * UserAction + * + * User action configuration. + */ +export const zUserAction = z.object({ + button_style: zButtonStyle.optional(), + id: z.string().max(20), + title: z.string().max(20), +}) + +/** + * FormInputType + * + * Form input types. + */ +export const zFormInputType = z.enum(['text_input', 'paragraph']) + +/** + * PlaceholderType + * + * Default value types for form inputs. + */ +export const zPlaceholderType = z.enum(['variable', 'constant']) + +/** + * FormInputDefault + * + * Default configuration for form inputs. 
+ */ +export const zFormInputDefault = z.object({ + selector: z.array(z.string()).optional(), + type: zPlaceholderType, + value: z.string().optional().default(''), +}) + +/** + * FormInput + * + * Form input definition. + */ +export const zFormInput = z.object({ + default: zFormInputDefault.optional(), + output_variable_name: z.string(), + type: zFormInputType, +}) + +/** + * HumanInputFormDefinition + */ +export const zHumanInputFormDefinition = z.object({ + actions: z.array(zUserAction).optional(), + display_in_ui: z.boolean().optional().default(false), + expiration_time: z.int(), + form_content: z.string(), + form_id: z.string(), + form_token: z.string().nullish(), + inputs: z.array(zFormInput).optional(), + node_id: z.string(), + node_title: z.string(), + resolved_default_values: z.record(z.string(), z.unknown()).optional(), +}) + +/** + * HumanInputContent + */ +export const zHumanInputContent = z.object({ + form_definition: zHumanInputFormDefinition.optional(), + form_submission_data: zHumanInputFormSubmissionData.optional(), + submitted: z.boolean(), + type: zExecutionContentType.optional(), + workflow_run_id: z.string(), +}) + +/** + * MessageDetailResponse + */ +export const zMessageDetailResponse = z.object({ + agent_thoughts: z.array(zAgentThought).optional(), + annotation: zConversationAnnotation.optional(), + annotation_hit_history: zConversationAnnotationHitHistory.optional(), + answer_tokens: z.int().nullish(), + conversation_id: z.string(), + created_at: z.int().nullish(), + error: z.string().nullish(), + extra_contents: z.array(zHumanInputContent).optional(), + feedbacks: z.array(zFeedback).optional(), + from_account_id: z.string().nullish(), + from_end_user_id: z.string().nullish(), + from_source: z.string(), + id: z.string(), + inputs: z.record(z.string(), zJsonValue), + message: zJsonValue.optional(), + message_files: z.array(zMessageFile).optional(), + message_metadata_dict: zJsonValue.optional(), + message_tokens: z.int().nullish(), + 
parent_message_id: z.string().nullish(), + provider_response_latency: z.number().nullish(), + query: z.string(), + re_sign_file_url_answer: z.string(), + status: z.string(), + workflow_run_id: z.string().nullish(), +}) + +/** + * MessageInfiniteScrollPaginationResponse + */ +export const zMessageInfiniteScrollPaginationResponse = z.object({ + data: z.array(zMessageDetailResponse), + has_more: z.boolean(), + limit: z.int(), +}) + +export const zGetAppsQuery = z.object({ + is_created_by_me: z.boolean().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), + mode: z + .enum(['completion', 'chat', 'advanced-chat', 'workflow', 'agent-chat', 'channel', 'all']) + .optional() + .default('all'), + name: z.string().nullish(), + page: z.int().gte(1).lte(99999).optional().default(1), + tag_ids: z.array(z.string()).nullish(), +}) + +/** + * Success + */ +export const zGetAppsResponse = zAppPagination + +export const zPostAppsBody = zCreateAppPayload + +/** + * App created successfully + */ +export const zPostAppsResponse = zAppDetail + +export const zPostAppsImportsBody = zAppImportPayload + +/** + * Import completed + */ +export const zPostAppsImportsResponse = zImport + +export const zGetAppsImportsByAppIdCheckDependenciesPath = z.object({ + app_id: z.string(), +}) + +/** + * Dependencies checked + */ +export const zGetAppsImportsByAppIdCheckDependenciesResponse = zCheckDependenciesResult + +export const zPostAppsImportsByImportIdConfirmPath = z.object({ + import_id: z.string(), +}) + +/** + * Import confirmed + */ +export const zPostAppsImportsByImportIdConfirmResponse = zImport + +export const zGetAppsWorkflowsOnlineUsersQuery = z.object({ + app_ids: z.string(), +}) + +/** + * Success + */ +export const zGetAppsWorkflowsOnlineUsersResponse = z.record(z.string(), z.unknown()) + +export const zDeleteAppsByAppIdPath = z.object({ + app_id: z.string(), +}) + +/** + * App deleted successfully + */ +export const zDeleteAppsByAppIdResponse = z.record(z.string(), 
z.unknown()) + +export const zGetAppsByAppIdPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zGetAppsByAppIdResponse = zAppDetailWithSite + +export const zPutAppsByAppIdBody = zUpdateAppPayload + +export const zPutAppsByAppIdPath = z.object({ + app_id: z.string(), +}) + +/** + * App updated successfully + */ +export const zPutAppsByAppIdResponse = zAppDetailWithSite + +export const zGetAppsByAppIdAdvancedChatWorkflowRunsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdAdvancedChatWorkflowRunsQuery = z.object({ + triggered_from: z.enum(['debugging', 'app-run']).nullish(), + status: z.enum(['running', 'succeeded', 'failed', 'stopped', 'partial-succeeded']).nullish(), + last_id: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), +}) + +/** + * Workflow runs retrieved successfully + */ +export const zGetAppsByAppIdAdvancedChatWorkflowRunsResponse = zAdvancedChatWorkflowRunPagination + +export const zGetAppsByAppIdAdvancedChatWorkflowRunsCountPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdAdvancedChatWorkflowRunsCountQuery = z.object({ + triggered_from: z.enum(['debugging', 'app-run']).nullish(), + time_range: z.string().nullish(), + status: z.enum(['running', 'succeeded', 'failed', 'stopped', 'partial-succeeded']).nullish(), +}) + +/** + * Workflow runs count retrieved successfully + */ +export const zGetAppsByAppIdAdvancedChatWorkflowRunsCountResponse = zWorkflowRunCount + +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewBody + = zHumanInputFormPreviewPayload + +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewPath + = z.object({ + app_id: z.string(), + node_id: z.string(), + }) + +/** + * Success + */ +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponse + = z.record(z.string(), z.unknown()) + +export const 
zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunBody + = zHumanInputFormSubmitPayload + +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunPath + = z.object({ + app_id: z.string(), + node_id: z.string(), + }) + +/** + * Success + */ +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftHumanInputNodesByNodeIdFormRunResponse + = z.record(z.string(), z.unknown()) + +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunBody + = zIterationNodeRunPayload + +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunPath = z.object({ + app_id: z.string(), + node_id: z.string(), +}) + +/** + * Iteration node run started successfully + */ +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftIterationNodesByNodeIdRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunBody + = zLoopNodeRunPayload + +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunPath = z.object({ + app_id: z.string(), + node_id: z.string(), +}) + +/** + * Loop node run started successfully + */ +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftLoopNodesByNodeIdRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftRunBody = zAdvancedChatWorkflowRunPayload + +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftRunPath = z.object({ + app_id: z.string(), +}) + +/** + * Workflow run started successfully + */ +export const zPostAppsByAppIdAdvancedChatWorkflowsDraftRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdAgentLogsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdAgentLogsQuery = z.object({ + conversation_id: z.string(), + message_id: z.string(), +}) + +/** + * Agent logs retrieved successfully + */ +export const zGetAppsByAppIdAgentLogsResponse = 
z.array(z.record(z.string(), z.unknown())) + +export const zPostAppsByAppIdAnnotationReplyByActionBody = zAnnotationReplyPayload + +export const zPostAppsByAppIdAnnotationReplyByActionPath = z.object({ + app_id: z.string(), + action: z.string(), +}) + +/** + * Action completed successfully + */ +export const zPostAppsByAppIdAnnotationReplyByActionResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByAppIdAnnotationReplyByActionStatusByJobIdPath = z.object({ + app_id: z.string(), + action: z.string(), + job_id: z.string(), +}) + +/** + * Job status retrieved successfully + */ +export const zGetAppsByAppIdAnnotationReplyByActionStatusByJobIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdAnnotationSettingPath = z.object({ + app_id: z.string(), +}) + +/** + * Annotation settings retrieved successfully + */ +export const zGetAppsByAppIdAnnotationSettingResponse = z.record(z.string(), z.unknown()) + +export const zPostAppsByAppIdAnnotationSettingsByAnnotationSettingIdBody + = zAnnotationSettingUpdatePayload + +export const zPostAppsByAppIdAnnotationSettingsByAnnotationSettingIdPath = z.object({ + app_id: z.string(), + annotation_setting_id: z.string(), +}) + +/** + * Settings updated successfully + */ +export const zPostAppsByAppIdAnnotationSettingsByAnnotationSettingIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteAppsByAppIdAnnotationsPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteAppsByAppIdAnnotationsResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByAppIdAnnotationsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdAnnotationsQuery = z.object({ + keyword: z.string().optional().default(''), + limit: z.int().gte(1).optional().default(20), + page: z.int().gte(1).optional().default(1), +}) + +/** + * Annotations retrieved successfully + */ +export const zGetAppsByAppIdAnnotationsResponse = 
z.record(z.string(), z.unknown()) + +export const zPostAppsByAppIdAnnotationsBody = zCreateAnnotationPayload + +export const zPostAppsByAppIdAnnotationsPath = z.object({ + app_id: z.string(), +}) + +/** + * Annotation created successfully + */ +export const zPostAppsByAppIdAnnotationsResponse = zAnnotation + +export const zPostAppsByAppIdAnnotationsBatchImportPath = z.object({ + app_id: z.string(), +}) + +/** + * Batch import started successfully + */ +export const zPostAppsByAppIdAnnotationsBatchImportResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByAppIdAnnotationsBatchImportStatusByJobIdPath = z.object({ + app_id: z.string(), + job_id: z.string(), +}) + +/** + * Job status retrieved successfully + */ +export const zGetAppsByAppIdAnnotationsBatchImportStatusByJobIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdAnnotationsCountPath = z.object({ + app_id: z.string(), +}) + +/** + * Annotation count retrieved successfully + */ +export const zGetAppsByAppIdAnnotationsCountResponse = zAnnotationCountResponse + +export const zGetAppsByAppIdAnnotationsExportPath = z.object({ + app_id: z.string(), +}) + +/** + * Annotations exported successfully + */ +export const zGetAppsByAppIdAnnotationsExportResponse = zAnnotationExportList + +export const zDeleteAppsByAppIdAnnotationsByAnnotationIdPath = z.object({ + annotation_id: z.string(), + app_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteAppsByAppIdAnnotationsByAnnotationIdResponse = z.record(z.string(), z.unknown()) + +export const zPostAppsByAppIdAnnotationsByAnnotationIdBody = zUpdateAnnotationPayload + +export const zPostAppsByAppIdAnnotationsByAnnotationIdPath = z.object({ + app_id: z.string(), + annotation_id: z.string(), +}) + +export const zPostAppsByAppIdAnnotationsByAnnotationIdResponse = z.union([ + zAnnotation, + z.record(z.string(), z.unknown()), +]) + +export const zGetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesPath = z.object({ + 
app_id: z.string(), + annotation_id: z.string(), +}) + +export const zGetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesQuery = z.object({ + page: z.int().optional().default(1), + limit: z.int().optional().default(20), +}) + +/** + * Hit histories retrieved successfully + */ +export const zGetAppsByAppIdAnnotationsByAnnotationIdHitHistoriesResponse + = zAnnotationHitHistoryList + +export const zPostAppsByAppIdApiEnableBody = zAppApiStatusPayload + +export const zPostAppsByAppIdApiEnablePath = z.object({ + app_id: z.string(), +}) + +/** + * API status updated successfully + */ +export const zPostAppsByAppIdApiEnableResponse = zAppDetail + +export const zPostAppsByAppIdAudioToTextPath = z.object({ + app_id: z.string(), +}) + +/** + * Audio transcription successful + */ +export const zPostAppsByAppIdAudioToTextResponse = zAudioTranscriptResponse + +export const zGetAppsByAppIdChatConversationsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdChatConversationsQuery = z.object({ + annotation_status: z.enum(['annotated', 'not_annotated', 'all']).optional().default('all'), + end: z.string().nullish(), + keyword: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), + page: z.int().gte(1).lte(99999).optional().default(1), + sort_by: z + .enum(['created_at', '-created_at', 'updated_at', '-updated_at']) + .optional() + .default('-updated_at'), + start: z.string().nullish(), +}) + +/** + * Success + */ +export const zGetAppsByAppIdChatConversationsResponse = zConversationWithSummaryPagination + +export const zDeleteAppsByAppIdChatConversationsByConversationIdPath = z.object({ + app_id: z.string(), + conversation_id: z.string(), +}) + +/** + * Conversation deleted successfully + */ +export const zDeleteAppsByAppIdChatConversationsByConversationIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdChatConversationsByConversationIdPath = z.object({ + app_id: z.string(), + conversation_id: 
z.string(), +}) + +/** + * Success + */ +export const zGetAppsByAppIdChatConversationsByConversationIdResponse = zConversationDetail + +export const zGetAppsByAppIdChatMessagesPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdChatMessagesQuery = z.object({ + conversation_id: z.string(), + first_id: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), +}) + +/** + * Success + */ +export const zGetAppsByAppIdChatMessagesResponse = zMessageInfiniteScrollPaginationResponse + +export const zGetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsPath = z.object({ + app_id: z.string(), + message_id: z.string(), +}) + +/** + * Suggested questions retrieved successfully + */ +export const zGetAppsByAppIdChatMessagesByMessageIdSuggestedQuestionsResponse + = zSuggestedQuestionsResponse + +export const zPostAppsByAppIdChatMessagesByTaskIdStopPath = z.object({ + app_id: z.string(), + task_id: z.string(), +}) + +/** + * Task stopped successfully + */ +export const zPostAppsByAppIdChatMessagesByTaskIdStopResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByAppIdCompletionConversationsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdCompletionConversationsQuery = z.object({ + annotation_status: z.enum(['annotated', 'not_annotated', 'all']).optional().default('all'), + end: z.string().nullish(), + keyword: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), + page: z.int().gte(1).lte(99999).optional().default(1), + start: z.string().nullish(), +}) + +/** + * Success + */ +export const zGetAppsByAppIdCompletionConversationsResponse = zConversationPagination + +export const zDeleteAppsByAppIdCompletionConversationsByConversationIdPath = z.object({ + app_id: z.string(), + conversation_id: z.string(), +}) + +/** + * Conversation deleted successfully + */ +export const zDeleteAppsByAppIdCompletionConversationsByConversationIdResponse = z.record( + z.string(), + 
z.unknown(), +) + +export const zGetAppsByAppIdCompletionConversationsByConversationIdPath = z.object({ + app_id: z.string(), + conversation_id: z.string(), +}) + +/** + * Success + */ +export const zGetAppsByAppIdCompletionConversationsByConversationIdResponse + = zConversationMessageDetail + +export const zPostAppsByAppIdCompletionMessagesBody = zCompletionMessagePayload + +export const zPostAppsByAppIdCompletionMessagesPath = z.object({ + app_id: z.string(), +}) + +/** + * Completion generated successfully + */ +export const zPostAppsByAppIdCompletionMessagesResponse = z.record(z.string(), z.unknown()) + +export const zPostAppsByAppIdCompletionMessagesByTaskIdStopPath = z.object({ + app_id: z.string(), + task_id: z.string(), +}) + +/** + * Task stopped successfully + */ +export const zPostAppsByAppIdCompletionMessagesByTaskIdStopResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdConversationVariablesPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdConversationVariablesQuery = z.object({ + conversation_id: z.string(), +}) + +/** + * Conversation variables retrieved successfully + */ +export const zGetAppsByAppIdConversationVariablesResponse = zPaginatedConversationVariableResponse + +export const zPostAppsByAppIdConvertToWorkflowBody = zConvertToWorkflowPayload + +export const zPostAppsByAppIdConvertToWorkflowPath = z.object({ + app_id: z.string(), +}) + +/** + * Application converted to workflow successfully + */ +export const zPostAppsByAppIdConvertToWorkflowResponse = z.record(z.string(), z.unknown()) + +export const zPostAppsByAppIdCopyBody = zCopyAppPayload + +export const zPostAppsByAppIdCopyPath = z.object({ + app_id: z.string(), +}) + +/** + * App copied successfully + */ +export const zPostAppsByAppIdCopyResponse = zAppDetailWithSite + +export const zGetAppsByAppIdExportPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdExportQuery = z.object({ + include_secret: 
z.boolean().optional().default(false), + workflow_id: z.string().nullish(), +}) + +/** + * App exported successfully + */ +export const zGetAppsByAppIdExportResponse = zAppExportResponse + +export const zPostAppsByAppIdFeedbacksBody = zMessageFeedbackPayload + +export const zPostAppsByAppIdFeedbacksPath = z.object({ + app_id: z.string(), +}) + +/** + * Feedback updated successfully + */ +export const zPostAppsByAppIdFeedbacksResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByAppIdFeedbacksExportPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdFeedbacksExportQuery = z.object({ + end_date: z.string().nullish(), + format: z.enum(['csv', 'json']).optional().default('csv'), + from_source: z.enum(['user', 'admin']).nullish(), + has_comment: z.boolean().nullish(), + rating: z.enum(['like', 'dislike']).nullish(), + start_date: z.string().nullish(), +}) + +/** + * Feedback data exported successfully + */ +export const zGetAppsByAppIdFeedbacksExportResponse = z.record(z.string(), z.unknown()) + +export const zPostAppsByAppIdIconBody = zAppIconPayload + +export const zPostAppsByAppIdIconPath = z.object({ + app_id: z.string(), +}) + +/** + * Icon updated successfully + */ +export const zPostAppsByAppIdIconResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByAppIdMessagesByMessageIdPath = z.object({ + app_id: z.string(), + message_id: z.string(), +}) + +/** + * Message retrieved successfully + */ +export const zGetAppsByAppIdMessagesByMessageIdResponse = zMessageDetailResponse + +export const zPostAppsByAppIdModelConfigBody = zModelConfigRequest + +export const zPostAppsByAppIdModelConfigPath = z.object({ + app_id: z.string(), +}) + +/** + * Model configuration updated successfully + */ +export const zPostAppsByAppIdModelConfigResponse = z.record(z.string(), z.unknown()) + +export const zPostAppsByAppIdNameBody = zAppNamePayload + +export const zPostAppsByAppIdNamePath = z.object({ + app_id: z.string(), +}) + +/** 
+ * Name availability checked + */ +export const zPostAppsByAppIdNameResponse = zAppDetail + +export const zPostAppsByAppIdPublishToCreatorsPlatformPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zPostAppsByAppIdPublishToCreatorsPlatformResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByAppIdServerPath = z.object({ + app_id: z.string(), +}) + +/** + * MCP server configuration retrieved successfully + */ +export const zGetAppsByAppIdServerResponse = zAppMcpServerResponse + +export const zPostAppsByAppIdServerBody = zMcpServerCreatePayload + +export const zPostAppsByAppIdServerPath = z.object({ + app_id: z.string(), +}) + +/** + * MCP server configuration created successfully + */ +export const zPostAppsByAppIdServerResponse = zAppMcpServerResponse + +export const zPutAppsByAppIdServerBody = zMcpServerUpdatePayload + +export const zPutAppsByAppIdServerPath = z.object({ + app_id: z.string(), +}) + +/** + * MCP server configuration updated successfully + */ +export const zPutAppsByAppIdServerResponse = zAppMcpServerResponse + +export const zPostAppsByAppIdSiteBody = zAppSiteUpdatePayload + +export const zPostAppsByAppIdSitePath = z.object({ + app_id: z.string(), +}) + +/** + * Site configuration updated successfully + */ +export const zPostAppsByAppIdSiteResponse = zAppSiteResponse + +export const zPostAppsByAppIdSiteEnableBody = zAppSiteStatusPayload + +export const zPostAppsByAppIdSiteEnablePath = z.object({ + app_id: z.string(), +}) + +/** + * Site status updated successfully + */ +export const zPostAppsByAppIdSiteEnableResponse = zAppDetail + +export const zPostAppsByAppIdSiteAccessTokenResetPath = z.object({ + app_id: z.string(), +}) + +/** + * Access token reset successfully + */ +export const zPostAppsByAppIdSiteAccessTokenResetResponse = zAppSiteResponse + +export const zGetAppsByAppIdStatisticsAverageResponseTimePath = z.object({ + app_id: z.string(), +}) + +export const 
zGetAppsByAppIdStatisticsAverageResponseTimeQuery = z.object({ + end: z.string().nullish(), + start: z.string().nullish(), +}) + +/** + * Average response time statistics retrieved successfully + */ +export const zGetAppsByAppIdStatisticsAverageResponseTimeResponse = z.array( + z.record(z.string(), z.unknown()), +) + +export const zGetAppsByAppIdStatisticsAverageSessionInteractionsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdStatisticsAverageSessionInteractionsQuery = z.object({ + end: z.string().nullish(), + start: z.string().nullish(), +}) + +/** + * Average session interaction statistics retrieved successfully + */ +export const zGetAppsByAppIdStatisticsAverageSessionInteractionsResponse = z.array( + z.record(z.string(), z.unknown()), +) + +export const zGetAppsByAppIdStatisticsDailyConversationsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdStatisticsDailyConversationsQuery = z.object({ + end: z.string().nullish(), + start: z.string().nullish(), +}) + +/** + * Daily conversation statistics retrieved successfully + */ +export const zGetAppsByAppIdStatisticsDailyConversationsResponse = z.array( + z.record(z.string(), z.unknown()), +) + +export const zGetAppsByAppIdStatisticsDailyEndUsersPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdStatisticsDailyEndUsersQuery = z.object({ + end: z.string().nullish(), + start: z.string().nullish(), +}) + +/** + * Daily terminal statistics retrieved successfully + */ +export const zGetAppsByAppIdStatisticsDailyEndUsersResponse = z.array( + z.record(z.string(), z.unknown()), +) + +export const zGetAppsByAppIdStatisticsDailyMessagesPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdStatisticsDailyMessagesQuery = z.object({ + end: z.string().nullish(), + start: z.string().nullish(), +}) + +/** + * Daily message statistics retrieved successfully + */ +export const zGetAppsByAppIdStatisticsDailyMessagesResponse = z.array( + 
z.record(z.string(), z.unknown()), +) + +export const zGetAppsByAppIdStatisticsTokenCostsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdStatisticsTokenCostsQuery = z.object({ + end: z.string().nullish(), + start: z.string().nullish(), +}) + +/** + * Daily token cost statistics retrieved successfully + */ +export const zGetAppsByAppIdStatisticsTokenCostsResponse = z.array( + z.record(z.string(), z.unknown()), +) + +export const zGetAppsByAppIdStatisticsTokensPerSecondPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdStatisticsTokensPerSecondQuery = z.object({ + end: z.string().nullish(), + start: z.string().nullish(), +}) + +/** + * Tokens per second statistics retrieved successfully + */ +export const zGetAppsByAppIdStatisticsTokensPerSecondResponse = z.array( + z.record(z.string(), z.unknown()), +) + +export const zGetAppsByAppIdStatisticsUserSatisfactionRatePath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdStatisticsUserSatisfactionRateQuery = z.object({ + end: z.string().nullish(), + start: z.string().nullish(), +}) + +/** + * User satisfaction rate statistics retrieved successfully + */ +export const zGetAppsByAppIdStatisticsUserSatisfactionRateResponse = z.array( + z.record(z.string(), z.unknown()), +) + +export const zPostAppsByAppIdTextToAudioBody = zTextToSpeechPayload + +export const zPostAppsByAppIdTextToAudioPath = z.object({ + app_id: z.string(), +}) + +/** + * Text to speech conversion successful + */ +export const zPostAppsByAppIdTextToAudioResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByAppIdTextToAudioVoicesPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdTextToAudioVoicesQuery = z.object({ + language: z.string(), +}) + +/** + * TTS voices retrieved successfully + */ +export const zGetAppsByAppIdTextToAudioVoicesResponse = z.array(z.record(z.string(), z.unknown())) + +export const zGetAppsByAppIdTracePath = z.object({ + 
app_id: z.string(), +}) + +/** + * Trace configuration retrieved successfully + */ +export const zGetAppsByAppIdTraceResponse = z.record(z.string(), z.unknown()) + +export const zPostAppsByAppIdTraceBody = zAppTracePayload + +export const zPostAppsByAppIdTracePath = z.object({ + app_id: z.string(), +}) + +/** + * Trace configuration updated successfully + */ +export const zPostAppsByAppIdTraceResponse = z.record(z.string(), z.unknown()) + +export const zDeleteAppsByAppIdTraceConfigBody = zTraceProviderQuery + +export const zDeleteAppsByAppIdTraceConfigPath = z.object({ + app_id: z.string(), +}) + +/** + * Tracing configuration deleted successfully + */ +export const zDeleteAppsByAppIdTraceConfigResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByAppIdTraceConfigPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdTraceConfigQuery = z.object({ + tracing_provider: z.string(), +}) + +/** + * Tracing configuration data + */ +export const zGetAppsByAppIdTraceConfigResponse = z.record(z.string(), z.unknown()) + +export const zPatchAppsByAppIdTraceConfigBody = zTraceConfigPayload + +export const zPatchAppsByAppIdTraceConfigPath = z.object({ + app_id: z.string(), +}) + +/** + * Success response + */ +export const zPatchAppsByAppIdTraceConfigResponse = z.record(z.string(), z.unknown()) + +export const zPostAppsByAppIdTraceConfigBody = zTraceConfigPayload + +export const zPostAppsByAppIdTraceConfigPath = z.object({ + app_id: z.string(), +}) + +/** + * Created configuration data + */ +export const zPostAppsByAppIdTraceConfigResponse = z.record(z.string(), z.unknown()) + +export const zPostAppsByAppIdTriggerEnableBody = zParserEnable + +export const zPostAppsByAppIdTriggerEnablePath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zPostAppsByAppIdTriggerEnableResponse = zWorkflowTriggerResponse + +export const zGetAppsByAppIdTriggersPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export 
const zGetAppsByAppIdTriggersResponse = zWorkflowTriggerListResponse + +export const zGetAppsByAppIdWorkflowAppLogsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdWorkflowAppLogsQuery = z.object({ + created_at__after: z.iso.datetime().nullish(), + created_at__before: z.iso.datetime().nullish(), + created_by_account: z.string().nullish(), + created_by_end_user_session_id: z.string().nullish(), + detail: z.boolean().optional().default(false), + keyword: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), + page: z.int().gte(1).lte(99999).optional().default(1), + status: z.string().nullish(), +}) + +/** + * Workflow app logs retrieved successfully + */ +export const zGetAppsByAppIdWorkflowAppLogsResponse = zWorkflowAppLogPaginationResponse + +export const zGetAppsByAppIdWorkflowArchivedLogsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdWorkflowArchivedLogsQuery = z.object({ + created_at__after: z.iso.datetime().nullish(), + created_at__before: z.iso.datetime().nullish(), + created_by_account: z.string().nullish(), + created_by_end_user_session_id: z.string().nullish(), + detail: z.boolean().optional().default(false), + keyword: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), + page: z.int().gte(1).lte(99999).optional().default(1), + status: z.string().nullish(), +}) + +/** + * Workflow archived logs retrieved successfully + */ +export const zGetAppsByAppIdWorkflowArchivedLogsResponse = zWorkflowArchivedLogPaginationResponse + +export const zGetAppsByAppIdWorkflowRunsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdWorkflowRunsQuery = z.object({ + triggered_from: z.enum(['debugging', 'app-run']).nullish(), + status: z.enum(['running', 'succeeded', 'failed', 'stopped', 'partial-succeeded']).nullish(), + last_id: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), +}) + +/** + * Workflow runs retrieved 
successfully + */ +export const zGetAppsByAppIdWorkflowRunsResponse = zWorkflowRunPagination + +export const zGetAppsByAppIdWorkflowRunsCountPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdWorkflowRunsCountQuery = z.object({ + triggered_from: z.enum(['debugging', 'app-run']).nullish(), + time_range: z.string().nullish(), + status: z.enum(['running', 'succeeded', 'failed', 'stopped', 'partial-succeeded']).nullish(), +}) + +/** + * Workflow runs count retrieved successfully + */ +export const zGetAppsByAppIdWorkflowRunsCountResponse = zWorkflowRunCount + +export const zPostAppsByAppIdWorkflowRunsTasksByTaskIdStopPath = z.object({ + app_id: z.string(), + task_id: z.string(), +}) + +/** + * Task stopped successfully + */ +export const zPostAppsByAppIdWorkflowRunsTasksByTaskIdStopResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdWorkflowRunsByRunIdPath = z.object({ + app_id: z.string(), + run_id: z.string(), +}) + +/** + * Workflow run detail retrieved successfully + */ +export const zGetAppsByAppIdWorkflowRunsByRunIdResponse = zWorkflowRunDetail + +export const zGetAppsByAppIdWorkflowRunsByRunIdExportPath = z.object({ + app_id: z.string(), + run_id: z.string(), +}) + +/** + * Export URL generated + */ +export const zGetAppsByAppIdWorkflowRunsByRunIdExportResponse = zWorkflowRunExport + +export const zGetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsPath = z.object({ + app_id: z.string(), + run_id: z.string(), +}) + +/** + * Node executions retrieved successfully + */ +export const zGetAppsByAppIdWorkflowRunsByRunIdNodeExecutionsResponse + = zWorkflowRunNodeExecutionList + +export const zGetAppsByAppIdWorkflowCommentsPath = z.object({ + app_id: z.string(), +}) + +/** + * Comments retrieved successfully + */ +export const zGetAppsByAppIdWorkflowCommentsResponse = zWorkflowCommentBasic + +export const zPostAppsByAppIdWorkflowCommentsBody = zWorkflowCommentCreatePayload + +export const 
zPostAppsByAppIdWorkflowCommentsPath = z.object({ + app_id: z.string(), +}) + +/** + * Comment created successfully + */ +export const zPostAppsByAppIdWorkflowCommentsResponse = zWorkflowCommentCreate + +export const zGetAppsByAppIdWorkflowCommentsMentionUsersPath = z.object({ + app_id: z.string(), +}) + +/** + * Mentionable users retrieved successfully + */ +export const zGetAppsByAppIdWorkflowCommentsMentionUsersResponse + = zWorkflowCommentMentionUsersPayload + +export const zDeleteAppsByAppIdWorkflowCommentsByCommentIdPath = z.object({ + app_id: z.string(), + comment_id: z.string(), +}) + +/** + * Comment deleted successfully + */ +export const zDeleteAppsByAppIdWorkflowCommentsByCommentIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdWorkflowCommentsByCommentIdPath = z.object({ + app_id: z.string(), + comment_id: z.string(), +}) + +/** + * Comment retrieved successfully + */ +export const zGetAppsByAppIdWorkflowCommentsByCommentIdResponse = zWorkflowCommentDetail + +export const zPutAppsByAppIdWorkflowCommentsByCommentIdBody = zWorkflowCommentUpdatePayload + +export const zPutAppsByAppIdWorkflowCommentsByCommentIdPath = z.object({ + app_id: z.string(), + comment_id: z.string(), +}) + +/** + * Comment updated successfully + */ +export const zPutAppsByAppIdWorkflowCommentsByCommentIdResponse = zWorkflowCommentUpdate + +export const zPostAppsByAppIdWorkflowCommentsByCommentIdRepliesBody = zWorkflowCommentReplyPayload + +export const zPostAppsByAppIdWorkflowCommentsByCommentIdRepliesPath = z.object({ + app_id: z.string(), + comment_id: z.string(), +}) + +/** + * Reply created successfully + */ +export const zPostAppsByAppIdWorkflowCommentsByCommentIdRepliesResponse + = zWorkflowCommentReplyCreate + +export const zDeleteAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdPath = z.object({ + app_id: z.string(), + comment_id: z.string(), + reply_id: z.string(), +}) + +/** + * Reply deleted successfully + */ +export const 
zDeleteAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdBody + = zWorkflowCommentReplyPayload + +export const zPutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdPath = z.object({ + app_id: z.string(), + comment_id: z.string(), + reply_id: z.string(), +}) + +/** + * Reply updated successfully + */ +export const zPutAppsByAppIdWorkflowCommentsByCommentIdRepliesByReplyIdResponse + = zWorkflowCommentReplyUpdate + +export const zPostAppsByAppIdWorkflowCommentsByCommentIdResolvePath = z.object({ + app_id: z.string(), + comment_id: z.string(), +}) + +/** + * Comment resolved successfully + */ +export const zPostAppsByAppIdWorkflowCommentsByCommentIdResolveResponse = zWorkflowCommentResolve + +export const zGetAppsByAppIdWorkflowStatisticsAverageAppInteractionsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdWorkflowStatisticsAverageAppInteractionsQuery = z.object({ + end: z.string().nullish(), + start: z.string().nullish(), +}) + +/** + * Average app interaction statistics retrieved successfully + */ +export const zGetAppsByAppIdWorkflowStatisticsAverageAppInteractionsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdWorkflowStatisticsDailyConversationsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdWorkflowStatisticsDailyConversationsQuery = z.object({ + end: z.string().nullish(), + start: z.string().nullish(), +}) + +/** + * Daily runs statistics retrieved successfully + */ +export const zGetAppsByAppIdWorkflowStatisticsDailyConversationsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdWorkflowStatisticsDailyTerminalsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdWorkflowStatisticsDailyTerminalsQuery = z.object({ + end: z.string().nullish(), + start: z.string().nullish(), +}) + +/** + * 
Daily terminals statistics retrieved successfully + */ +export const zGetAppsByAppIdWorkflowStatisticsDailyTerminalsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdWorkflowStatisticsTokenCostsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdWorkflowStatisticsTokenCostsQuery = z.object({ + end: z.string().nullish(), + start: z.string().nullish(), +}) + +/** + * Daily token cost statistics retrieved successfully + */ +export const zGetAppsByAppIdWorkflowStatisticsTokenCostsResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByAppIdWorkflowsPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdWorkflowsQuery = z.object({ + limit: z.int().gte(1).lte(100).optional().default(10), + named_only: z.boolean().optional().default(false), + page: z.int().gte(1).lte(99999).optional().default(1), + user_id: z.string().nullish(), +}) + +/** + * Published workflows retrieved successfully + */ +export const zGetAppsByAppIdWorkflowsResponse = zWorkflowPagination + +export const zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsPath = z.object({ + app_id: z.string(), +}) + +/** + * Default block configurations retrieved successfully + */ +export const zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypePath = z.object({ + app_id: z.string(), + block_type: z.string(), +}) + +export const zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeQuery = z.object({ + q: z.string().nullish(), +}) + +/** + * Default block configuration retrieved successfully + */ +export const zGetAppsByAppIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdWorkflowsDraftPath = z.object({ + app_id: z.string(), +}) + +/** + * Draft workflow retrieved successfully + */ +export const 
zGetAppsByAppIdWorkflowsDraftResponse = zWorkflow + +export const zPostAppsByAppIdWorkflowsDraftBody = zSyncDraftWorkflowPayload + +export const zPostAppsByAppIdWorkflowsDraftPath = z.object({ + app_id: z.string(), +}) + +/** + * Draft workflow synced successfully + */ +export const zPostAppsByAppIdWorkflowsDraftResponse = zSyncDraftWorkflowResponse + +export const zGetAppsByAppIdWorkflowsDraftConversationVariablesPath = z.object({ + app_id: z.string(), +}) + +/** + * Conversation variables retrieved successfully + */ +export const zGetAppsByAppIdWorkflowsDraftConversationVariablesResponse = zWorkflowDraftVariableList + +export const zPostAppsByAppIdWorkflowsDraftConversationVariablesBody + = zConversationVariableUpdatePayload + +export const zPostAppsByAppIdWorkflowsDraftConversationVariablesPath = z.object({ + app_id: z.string(), +}) + +/** + * Conversation variables updated successfully + */ +export const zPostAppsByAppIdWorkflowsDraftConversationVariablesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdWorkflowsDraftEnvironmentVariablesPath = z.object({ + app_id: z.string(), +}) + +/** + * Environment variables retrieved successfully + */ +export const zGetAppsByAppIdWorkflowsDraftEnvironmentVariablesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostAppsByAppIdWorkflowsDraftEnvironmentVariablesBody + = zEnvironmentVariableUpdatePayload + +export const zPostAppsByAppIdWorkflowsDraftEnvironmentVariablesPath = z.object({ + app_id: z.string(), +}) + +/** + * Environment variables updated successfully + */ +export const zPostAppsByAppIdWorkflowsDraftEnvironmentVariablesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostAppsByAppIdWorkflowsDraftFeaturesBody = zWorkflowFeaturesPayload + +export const zPostAppsByAppIdWorkflowsDraftFeaturesPath = z.object({ + app_id: z.string(), +}) + +/** + * Workflow features updated successfully + */ +export const 
zPostAppsByAppIdWorkflowsDraftFeaturesResponse = z.record(z.string(), z.unknown()) + +export const zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestBody + = zHumanInputDeliveryTestPayload + +export const zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestPath = z.object({ + app_id: z.string(), + node_id: z.string(), +}) + +/** + * Success + */ +export const zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdDeliveryTestResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewBody + = zHumanInputFormPreviewPayload + +export const zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewPath = z.object({ + app_id: z.string(), + node_id: z.string(), +}) + +/** + * Success + */ +export const zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormPreviewResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunBody + = zHumanInputFormSubmitPayload + +export const zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunPath = z.object({ + app_id: z.string(), + node_id: z.string(), +}) + +/** + * Success + */ +export const zPostAppsByAppIdWorkflowsDraftHumanInputNodesByNodeIdFormRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunBody = zIterationNodeRunPayload + +export const zPostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunPath = z.object({ + app_id: z.string(), + node_id: z.string(), +}) + +/** + * Workflow iteration node run started successfully + */ +export const zPostAppsByAppIdWorkflowsDraftIterationNodesByNodeIdRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunBody = zLoopNodeRunPayload + +export const zPostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunPath = z.object({ + app_id: z.string(), + node_id: z.string(), +}) + +/** + 
* Workflow loop node run started successfully + */ +export const zPostAppsByAppIdWorkflowsDraftLoopNodesByNodeIdRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunPath = z.object({ + app_id: z.string(), + node_id: z.string(), +}) + +/** + * Node last run retrieved successfully + */ +export const zGetAppsByAppIdWorkflowsDraftNodesByNodeIdLastRunResponse = zWorkflowRunNodeExecution + +export const zPostAppsByAppIdWorkflowsDraftNodesByNodeIdRunBody = zDraftWorkflowNodeRunPayload + +export const zPostAppsByAppIdWorkflowsDraftNodesByNodeIdRunPath = z.object({ + app_id: z.string(), + node_id: z.string(), +}) + +/** + * Node run started successfully + */ +export const zPostAppsByAppIdWorkflowsDraftNodesByNodeIdRunResponse = zWorkflowRunNodeExecution + +export const zPostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunPath = z.object({ + app_id: z.string(), + node_id: z.string(), +}) + +/** + * Trigger event received and node executed successfully + */ +export const zPostAppsByAppIdWorkflowsDraftNodesByNodeIdTriggerRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesPath = z.object({ + node_id: z.string(), + app_id: z.string(), +}) + +/** + * Node variables deleted successfully + */ +export const zDeleteAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesPath = z.object({ + app_id: z.string(), + node_id: z.string(), +}) + +/** + * Node variables retrieved successfully + */ +export const zGetAppsByAppIdWorkflowsDraftNodesByNodeIdVariablesResponse + = zWorkflowDraftVariableList + +export const zPostAppsByAppIdWorkflowsDraftRunBody = zDraftWorkflowRunPayload + +export const zPostAppsByAppIdWorkflowsDraftRunPath = z.object({ + app_id: z.string(), +}) + +/** + * Draft workflow run started successfully + */ +export const 
zPostAppsByAppIdWorkflowsDraftRunResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByAppIdWorkflowsDraftSystemVariablesPath = z.object({ + app_id: z.string(), +}) + +/** + * System variables retrieved successfully + */ +export const zGetAppsByAppIdWorkflowsDraftSystemVariablesResponse = zWorkflowDraftVariableList + +export const zPostAppsByAppIdWorkflowsDraftTriggerRunBody = zDraftWorkflowTriggerRunRequest + +export const zPostAppsByAppIdWorkflowsDraftTriggerRunPath = z.object({ + app_id: z.string(), +}) + +/** + * Trigger event received and workflow executed successfully + */ +export const zPostAppsByAppIdWorkflowsDraftTriggerRunResponse = z.record(z.string(), z.unknown()) + +export const zPostAppsByAppIdWorkflowsDraftTriggerRunAllBody = zDraftWorkflowTriggerRunAllPayload + +export const zPostAppsByAppIdWorkflowsDraftTriggerRunAllPath = z.object({ + app_id: z.string(), +}) + +/** + * Workflow executed successfully + */ +export const zPostAppsByAppIdWorkflowsDraftTriggerRunAllResponse = z.record(z.string(), z.unknown()) + +export const zDeleteAppsByAppIdWorkflowsDraftVariablesPath = z.object({ + app_id: z.string(), +}) + +/** + * Workflow variables deleted successfully + */ +export const zDeleteAppsByAppIdWorkflowsDraftVariablesResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByAppIdWorkflowsDraftVariablesPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdWorkflowsDraftVariablesQuery = z.object({ + page: z.int().gte(1).lte(100000).optional().default(1), + limit: z.int().gte(1).lte(100).optional().default(20), +}) + +/** + * Workflow variables retrieved successfully + */ +export const zGetAppsByAppIdWorkflowsDraftVariablesResponse = zWorkflowDraftVariableListWithoutValue + +export const zDeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdPath = z.object({ + variable_id: z.string(), + app_id: z.string(), +}) + +/** + * Variable deleted successfully + */ +export const 
zDeleteAppsByAppIdWorkflowsDraftVariablesByVariableIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByAppIdWorkflowsDraftVariablesByVariableIdPath = z.object({ + app_id: z.string(), + variable_id: z.string(), +}) + +/** + * Variable retrieved successfully + */ +export const zGetAppsByAppIdWorkflowsDraftVariablesByVariableIdResponse = zWorkflowDraftVariable + +export const zPatchAppsByAppIdWorkflowsDraftVariablesByVariableIdBody + = zWorkflowDraftVariableUpdatePayload + +export const zPatchAppsByAppIdWorkflowsDraftVariablesByVariableIdPath = z.object({ + variable_id: z.string(), + app_id: z.string(), +}) + +/** + * Variable updated successfully + */ +export const zPatchAppsByAppIdWorkflowsDraftVariablesByVariableIdResponse = zWorkflowDraftVariable + +export const zPutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetPath = z.object({ + app_id: z.string(), + variable_id: z.string(), +}) + +export const zPutAppsByAppIdWorkflowsDraftVariablesByVariableIdResetResponse = z.union([ + zWorkflowDraftVariable, + z.record(z.string(), z.unknown()), +]) + +export const zGetAppsByAppIdWorkflowsPublishPath = z.object({ + app_id: z.string(), +}) + +/** + * Published workflow retrieved successfully + */ +export const zGetAppsByAppIdWorkflowsPublishResponse = zWorkflow + +export const zPostAppsByAppIdWorkflowsPublishBody = zPublishWorkflowPayload + +export const zPostAppsByAppIdWorkflowsPublishPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zPostAppsByAppIdWorkflowsPublishResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByAppIdWorkflowsTriggersWebhookPath = z.object({ + app_id: z.string(), +}) + +export const zGetAppsByAppIdWorkflowsTriggersWebhookQuery = z.object({ + credential_id: z.string().nullish(), + datasource_type: z.string(), + inputs: z.string(), +}) + +/** + * Success + */ +export const zGetAppsByAppIdWorkflowsTriggersWebhookResponse = zWebhookTriggerResponse + +export const 
zDeleteAppsByAppIdWorkflowsByWorkflowIdPath = z.object({ + workflow_id: z.string(), + app_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteAppsByAppIdWorkflowsByWorkflowIdResponse = z.record(z.string(), z.unknown()) + +export const zPatchAppsByAppIdWorkflowsByWorkflowIdBody = zWorkflowUpdatePayload + +export const zPatchAppsByAppIdWorkflowsByWorkflowIdPath = z.object({ + app_id: z.string(), + workflow_id: z.string(), +}) + +/** + * Workflow updated successfully + */ +export const zPatchAppsByAppIdWorkflowsByWorkflowIdResponse = zWorkflow + +export const zPostAppsByAppIdWorkflowsByWorkflowIdRestorePath = z.object({ + app_id: z.string(), + workflow_id: z.string(), +}) + +/** + * Workflow restored successfully + */ +export const zPostAppsByAppIdWorkflowsByWorkflowIdRestoreResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetAppsByResourceIdApiKeysPath = z.object({ + resource_id: z.string(), +}) + +/** + * API keys retrieved successfully + */ +export const zGetAppsByResourceIdApiKeysResponse = zApiKeyList + +export const zPostAppsByResourceIdApiKeysPath = z.object({ + resource_id: z.string(), +}) + +/** + * API key created successfully + */ +export const zPostAppsByResourceIdApiKeysResponse = zApiKeyItem + +export const zDeleteAppsByResourceIdApiKeysByApiKeyIdPath = z.object({ + resource_id: z.string(), + api_key_id: z.string(), +}) + +/** + * API key deleted successfully + */ +export const zDeleteAppsByResourceIdApiKeysByApiKeyIdResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsByServerIdServerRefreshPath = z.object({ + server_id: z.string(), +}) + +/** + * MCP server refreshed successfully + */ +export const zGetAppsByServerIdServerRefreshResponse = zAppMcpServerResponse diff --git a/packages/contracts/generated/api/console/auth/orpc.gen.ts b/packages/contracts/generated/api/console/auth/orpc.gen.ts new file mode 100644 index 0000000000..7a95a96f10 --- /dev/null +++ 
b/packages/contracts/generated/api/console/auth/orpc.gen.ts @@ -0,0 +1,226 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zDeleteAuthPluginDatasourceByProviderIdCustomClientPath, + zDeleteAuthPluginDatasourceByProviderIdCustomClientResponse, + zGetAuthPluginDatasourceByProviderIdPath, + zGetAuthPluginDatasourceByProviderIdResponse, + zGetAuthPluginDatasourceDefaultListResponse, + zGetAuthPluginDatasourceListResponse, + zPostAuthPluginDatasourceByProviderIdBody, + zPostAuthPluginDatasourceByProviderIdCustomClientBody, + zPostAuthPluginDatasourceByProviderIdCustomClientPath, + zPostAuthPluginDatasourceByProviderIdCustomClientResponse, + zPostAuthPluginDatasourceByProviderIdDefaultBody, + zPostAuthPluginDatasourceByProviderIdDefaultPath, + zPostAuthPluginDatasourceByProviderIdDefaultResponse, + zPostAuthPluginDatasourceByProviderIdDeleteBody, + zPostAuthPluginDatasourceByProviderIdDeletePath, + zPostAuthPluginDatasourceByProviderIdDeleteResponse, + zPostAuthPluginDatasourceByProviderIdPath, + zPostAuthPluginDatasourceByProviderIdResponse, + zPostAuthPluginDatasourceByProviderIdUpdateBody, + zPostAuthPluginDatasourceByProviderIdUpdateNameBody, + zPostAuthPluginDatasourceByProviderIdUpdateNamePath, + zPostAuthPluginDatasourceByProviderIdUpdateNameResponse, + zPostAuthPluginDatasourceByProviderIdUpdatePath, + zPostAuthPluginDatasourceByProviderIdUpdateResponse, +} from './zod.gen' + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAuthPluginDatasourceDefaultList', + path: '/auth/plugin/datasource/default-list', + tags: ['console'], + }) + .output(zGetAuthPluginDatasourceDefaultListResponse) + +export const defaultList = { + get, +} + +export const get2 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAuthPluginDatasourceList', + path: '/auth/plugin/datasource/list', + tags: ['console'], + }) + 
.output(zGetAuthPluginDatasourceListResponse) + +export const list = { + get: get2, +} + +export const delete_ = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAuthPluginDatasourceByProviderIdCustomClient', + path: '/auth/plugin/datasource/{provider_id}/custom-client', + tags: ['console'], + }) + .input(z.object({ params: zDeleteAuthPluginDatasourceByProviderIdCustomClientPath })) + .output(zDeleteAuthPluginDatasourceByProviderIdCustomClientResponse) + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAuthPluginDatasourceByProviderIdCustomClient', + path: '/auth/plugin/datasource/{provider_id}/custom-client', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAuthPluginDatasourceByProviderIdCustomClientBody, + params: zPostAuthPluginDatasourceByProviderIdCustomClientPath, + }), + ) + .output(zPostAuthPluginDatasourceByProviderIdCustomClientResponse) + +export const customClient = { + delete: delete_, + post, +} + +export const post2 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAuthPluginDatasourceByProviderIdDefault', + path: '/auth/plugin/datasource/{provider_id}/default', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAuthPluginDatasourceByProviderIdDefaultBody, + params: zPostAuthPluginDatasourceByProviderIdDefaultPath, + }), + ) + .output(zPostAuthPluginDatasourceByProviderIdDefaultResponse) + +export const default_ = { + post: post2, +} + +export const post3 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAuthPluginDatasourceByProviderIdDelete', + path: '/auth/plugin/datasource/{provider_id}/delete', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAuthPluginDatasourceByProviderIdDeleteBody, + params: zPostAuthPluginDatasourceByProviderIdDeletePath, + }), + ) + .output(zPostAuthPluginDatasourceByProviderIdDeleteResponse) + +export const delete2 = { + post: 
post3, +} + +export const post4 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAuthPluginDatasourceByProviderIdUpdate', + path: '/auth/plugin/datasource/{provider_id}/update', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAuthPluginDatasourceByProviderIdUpdateBody, + params: zPostAuthPluginDatasourceByProviderIdUpdatePath, + }), + ) + .output(zPostAuthPluginDatasourceByProviderIdUpdateResponse) + +export const update = { + post: post4, +} + +export const post5 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAuthPluginDatasourceByProviderIdUpdateName', + path: '/auth/plugin/datasource/{provider_id}/update-name', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAuthPluginDatasourceByProviderIdUpdateNameBody, + params: zPostAuthPluginDatasourceByProviderIdUpdateNamePath, + }), + ) + .output(zPostAuthPluginDatasourceByProviderIdUpdateNameResponse) + +export const updateName = { + post: post5, +} + +export const get3 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAuthPluginDatasourceByProviderId', + path: '/auth/plugin/datasource/{provider_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetAuthPluginDatasourceByProviderIdPath })) + .output(zGetAuthPluginDatasourceByProviderIdResponse) + +export const post6 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAuthPluginDatasourceByProviderId', + path: '/auth/plugin/datasource/{provider_id}', + tags: ['console'], + }) + .input( + z.object({ + body: zPostAuthPluginDatasourceByProviderIdBody, + params: zPostAuthPluginDatasourceByProviderIdPath, + }), + ) + .output(zPostAuthPluginDatasourceByProviderIdResponse) + +export const byProviderId = { + get: get3, + post: post6, + customClient, + default: default_, + delete: delete2, + update, + updateName, +} + +export const datasource = { + defaultList, + list, + byProviderId, +} + +export const plugin 
= { + datasource, +} + +export const auth = { + plugin, +} + +export const contract = { + auth, +} diff --git a/packages/contracts/generated/api/console/auth/types.gen.ts b/packages/contracts/generated/api/console/auth/types.gen.ts new file mode 100644 index 0000000000..1a974f626b --- /dev/null +++ b/packages/contracts/generated/api/console/auth/types.gen.ts @@ -0,0 +1,216 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type DatasourceCredentialPayload = { + credentials: { + [key: string]: unknown + } + name?: string | null +} + +export type DatasourceCustomClientPayload = { + client_params?: { + [key: string]: unknown + } | null + enable_oauth_custom_client?: boolean | null +} + +export type DatasourceDefaultPayload = { + id: string +} + +export type DatasourceCredentialDeletePayload = { + credential_id: string +} + +export type DatasourceCredentialUpdatePayload = { + credential_id: string + credentials?: { + [key: string]: unknown + } | null + name?: string | null +} + +export type DatasourceUpdateNamePayload = { + credential_id: string + name: string +} + +export type GetAuthPluginDatasourceDefaultListData = { + body?: never + path?: never + query?: never + url: '/auth/plugin/datasource/default-list' +} + +export type GetAuthPluginDatasourceDefaultListResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAuthPluginDatasourceDefaultListResponse + = GetAuthPluginDatasourceDefaultListResponses[keyof GetAuthPluginDatasourceDefaultListResponses] + +export type GetAuthPluginDatasourceListData = { + body?: never + path?: never + query?: never + url: '/auth/plugin/datasource/list' +} + +export type GetAuthPluginDatasourceListResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAuthPluginDatasourceListResponse + = GetAuthPluginDatasourceListResponses[keyof GetAuthPluginDatasourceListResponses] + +export type 
GetAuthPluginDatasourceByProviderIdData = { + body?: never + path: { + provider_id: string + } + query?: never + url: '/auth/plugin/datasource/{provider_id}' +} + +export type GetAuthPluginDatasourceByProviderIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAuthPluginDatasourceByProviderIdResponse + = GetAuthPluginDatasourceByProviderIdResponses[keyof GetAuthPluginDatasourceByProviderIdResponses] + +export type PostAuthPluginDatasourceByProviderIdData = { + body: DatasourceCredentialPayload + path: { + provider_id: string + } + query?: never + url: '/auth/plugin/datasource/{provider_id}' +} + +export type PostAuthPluginDatasourceByProviderIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAuthPluginDatasourceByProviderIdResponse + = PostAuthPluginDatasourceByProviderIdResponses[keyof PostAuthPluginDatasourceByProviderIdResponses] + +export type DeleteAuthPluginDatasourceByProviderIdCustomClientData = { + body?: never + path: { + provider_id: string + } + query?: never + url: '/auth/plugin/datasource/{provider_id}/custom-client' +} + +export type DeleteAuthPluginDatasourceByProviderIdCustomClientResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteAuthPluginDatasourceByProviderIdCustomClientResponse + = DeleteAuthPluginDatasourceByProviderIdCustomClientResponses[keyof DeleteAuthPluginDatasourceByProviderIdCustomClientResponses] + +export type PostAuthPluginDatasourceByProviderIdCustomClientData = { + body: DatasourceCustomClientPayload + path: { + provider_id: string + } + query?: never + url: '/auth/plugin/datasource/{provider_id}/custom-client' +} + +export type PostAuthPluginDatasourceByProviderIdCustomClientResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAuthPluginDatasourceByProviderIdCustomClientResponse + = PostAuthPluginDatasourceByProviderIdCustomClientResponses[keyof PostAuthPluginDatasourceByProviderIdCustomClientResponses] + +export type 
PostAuthPluginDatasourceByProviderIdDefaultData = { + body: DatasourceDefaultPayload + path: { + provider_id: string + } + query?: never + url: '/auth/plugin/datasource/{provider_id}/default' +} + +export type PostAuthPluginDatasourceByProviderIdDefaultResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAuthPluginDatasourceByProviderIdDefaultResponse + = PostAuthPluginDatasourceByProviderIdDefaultResponses[keyof PostAuthPluginDatasourceByProviderIdDefaultResponses] + +export type PostAuthPluginDatasourceByProviderIdDeleteData = { + body: DatasourceCredentialDeletePayload + path: { + provider_id: string + } + query?: never + url: '/auth/plugin/datasource/{provider_id}/delete' +} + +export type PostAuthPluginDatasourceByProviderIdDeleteResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAuthPluginDatasourceByProviderIdDeleteResponse + = PostAuthPluginDatasourceByProviderIdDeleteResponses[keyof PostAuthPluginDatasourceByProviderIdDeleteResponses] + +export type PostAuthPluginDatasourceByProviderIdUpdateData = { + body: DatasourceCredentialUpdatePayload + path: { + provider_id: string + } + query?: never + url: '/auth/plugin/datasource/{provider_id}/update' +} + +export type PostAuthPluginDatasourceByProviderIdUpdateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAuthPluginDatasourceByProviderIdUpdateResponse + = PostAuthPluginDatasourceByProviderIdUpdateResponses[keyof PostAuthPluginDatasourceByProviderIdUpdateResponses] + +export type PostAuthPluginDatasourceByProviderIdUpdateNameData = { + body: DatasourceUpdateNamePayload + path: { + provider_id: string + } + query?: never + url: '/auth/plugin/datasource/{provider_id}/update-name' +} + +export type PostAuthPluginDatasourceByProviderIdUpdateNameResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAuthPluginDatasourceByProviderIdUpdateNameResponse + = PostAuthPluginDatasourceByProviderIdUpdateNameResponses[keyof 
PostAuthPluginDatasourceByProviderIdUpdateNameResponses] diff --git a/packages/contracts/generated/api/console/auth/zod.gen.ts b/packages/contracts/generated/api/console/auth/zod.gen.ts new file mode 100644 index 0000000000..3d183e09cd --- /dev/null +++ b/packages/contracts/generated/api/console/auth/zod.gen.ts @@ -0,0 +1,156 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * DatasourceCredentialPayload + */ +export const zDatasourceCredentialPayload = z.object({ + credentials: z.record(z.string(), z.unknown()), + name: z.string().max(100).nullish(), +}) + +/** + * DatasourceCustomClientPayload + */ +export const zDatasourceCustomClientPayload = z.object({ + client_params: z.record(z.string(), z.unknown()).nullish(), + enable_oauth_custom_client: z.boolean().nullish(), +}) + +/** + * DatasourceDefaultPayload + */ +export const zDatasourceDefaultPayload = z.object({ + id: z.string(), +}) + +/** + * DatasourceCredentialDeletePayload + */ +export const zDatasourceCredentialDeletePayload = z.object({ + credential_id: z.string(), +}) + +/** + * DatasourceCredentialUpdatePayload + */ +export const zDatasourceCredentialUpdatePayload = z.object({ + credential_id: z.string(), + credentials: z.record(z.string(), z.unknown()).nullish(), + name: z.string().max(100).nullish(), +}) + +/** + * DatasourceUpdateNamePayload + */ +export const zDatasourceUpdateNamePayload = z.object({ + credential_id: z.string(), + name: z.string().max(100), +}) + +/** + * Success + */ +export const zGetAuthPluginDatasourceDefaultListResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetAuthPluginDatasourceListResponse = z.record(z.string(), z.unknown()) + +export const zGetAuthPluginDatasourceByProviderIdPath = z.object({ + provider_id: z.string(), +}) + +/** + * Success + */ +export const zGetAuthPluginDatasourceByProviderIdResponse = z.record(z.string(), z.unknown()) + +export const 
zPostAuthPluginDatasourceByProviderIdBody = zDatasourceCredentialPayload + +export const zPostAuthPluginDatasourceByProviderIdPath = z.object({ + provider_id: z.string(), +}) + +/** + * Success + */ +export const zPostAuthPluginDatasourceByProviderIdResponse = z.record(z.string(), z.unknown()) + +export const zDeleteAuthPluginDatasourceByProviderIdCustomClientPath = z.object({ + provider_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteAuthPluginDatasourceByProviderIdCustomClientResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostAuthPluginDatasourceByProviderIdCustomClientBody = zDatasourceCustomClientPayload + +export const zPostAuthPluginDatasourceByProviderIdCustomClientPath = z.object({ + provider_id: z.string(), +}) + +/** + * Success + */ +export const zPostAuthPluginDatasourceByProviderIdCustomClientResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostAuthPluginDatasourceByProviderIdDefaultBody = zDatasourceDefaultPayload + +export const zPostAuthPluginDatasourceByProviderIdDefaultPath = z.object({ + provider_id: z.string(), +}) + +/** + * Success + */ +export const zPostAuthPluginDatasourceByProviderIdDefaultResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostAuthPluginDatasourceByProviderIdDeleteBody = zDatasourceCredentialDeletePayload + +export const zPostAuthPluginDatasourceByProviderIdDeletePath = z.object({ + provider_id: z.string(), +}) + +/** + * Success + */ +export const zPostAuthPluginDatasourceByProviderIdDeleteResponse = z.record(z.string(), z.unknown()) + +export const zPostAuthPluginDatasourceByProviderIdUpdateBody = zDatasourceCredentialUpdatePayload + +export const zPostAuthPluginDatasourceByProviderIdUpdatePath = z.object({ + provider_id: z.string(), +}) + +/** + * Success + */ +export const zPostAuthPluginDatasourceByProviderIdUpdateResponse = z.record(z.string(), z.unknown()) + +export const zPostAuthPluginDatasourceByProviderIdUpdateNameBody = 
zDatasourceUpdateNamePayload + +export const zPostAuthPluginDatasourceByProviderIdUpdateNamePath = z.object({ + provider_id: z.string(), +}) + +/** + * Success + */ +export const zPostAuthPluginDatasourceByProviderIdUpdateNameResponse = z.record( + z.string(), + z.unknown(), +) diff --git a/packages/contracts/generated/api/console/billing/orpc.gen.ts b/packages/contracts/generated/api/console/billing/orpc.gen.ts new file mode 100644 index 0000000000..09d25c072e --- /dev/null +++ b/packages/contracts/generated/api/console/billing/orpc.gen.ts @@ -0,0 +1,82 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zGetBillingInvoicesResponse, + zGetBillingSubscriptionResponse, + zPutBillingPartnersByPartnerKeyTenantsBody, + zPutBillingPartnersByPartnerKeyTenantsPath, + zPutBillingPartnersByPartnerKeyTenantsResponse, +} from './zod.gen' + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getBillingInvoices', + path: '/billing/invoices', + tags: ['console'], + }) + .output(zGetBillingInvoicesResponse) + +export const invoices = { + get, +} + +/** + * Sync partner tenants bindings + */ +export const put = oc + .route({ + description: 'Sync partner tenants bindings', + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putBillingPartnersByPartnerKeyTenants', + path: '/billing/partners/{partner_key}/tenants', + tags: ['console'], + }) + .input( + z.object({ + body: zPutBillingPartnersByPartnerKeyTenantsBody, + params: zPutBillingPartnersByPartnerKeyTenantsPath, + }), + ) + .output(zPutBillingPartnersByPartnerKeyTenantsResponse) + +export const tenants = { + put, +} + +export const byPartnerKey = { + tenants, +} + +export const partners = { + byPartnerKey, +} + +export const get2 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getBillingSubscription', + path: '/billing/subscription', + tags: ['console'], + }) 
+ .output(zGetBillingSubscriptionResponse) + +export const subscription = { + get: get2, +} + +export const billing = { + invoices, + partners, + subscription, +} + +export const contract = { + billing, +} diff --git a/packages/contracts/generated/api/console/billing/types.gen.ts b/packages/contracts/generated/api/console/billing/types.gen.ts new file mode 100644 index 0000000000..7a9880c03e --- /dev/null +++ b/packages/contracts/generated/api/console/billing/types.gen.ts @@ -0,0 +1,68 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type PartnerTenantsPayload = { + click_id: string +} + +export type GetBillingInvoicesData = { + body?: never + path?: never + query?: never + url: '/billing/invoices' +} + +export type GetBillingInvoicesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetBillingInvoicesResponse + = GetBillingInvoicesResponses[keyof GetBillingInvoicesResponses] + +export type PutBillingPartnersByPartnerKeyTenantsData = { + body: PartnerTenantsPayload + path: { + partner_key: string + } + query?: never + url: '/billing/partners/{partner_key}/tenants' +} + +export type PutBillingPartnersByPartnerKeyTenantsErrors = { + 400: { + [key: string]: unknown + } +} + +export type PutBillingPartnersByPartnerKeyTenantsError + = PutBillingPartnersByPartnerKeyTenantsErrors[keyof PutBillingPartnersByPartnerKeyTenantsErrors] + +export type PutBillingPartnersByPartnerKeyTenantsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PutBillingPartnersByPartnerKeyTenantsResponse + = PutBillingPartnersByPartnerKeyTenantsResponses[keyof PutBillingPartnersByPartnerKeyTenantsResponses] + +export type GetBillingSubscriptionData = { + body?: never + path?: never + query?: never + url: '/billing/subscription' +} + +export type GetBillingSubscriptionResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
GetBillingSubscriptionResponse + = GetBillingSubscriptionResponses[keyof GetBillingSubscriptionResponses] diff --git a/packages/contracts/generated/api/console/billing/zod.gen.ts b/packages/contracts/generated/api/console/billing/zod.gen.ts new file mode 100644 index 0000000000..7b5412c7f4 --- /dev/null +++ b/packages/contracts/generated/api/console/billing/zod.gen.ts @@ -0,0 +1,31 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * PartnerTenantsPayload + */ +export const zPartnerTenantsPayload = z.object({ + click_id: z.string(), +}) + +/** + * Success + */ +export const zGetBillingInvoicesResponse = z.record(z.string(), z.unknown()) + +export const zPutBillingPartnersByPartnerKeyTenantsBody = zPartnerTenantsPayload + +export const zPutBillingPartnersByPartnerKeyTenantsPath = z.object({ + partner_key: z.string(), +}) + +/** + * Tenants synced to partner successfully + */ +export const zPutBillingPartnersByPartnerKeyTenantsResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetBillingSubscriptionResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/code-based-extension/orpc.gen.ts b/packages/contracts/generated/api/console/code-based-extension/orpc.gen.ts new file mode 100644 index 0000000000..b3baddafd4 --- /dev/null +++ b/packages/contracts/generated/api/console/code-based-extension/orpc.gen.ts @@ -0,0 +1,29 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { zGetCodeBasedExtensionQuery, zGetCodeBasedExtensionResponse } from './zod.gen' + +/** + * Get code-based extension data by module name + */ +export const get = oc + .route({ + description: 'Get code-based extension data by module name', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getCodeBasedExtension', + path: '/code-based-extension', + tags: ['console'], + }) + .input(z.object({ query: 
zGetCodeBasedExtensionQuery.optional() })) + .output(zGetCodeBasedExtensionResponse) + +export const codeBasedExtension = { + get, +} + +export const contract = { + codeBasedExtension, +} diff --git a/packages/contracts/generated/api/console/code-based-extension/types.gen.ts b/packages/contracts/generated/api/console/code-based-extension/types.gen.ts new file mode 100644 index 0000000000..85d224f8d1 --- /dev/null +++ b/packages/contracts/generated/api/console/code-based-extension/types.gen.ts @@ -0,0 +1,26 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type CodeBasedExtensionResponse = { + data: unknown + module: string +} + +export type GetCodeBasedExtensionData = { + body?: never + path?: never + query?: { + module?: string + } + url: '/code-based-extension' +} + +export type GetCodeBasedExtensionResponses = { + 200: CodeBasedExtensionResponse +} + +export type GetCodeBasedExtensionResponse + = GetCodeBasedExtensionResponses[keyof GetCodeBasedExtensionResponses] diff --git a/packages/contracts/generated/api/console/code-based-extension/zod.gen.ts b/packages/contracts/generated/api/console/code-based-extension/zod.gen.ts new file mode 100644 index 0000000000..3cd520cb97 --- /dev/null +++ b/packages/contracts/generated/api/console/code-based-extension/zod.gen.ts @@ -0,0 +1,20 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * CodeBasedExtensionResponse + */ +export const zCodeBasedExtensionResponse = z.object({ + data: z.unknown(), + module: z.string(), +}) + +export const zGetCodeBasedExtensionQuery = z.object({ + module: z.string().optional(), +}) + +/** + * Success + */ +export const zGetCodeBasedExtensionResponse = zCodeBasedExtensionResponse diff --git a/packages/contracts/generated/api/console/compliance/orpc.gen.ts b/packages/contracts/generated/api/console/compliance/orpc.gen.ts new file mode 
100644 index 0000000000..e68c87e7eb --- /dev/null +++ b/packages/contracts/generated/api/console/compliance/orpc.gen.ts @@ -0,0 +1,33 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { zGetComplianceDownloadQuery, zGetComplianceDownloadResponse } from './zod.gen' + +/** + * Get compliance document download link + */ +export const get = oc + .route({ + description: 'Get compliance document download link', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getComplianceDownload', + path: '/compliance/download', + tags: ['console'], + }) + .input(z.object({ query: zGetComplianceDownloadQuery })) + .output(zGetComplianceDownloadResponse) + +export const download = { + get, +} + +export const compliance = { + download, +} + +export const contract = { + compliance, +} diff --git a/packages/contracts/generated/api/console/compliance/types.gen.ts b/packages/contracts/generated/api/console/compliance/types.gen.ts new file mode 100644 index 0000000000..12ab2a82a8 --- /dev/null +++ b/packages/contracts/generated/api/console/compliance/types.gen.ts @@ -0,0 +1,23 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type GetComplianceDownloadData = { + body?: never + path?: never + query: { + doc_name: string + } + url: '/compliance/download' +} + +export type GetComplianceDownloadResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetComplianceDownloadResponse + = GetComplianceDownloadResponses[keyof GetComplianceDownloadResponses] diff --git a/packages/contracts/generated/api/console/compliance/zod.gen.ts b/packages/contracts/generated/api/console/compliance/zod.gen.ts new file mode 100644 index 0000000000..2d42e75fbc --- /dev/null +++ b/packages/contracts/generated/api/console/compliance/zod.gen.ts @@ -0,0 +1,12 @@ +// This file is auto-generated by 
@hey-api/openapi-ts + +import * as z from 'zod' + +export const zGetComplianceDownloadQuery = z.object({ + doc_name: z.string(), +}) + +/** + * Success + */ +export const zGetComplianceDownloadResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/data-source/orpc.gen.ts b/packages/contracts/generated/api/console/data-source/orpc.gen.ts new file mode 100644 index 0000000000..209447236a --- /dev/null +++ b/packages/contracts/generated/api/console/data-source/orpc.gen.ts @@ -0,0 +1,78 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zGetDataSourceIntegratesByBindingIdByActionPath, + zGetDataSourceIntegratesByBindingIdByActionResponse, + zGetDataSourceIntegratesResponse, + zPatchDataSourceIntegratesByBindingIdByActionPath, + zPatchDataSourceIntegratesByBindingIdByActionResponse, + zPatchDataSourceIntegratesResponse, +} from './zod.gen' + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDataSourceIntegratesByBindingIdByAction', + path: '/data-source/integrates/{binding_id}/{action}', + tags: ['console'], + }) + .input(z.object({ params: zGetDataSourceIntegratesByBindingIdByActionPath })) + .output(zGetDataSourceIntegratesByBindingIdByActionResponse) + +export const patch = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDataSourceIntegratesByBindingIdByAction', + path: '/data-source/integrates/{binding_id}/{action}', + tags: ['console'], + }) + .input(z.object({ params: zPatchDataSourceIntegratesByBindingIdByActionPath })) + .output(zPatchDataSourceIntegratesByBindingIdByActionResponse) + +export const byAction = { + get, + patch, +} + +export const byBindingId = { + byAction, +} + +export const get2 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDataSourceIntegrates', + path: '/data-source/integrates', + tags: 
['console'], + }) + .output(zGetDataSourceIntegratesResponse) + +export const patch2 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDataSourceIntegrates', + path: '/data-source/integrates', + tags: ['console'], + }) + .output(zPatchDataSourceIntegratesResponse) + +export const integrates = { + get: get2, + patch: patch2, + byBindingId, +} + +export const dataSource = { + integrates, +} + +export const contract = { + dataSource, +} diff --git a/packages/contracts/generated/api/console/data-source/types.gen.ts b/packages/contracts/generated/api/console/data-source/types.gen.ts new file mode 100644 index 0000000000..db83d81ec1 --- /dev/null +++ b/packages/contracts/generated/api/console/data-source/types.gen.ts @@ -0,0 +1,75 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type GetDataSourceIntegratesData = { + body?: never + path?: never + query?: never + url: '/data-source/integrates' +} + +export type GetDataSourceIntegratesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDataSourceIntegratesResponse + = GetDataSourceIntegratesResponses[keyof GetDataSourceIntegratesResponses] + +export type PatchDataSourceIntegratesData = { + body?: never + path?: never + query?: never + url: '/data-source/integrates' +} + +export type PatchDataSourceIntegratesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDataSourceIntegratesResponse + = PatchDataSourceIntegratesResponses[keyof PatchDataSourceIntegratesResponses] + +export type GetDataSourceIntegratesByBindingIdByActionData = { + body?: never + path: { + binding_id: string + action: string + } + query?: never + url: '/data-source/integrates/{binding_id}/{action}' +} + +export type GetDataSourceIntegratesByBindingIdByActionResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
GetDataSourceIntegratesByBindingIdByActionResponse + = GetDataSourceIntegratesByBindingIdByActionResponses[keyof GetDataSourceIntegratesByBindingIdByActionResponses] + +export type PatchDataSourceIntegratesByBindingIdByActionData = { + body?: never + path: { + binding_id: string + action: string + } + query?: never + url: '/data-source/integrates/{binding_id}/{action}' +} + +export type PatchDataSourceIntegratesByBindingIdByActionResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDataSourceIntegratesByBindingIdByActionResponse + = PatchDataSourceIntegratesByBindingIdByActionResponses[keyof PatchDataSourceIntegratesByBindingIdByActionResponses] diff --git a/packages/contracts/generated/api/console/data-source/zod.gen.ts b/packages/contracts/generated/api/console/data-source/zod.gen.ts new file mode 100644 index 0000000000..1684b7e637 --- /dev/null +++ b/packages/contracts/generated/api/console/data-source/zod.gen.ts @@ -0,0 +1,36 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * Success + */ +export const zGetDataSourceIntegratesResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zPatchDataSourceIntegratesResponse = z.record(z.string(), z.unknown()) + +export const zGetDataSourceIntegratesByBindingIdByActionPath = z.object({ + binding_id: z.string(), + action: z.string(), +}) + +/** + * Success + */ +export const zGetDataSourceIntegratesByBindingIdByActionResponse = z.record(z.string(), z.unknown()) + +export const zPatchDataSourceIntegratesByBindingIdByActionPath = z.object({ + binding_id: z.string(), + action: z.string(), +}) + +/** + * Success + */ +export const zPatchDataSourceIntegratesByBindingIdByActionResponse = z.record( + z.string(), + z.unknown(), +) diff --git a/packages/contracts/generated/api/console/datasets/orpc.gen.ts b/packages/contracts/generated/api/console/datasets/orpc.gen.ts new file mode 100644 index 0000000000..37a0b7cb8c --- /dev/null +++ 
b/packages/contracts/generated/api/console/datasets/orpc.gen.ts @@ -0,0 +1,1786 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zDeleteDatasetsApiKeysByApiKeyIdPath, + zDeleteDatasetsApiKeysByApiKeyIdResponse, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdPath, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdResponse, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdPath, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsPath, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse, + zDeleteDatasetsByDatasetIdDocumentsPath, + zDeleteDatasetsByDatasetIdDocumentsResponse, + zDeleteDatasetsByDatasetIdMetadataByMetadataIdPath, + zDeleteDatasetsByDatasetIdMetadataByMetadataIdResponse, + zDeleteDatasetsByDatasetIdPath, + zDeleteDatasetsByDatasetIdResponse, + zDeleteDatasetsByResourceIdApiKeysByApiKeyIdPath, + zDeleteDatasetsByResourceIdApiKeysByApiKeyIdResponse, + zDeleteDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdPath, + zDeleteDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponse, + zGetDatasetsApiBaseInfoResponse, + zGetDatasetsApiKeysResponse, + zGetDatasetsBatchImportStatusByJobIdPath, + zGetDatasetsBatchImportStatusByJobIdResponse, + zGetDatasetsByDatasetIdAutoDisableLogsPath, + zGetDatasetsByDatasetIdAutoDisableLogsResponse, + zGetDatasetsByDatasetIdBatchByBatchIndexingEstimatePath, + zGetDatasetsByDatasetIdBatchByBatchIndexingEstimateResponse, + zGetDatasetsByDatasetIdBatchByBatchIndexingStatusPath, + zGetDatasetsByDatasetIdBatchByBatchIndexingStatusResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdDownloadPath, + 
zGetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimatePath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimateResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdNotionSyncPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdNotionSyncResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdPipelineExecutionLogPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdPipelineExecutionLogResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdQuery, + zGetDatasetsByDatasetIdDocumentsByDocumentIdResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdWebsiteSyncPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdWebsiteSyncResponse, + zGetDatasetsByDatasetIdDocumentsPath, + zGetDatasetsByDatasetIdDocumentsQuery, + zGetDatasetsByDatasetIdDocumentsResponse, + zGetDatasetsByDatasetIdErrorDocsPath, + zGetDatasetsByDatasetIdErrorDocsResponse, + zGetDatasetsByDatasetIdIndexingStatusPath, + zGetDatasetsByDatasetIdIndexingStatusResponse, + zGetDatasetsByDatasetIdMetadataPath, + zGetDatasetsByDatasetIdMetadataResponse, + zGetDatasetsByDatasetIdNotionSyncPath, + zGetDatasetsByDatasetIdNotionSyncResponse, + zGetDatasetsByDatasetIdPath, + 
zGetDatasetsByDatasetIdPermissionPartUsersPath, + zGetDatasetsByDatasetIdPermissionPartUsersResponse, + zGetDatasetsByDatasetIdQueriesPath, + zGetDatasetsByDatasetIdQueriesResponse, + zGetDatasetsByDatasetIdRelatedAppsPath, + zGetDatasetsByDatasetIdRelatedAppsResponse, + zGetDatasetsByDatasetIdResponse, + zGetDatasetsByDatasetIdUseCheckPath, + zGetDatasetsByDatasetIdUseCheckResponse, + zGetDatasetsByResourceIdApiKeysPath, + zGetDatasetsByResourceIdApiKeysResponse, + zGetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdPath, + zGetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponse, + zGetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdUseCheckPath, + zGetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdUseCheckResponse, + zGetDatasetsExternalKnowledgeApiQuery, + zGetDatasetsExternalKnowledgeApiResponse, + zGetDatasetsMetadataBuiltInResponse, + zGetDatasetsNotionIndexingEstimateResponse, + zGetDatasetsProcessRuleQuery, + zGetDatasetsProcessRuleResponse, + zGetDatasetsQuery, + zGetDatasetsResponse, + zGetDatasetsRetrievalSettingByVectorTypePath, + zGetDatasetsRetrievalSettingByVectorTypeResponse, + zGetDatasetsRetrievalSettingResponse, + zPatchDatasetsByDatasetIdBody, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionPath, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionResponse, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingPausePath, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingPauseResponse, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingResumePath, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingResumeResponse, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentByActionPath, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentByActionResponse, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdBody, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdBody, + 
zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdPath, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse, + zPatchDatasetsByDatasetIdDocumentsStatusByActionBatchPath, + zPatchDatasetsByDatasetIdDocumentsStatusByActionBatchResponse, + zPatchDatasetsByDatasetIdMetadataByMetadataIdBody, + zPatchDatasetsByDatasetIdMetadataByMetadataIdPath, + zPatchDatasetsByDatasetIdMetadataByMetadataIdResponse, + zPatchDatasetsByDatasetIdPath, + zPatchDatasetsByDatasetIdResponse, + zPatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdBody, + zPatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdPath, + zPatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponse, + zPostDatasetsApiKeysResponse, + zPostDatasetsBatchImportStatusByJobIdBody, + zPostDatasetsBatchImportStatusByJobIdPath, + zPostDatasetsBatchImportStatusByJobIdResponse, + zPostDatasetsBody, + zPostDatasetsByDatasetIdApiKeysByStatusPath, + zPostDatasetsByDatasetIdApiKeysByStatusResponse, + zPostDatasetsByDatasetIdDocumentsBody, + zPostDatasetsByDatasetIdDocumentsByDocumentIdRenameBody, + zPostDatasetsByDatasetIdDocumentsByDocumentIdRenamePath, + zPostDatasetsByDatasetIdDocumentsByDocumentIdRenameResponse, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentBody, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentPath, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentResponse, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportBody, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportPath, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponse, + 
zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksBody, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse, + zPostDatasetsByDatasetIdDocumentsDownloadZipBody, + zPostDatasetsByDatasetIdDocumentsDownloadZipPath, + zPostDatasetsByDatasetIdDocumentsDownloadZipResponse, + zPostDatasetsByDatasetIdDocumentsGenerateSummaryBody, + zPostDatasetsByDatasetIdDocumentsGenerateSummaryPath, + zPostDatasetsByDatasetIdDocumentsGenerateSummaryResponse, + zPostDatasetsByDatasetIdDocumentsMetadataBody, + zPostDatasetsByDatasetIdDocumentsMetadataPath, + zPostDatasetsByDatasetIdDocumentsMetadataResponse, + zPostDatasetsByDatasetIdDocumentsPath, + zPostDatasetsByDatasetIdDocumentsResponse, + zPostDatasetsByDatasetIdExternalHitTestingBody, + zPostDatasetsByDatasetIdExternalHitTestingPath, + zPostDatasetsByDatasetIdExternalHitTestingResponse, + zPostDatasetsByDatasetIdHitTestingBody, + zPostDatasetsByDatasetIdHitTestingPath, + zPostDatasetsByDatasetIdHitTestingResponse, + zPostDatasetsByDatasetIdMetadataBody, + zPostDatasetsByDatasetIdMetadataBuiltInByActionPath, + zPostDatasetsByDatasetIdMetadataBuiltInByActionResponse, + zPostDatasetsByDatasetIdMetadataPath, + zPostDatasetsByDatasetIdMetadataResponse, + zPostDatasetsByDatasetIdRetryBody, + zPostDatasetsByDatasetIdRetryPath, + zPostDatasetsByDatasetIdRetryResponse, + zPostDatasetsByResourceIdApiKeysPath, + zPostDatasetsByResourceIdApiKeysResponse, + zPostDatasetsExternalBody, + zPostDatasetsExternalKnowledgeApiBody, + zPostDatasetsExternalKnowledgeApiResponse, + zPostDatasetsExternalResponse, + zPostDatasetsIndexingEstimateBody, + zPostDatasetsIndexingEstimateResponse, + zPostDatasetsInitBody, + zPostDatasetsInitResponse, + zPostDatasetsNotionIndexingEstimateBody, + zPostDatasetsNotionIndexingEstimateResponse, + zPostDatasetsResponse, + zPutDatasetsByDatasetIdDocumentsByDocumentIdMetadataBody, + 
zPutDatasetsByDatasetIdDocumentsByDocumentIdMetadataPath, + zPutDatasetsByDatasetIdDocumentsByDocumentIdMetadataResponse, +} from './zod.gen' + +/** + * Get dataset API base information + */ +export const get = oc + .route({ + description: 'Get dataset API base information', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsApiBaseInfo', + path: '/datasets/api-base-info', + tags: ['console'], + }) + .output(zGetDatasetsApiBaseInfoResponse) + +export const apiBaseInfo = { + get, +} + +/** + * Delete dataset API key + */ +export const delete_ = oc + .route({ + description: 'Delete dataset API key', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsApiKeysByApiKeyId', + path: '/datasets/api-keys/{api_key_id}', + successStatus: 204, + tags: ['console'], + }) + .input(z.object({ params: zDeleteDatasetsApiKeysByApiKeyIdPath })) + .output(zDeleteDatasetsApiKeysByApiKeyIdResponse) + +export const byApiKeyId = { + delete: delete_, +} + +/** + * Get dataset API keys + */ +export const get2 = oc + .route({ + description: 'Get dataset API keys', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsApiKeys', + path: '/datasets/api-keys', + tags: ['console'], + }) + .output(zGetDatasetsApiKeysResponse) + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsApiKeys', + path: '/datasets/api-keys', + tags: ['console'], + }) + .output(zPostDatasetsApiKeysResponse) + +export const apiKeys = { + get: get2, + post, + byApiKeyId, +} + +export const get3 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsBatchImportStatusByJobId', + path: '/datasets/batch_import_status/{job_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsBatchImportStatusByJobIdPath })) + .output(zGetDatasetsBatchImportStatusByJobIdResponse) + +export const post2 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + 
operationId: 'postDatasetsBatchImportStatusByJobId', + path: '/datasets/batch_import_status/{job_id}', + tags: ['console'], + }) + .input( + z.object({ + body: zPostDatasetsBatchImportStatusByJobIdBody, + params: zPostDatasetsBatchImportStatusByJobIdPath, + }), + ) + .output(zPostDatasetsBatchImportStatusByJobIdResponse) + +export const byJobId = { + get: get3, + post: post2, +} + +export const batchImportStatus = { + byJobId, +} + +/** + * Create external knowledge dataset + */ +export const post3 = oc + .route({ + description: 'Create external knowledge dataset', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsExternal', + path: '/datasets/external', + successStatus: 201, + tags: ['console'], + }) + .input(z.object({ body: zPostDatasetsExternalBody })) + .output(zPostDatasetsExternalResponse) + +export const external = { + post: post3, +} + +/** + * Check if external knowledge API is being used + */ +export const get4 = oc + .route({ + description: 'Check if external knowledge API is being used', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdUseCheck', + path: '/datasets/external-knowledge-api/{external_knowledge_api_id}/use-check', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdUseCheckPath })) + .output(zGetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdUseCheckResponse) + +export const useCheck = { + get: get4, +} + +export const delete2 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsExternalKnowledgeApiByExternalKnowledgeApiId', + path: '/datasets/external-knowledge-api/{external_knowledge_api_id}', + tags: ['console'], + }) + .input(z.object({ params: zDeleteDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdPath })) + .output(zDeleteDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponse) + +/** + * Get external knowledge API template 
details + */ +export const get5 = oc + .route({ + description: 'Get external knowledge API template details', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsExternalKnowledgeApiByExternalKnowledgeApiId', + path: '/datasets/external-knowledge-api/{external_knowledge_api_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdPath })) + .output(zGetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponse) + +export const patch = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsExternalKnowledgeApiByExternalKnowledgeApiId', + path: '/datasets/external-knowledge-api/{external_knowledge_api_id}', + tags: ['console'], + }) + .input( + z.object({ + body: zPatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdBody, + params: zPatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdPath, + }), + ) + .output(zPatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponse) + +export const byExternalKnowledgeApiId = { + delete: delete2, + get: get5, + patch, + useCheck, +} + +/** + * Get external knowledge API templates + */ +export const get6 = oc + .route({ + description: 'Get external knowledge API templates', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsExternalKnowledgeApi', + path: '/datasets/external-knowledge-api', + tags: ['console'], + }) + .input(z.object({ query: zGetDatasetsExternalKnowledgeApiQuery.optional() })) + .output(zGetDatasetsExternalKnowledgeApiResponse) + +export const post4 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsExternalKnowledgeApi', + path: '/datasets/external-knowledge-api', + tags: ['console'], + }) + .input(z.object({ body: zPostDatasetsExternalKnowledgeApiBody })) + .output(zPostDatasetsExternalKnowledgeApiResponse) + +export const externalKnowledgeApi = { + get: get6, + post: post4, + byExternalKnowledgeApiId, +} + +/** + * 
Estimate dataset indexing cost + */ +export const post5 = oc + .route({ + description: 'Estimate dataset indexing cost', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsIndexingEstimate', + path: '/datasets/indexing-estimate', + tags: ['console'], + }) + .input(z.object({ body: zPostDatasetsIndexingEstimateBody })) + .output(zPostDatasetsIndexingEstimateResponse) + +export const indexingEstimate = { + post: post5, +} + +/** + * Initialize dataset with documents + */ +export const post6 = oc + .route({ + description: 'Initialize dataset with documents', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsInit', + path: '/datasets/init', + successStatus: 201, + tags: ['console'], + }) + .input(z.object({ body: zPostDatasetsInitBody })) + .output(zPostDatasetsInitResponse) + +export const init = { + post: post6, +} + +export const get7 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsMetadataBuiltIn', + path: '/datasets/metadata/built-in', + tags: ['console'], + }) + .output(zGetDatasetsMetadataBuiltInResponse) + +export const builtIn = { + get: get7, +} + +export const metadata = { + builtIn, +} + +export const get8 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsNotionIndexingEstimate', + path: '/datasets/notion-indexing-estimate', + tags: ['console'], + }) + .output(zGetDatasetsNotionIndexingEstimateResponse) + +export const post7 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsNotionIndexingEstimate', + path: '/datasets/notion-indexing-estimate', + tags: ['console'], + }) + .input(z.object({ body: zPostDatasetsNotionIndexingEstimateBody })) + .output(zPostDatasetsNotionIndexingEstimateResponse) + +export const notionIndexingEstimate = { + get: get8, + post: post7, +} + +/** + * Get dataset document processing rules + */ +export const get9 = oc + .route({ + description: 'Get dataset 
document processing rules', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsProcessRule', + path: '/datasets/process-rule', + tags: ['console'], + }) + .input(z.object({ query: zGetDatasetsProcessRuleQuery.optional() })) + .output(zGetDatasetsProcessRuleResponse) + +export const processRule = { + get: get9, +} + +/** + * Get mock dataset retrieval settings by vector type + */ +export const get10 = oc + .route({ + description: 'Get mock dataset retrieval settings by vector type', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsRetrievalSettingByVectorType', + path: '/datasets/retrieval-setting/{vector_type}', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsRetrievalSettingByVectorTypePath })) + .output(zGetDatasetsRetrievalSettingByVectorTypeResponse) + +export const byVectorType = { + get: get10, +} + +/** + * Get dataset retrieval settings + */ +export const get11 = oc + .route({ + description: 'Get dataset retrieval settings', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsRetrievalSetting', + path: '/datasets/retrieval-setting', + tags: ['console'], + }) + .output(zGetDatasetsRetrievalSettingResponse) + +export const retrievalSetting = { + get: get11, + byVectorType, +} + +export const post8 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdApiKeysByStatus', + path: '/datasets/{dataset_id}/api-keys/{status}', + tags: ['console'], + }) + .input(z.object({ params: zPostDatasetsByDatasetIdApiKeysByStatusPath })) + .output(zPostDatasetsByDatasetIdApiKeysByStatusResponse) + +export const byStatus = { + post: post8, +} + +export const apiKeys2 = { + byStatus, +} + +/** + * Get dataset auto disable logs + */ +export const get12 = oc + .route({ + description: 'Get dataset auto disable logs', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdAutoDisableLogs', + path: 
'/datasets/{dataset_id}/auto-disable-logs', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdAutoDisableLogsPath })) + .output(zGetDatasetsByDatasetIdAutoDisableLogsResponse) + +export const autoDisableLogs = { + get: get12, +} + +export const get13 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdBatchByBatchIndexingEstimate', + path: '/datasets/{dataset_id}/batch/{batch}/indexing-estimate', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdBatchByBatchIndexingEstimatePath })) + .output(zGetDatasetsByDatasetIdBatchByBatchIndexingEstimateResponse) + +export const indexingEstimate2 = { + get: get13, +} + +export const get14 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdBatchByBatchIndexingStatus', + path: '/datasets/{dataset_id}/batch/{batch}/indexing-status', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdBatchByBatchIndexingStatusPath })) + .output(zGetDatasetsByDatasetIdBatchByBatchIndexingStatusResponse) + +export const indexingStatus = { + get: get14, +} + +export const byBatch = { + indexingEstimate: indexingEstimate2, + indexingStatus, +} + +export const batch = { + byBatch, +} + +/** + * Stream a ZIP archive containing the requested uploaded documents + * + * Download selected dataset documents as a single ZIP archive (upload-file only) + */ +export const post9 = oc + .route({ + description: 'Download selected dataset documents as a single ZIP archive (upload-file only)', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsDownloadZip', + path: '/datasets/{dataset_id}/documents/download-zip', + summary: 'Stream a ZIP archive containing the requested uploaded documents', + tags: ['console'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsDownloadZipBody, + params: 
zPostDatasetsByDatasetIdDocumentsDownloadZipPath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsDownloadZipResponse) + +export const downloadZip = { + post: post9, +} + +/** + * Generate summary index for specified documents + * + * Generate summary index for documents + * This endpoint checks if the dataset configuration supports summary generation + * (indexing_technique must be 'high_quality' and summary_index_setting.enable must be true), + * then asynchronously generates summary indexes for the provided documents. + */ +export const post10 = oc + .route({ + description: + 'Generate summary index for documents\nThis endpoint checks if the dataset configuration supports summary generation\n(indexing_technique must be \'high_quality\' and summary_index_setting.enable must be true),\nthen asynchronously generates summary indexes for the provided documents.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsGenerateSummary', + path: '/datasets/{dataset_id}/documents/generate-summary', + summary: 'Generate summary index for specified documents', + tags: ['console'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsGenerateSummaryBody, + params: zPostDatasetsByDatasetIdDocumentsGenerateSummaryPath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsGenerateSummaryResponse) + +export const generateSummary = { + post: post10, +} + +export const post11 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsMetadata', + path: '/datasets/{dataset_id}/documents/metadata', + tags: ['console'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsMetadataBody, + params: zPostDatasetsByDatasetIdDocumentsMetadataPath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsMetadataResponse) + +export const metadata2 = { + post: post11, +} + +export const patch2 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + 
operationId: 'patchDatasetsByDatasetIdDocumentsStatusByActionBatch', + path: '/datasets/{dataset_id}/documents/status/{action}/batch', + tags: ['console'], + }) + .input(z.object({ params: zPatchDatasetsByDatasetIdDocumentsStatusByActionBatchPath })) + .output(zPatchDatasetsByDatasetIdDocumentsStatusByActionBatchResponse) + +export const batch2 = { + patch: patch2, +} + +export const byAction = { + batch: batch2, +} + +export const status = { + byAction, +} + +/** + * Get a signed download URL for a dataset document's original uploaded file + */ +export const get15 = oc + .route({ + description: 'Get a signed download URL for a dataset document\'s original uploaded file', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdDownload', + path: '/datasets/{dataset_id}/documents/{document_id}/download', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsByDocumentIdDownloadPath })) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponse) + +export const download = { + get: get15, +} + +/** + * Estimate document indexing cost + */ +export const get16 = oc + .route({ + description: 'Estimate document indexing cost', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimate', + path: '/datasets/{dataset_id}/documents/{document_id}/indexing-estimate', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimatePath })) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimateResponse) + +export const indexingEstimate3 = { + get: get16, +} + +/** + * Get document indexing status + */ +export const get17 = oc + .route({ + description: 'Get document indexing status', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatus', + path: 
'/datasets/{dataset_id}/documents/{document_id}/indexing-status', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusPath })) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusResponse) + +export const indexingStatus2 = { + get: get17, +} + +/** + * Update document metadata + */ +export const put = oc + .route({ + description: 'Update document metadata', + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putDatasetsByDatasetIdDocumentsByDocumentIdMetadata', + path: '/datasets/{dataset_id}/documents/{document_id}/metadata', + tags: ['console'], + }) + .input( + z.object({ + body: zPutDatasetsByDatasetIdDocumentsByDocumentIdMetadataBody, + params: zPutDatasetsByDatasetIdDocumentsByDocumentIdMetadataPath, + }), + ) + .output(zPutDatasetsByDatasetIdDocumentsByDocumentIdMetadataResponse) + +export const metadata3 = { + put, +} + +export const get18 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdNotionSync', + path: '/datasets/{dataset_id}/documents/{document_id}/notion/sync', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsByDocumentIdNotionSyncPath })) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdNotionSyncResponse) + +export const sync = { + get: get18, +} + +export const notion = { + sync, +} + +export const get19 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdPipelineExecutionLog', + path: '/datasets/{dataset_id}/documents/{document_id}/pipeline-execution-log', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsByDocumentIdPipelineExecutionLogPath })) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdPipelineExecutionLogResponse) + +export const pipelineExecutionLog = { + get: get19, +} + +/** + * pause document + */ +export const patch3 = oc + 
.route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsByDatasetIdDocumentsByDocumentIdProcessingPause', + path: '/datasets/{dataset_id}/documents/{document_id}/processing/pause', + summary: 'pause document', + tags: ['console'], + }) + .input(z.object({ params: zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingPausePath })) + .output(zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingPauseResponse) + +export const pause = { + patch: patch3, +} + +/** + * recover document + */ +export const patch4 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsByDatasetIdDocumentsByDocumentIdProcessingResume', + path: '/datasets/{dataset_id}/documents/{document_id}/processing/resume', + summary: 'recover document', + tags: ['console'], + }) + .input(z.object({ params: zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingResumePath })) + .output(zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingResumeResponse) + +export const resume = { + patch: patch4, +} + +/** + * Update document processing status (pause/resume) + */ +export const patch5 = oc + .route({ + description: 'Update document processing status (pause/resume)', + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByAction', + path: '/datasets/{dataset_id}/documents/{document_id}/processing/{action}', + tags: ['console'], + }) + .input(z.object({ params: zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionPath })) + .output(zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionResponse) + +export const byAction2 = { + patch: patch5, +} + +export const processing = { + pause, + resume, + byAction: byAction2, +} + +export const post12 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsByDocumentIdRename', + path: '/datasets/{dataset_id}/documents/{document_id}/rename', + tags: 
['console'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsByDocumentIdRenameBody, + params: zPostDatasetsByDatasetIdDocumentsByDocumentIdRenamePath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsByDocumentIdRenameResponse) + +export const rename = { + post: post12, +} + +export const patch6 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsByDatasetIdDocumentsByDocumentIdSegmentByAction', + path: '/datasets/{dataset_id}/documents/{document_id}/segment/{action}', + tags: ['console'], + }) + .input(z.object({ params: zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentByActionPath })) + .output(zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentByActionResponse) + +export const byAction3 = { + patch: patch6, +} + +export const post13 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsByDocumentIdSegment', + path: '/datasets/{dataset_id}/documents/{document_id}/segment', + tags: ['console'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentBody, + params: zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentPath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentResponse) + +export const segment = { + post: post13, + byAction: byAction3, +} + +export const get20 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImport', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/batch_import', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportPath })) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponse) + +export const post14 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImport', + path: 
'/datasets/{dataset_id}/documents/{document_id}/segments/batch_import', + tags: ['console'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportBody, + params: zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportPath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponse) + +export const batchImport = { + get: get20, + post: post14, +} + +export const delete3 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: + 'deleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkId', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id}', + tags: ['console'], + }) + .input( + z.object({ + params: + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdPath, + }), + ) + .output( + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse, + ) + +export const patch7 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: + 'patchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkId', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id}', + tags: ['console'], + }) + .input( + z.object({ + body: zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdBody, + params: + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdPath, + }), + ) + .output( + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse, + ) + +export const byChildChunkId = { + delete: delete3, + patch: patch7, +} + +export const get21 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunks', + path: 
'/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks', + tags: ['console'], + }) + .input( + z.object({ + params: zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath, + }), + ) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse) + +export const patch8 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunks', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks', + tags: ['console'], + }) + .input( + z.object({ + params: zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath, + }), + ) + .output(zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse) + +export const post15 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunks', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks', + tags: ['console'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksBody, + params: zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse) + +export const childChunks = { + get: get21, + patch: patch8, + post: post15, + byChildChunkId, +} + +export const delete4 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentId', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}', + tags: ['console'], + }) + .input( + z.object({ params: zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath }), + ) + 
.output(zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse) + +export const patch9 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentId', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}', + tags: ['console'], + }) + .input( + z.object({ + body: zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdBody, + params: zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath, + }), + ) + .output(zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse) + +export const bySegmentId = { + delete: delete4, + patch: patch9, + childChunks, +} + +export const delete5 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsByDatasetIdDocumentsByDocumentIdSegments', + path: '/datasets/{dataset_id}/documents/{document_id}/segments', + tags: ['console'], + }) + .input(z.object({ params: zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsPath })) + .output(zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse) + +export const get22 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdSegments', + path: '/datasets/{dataset_id}/documents/{document_id}/segments', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsPath })) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse) + +export const segments = { + delete: delete5, + get: get22, + batchImport, + bySegmentId, +} + +/** + * Get summary index generation status for a document + * + * Get summary index generation status for a document + * Returns: + * - total_segments: Total number of segments in the document + * - summary_status: Dictionary with status counts + * - completed: Number of summaries completed + * - generating: Number of summaries being 
generated + * - error: Number of summaries with errors + * - not_started: Number of segments without summary records + * - summaries: List of summary records with status and content preview + */ +export const get23 = oc + .route({ + description: + 'Get summary index generation status for a document\nReturns:\n- total_segments: Total number of segments in the document\n- summary_status: Dictionary with status counts\n - completed: Number of summaries completed\n - generating: Number of summaries being generated\n - error: Number of summaries with errors\n - not_started: Number of segments without summary records\n- summaries: List of summary records with status and content preview', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatus', + path: '/datasets/{dataset_id}/documents/{document_id}/summary-status', + summary: 'Get summary index generation status for a document', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusPath })) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusResponse) + +export const summaryStatus = { + get: get23, +} + +/** + * sync website document + */ +export const get24 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdWebsiteSync', + path: '/datasets/{dataset_id}/documents/{document_id}/website-sync', + summary: 'sync website document', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsByDocumentIdWebsiteSyncPath })) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdWebsiteSyncResponse) + +export const websiteSync = { + get: get24, +} + +export const delete6 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsByDatasetIdDocumentsByDocumentId', + path: '/datasets/{dataset_id}/documents/{document_id}', + tags: ['console'], + }) + 
.input(z.object({ params: zDeleteDatasetsByDatasetIdDocumentsByDocumentIdPath })) + .output(zDeleteDatasetsByDatasetIdDocumentsByDocumentIdResponse) + +/** + * Get document details + */ +export const get25 = oc + .route({ + description: 'Get document details', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentId', + path: '/datasets/{dataset_id}/documents/{document_id}', + tags: ['console'], + }) + .input( + z.object({ + params: zGetDatasetsByDatasetIdDocumentsByDocumentIdPath, + query: zGetDatasetsByDatasetIdDocumentsByDocumentIdQuery.optional(), + }), + ) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdResponse) + +export const byDocumentId = { + delete: delete6, + get: get25, + download, + indexingEstimate: indexingEstimate3, + indexingStatus: indexingStatus2, + metadata: metadata3, + notion, + pipelineExecutionLog, + processing, + rename, + segment, + segments, + summaryStatus, + websiteSync, +} + +export const delete7 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsByDatasetIdDocuments', + path: '/datasets/{dataset_id}/documents', + tags: ['console'], + }) + .input(z.object({ params: zDeleteDatasetsByDatasetIdDocumentsPath })) + .output(zDeleteDatasetsByDatasetIdDocumentsResponse) + +/** + * Get documents in a dataset + */ +export const get26 = oc + .route({ + description: 'Get documents in a dataset', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocuments', + path: '/datasets/{dataset_id}/documents', + tags: ['console'], + }) + .input( + z.object({ + params: zGetDatasetsByDatasetIdDocumentsPath, + query: zGetDatasetsByDatasetIdDocumentsQuery.optional(), + }), + ) + .output(zGetDatasetsByDatasetIdDocumentsResponse) + +export const post16 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocuments', + path: '/datasets/{dataset_id}/documents', + tags: ['console'], 
+ }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsBody, + params: zPostDatasetsByDatasetIdDocumentsPath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsResponse) + +export const documents = { + delete: delete7, + get: get26, + post: post16, + downloadZip, + generateSummary, + metadata: metadata2, + status, + byDocumentId, +} + +/** + * Get dataset error documents + */ +export const get27 = oc + .route({ + description: 'Get dataset error documents', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdErrorDocs', + path: '/datasets/{dataset_id}/error-docs', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdErrorDocsPath })) + .output(zGetDatasetsByDatasetIdErrorDocsResponse) + +export const errorDocs = { + get: get27, +} + +/** + * Test external knowledge retrieval for dataset + */ +export const post17 = oc + .route({ + description: 'Test external knowledge retrieval for dataset', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdExternalHitTesting', + path: '/datasets/{dataset_id}/external-hit-testing', + tags: ['console'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdExternalHitTestingBody, + params: zPostDatasetsByDatasetIdExternalHitTestingPath, + }), + ) + .output(zPostDatasetsByDatasetIdExternalHitTestingResponse) + +export const externalHitTesting = { + post: post17, +} + +/** + * Test dataset knowledge retrieval + */ +export const post18 = oc + .route({ + description: 'Test dataset knowledge retrieval', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdHitTesting', + path: '/datasets/{dataset_id}/hit-testing', + tags: ['console'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdHitTestingBody, + params: zPostDatasetsByDatasetIdHitTestingPath, + }), + ) + .output(zPostDatasetsByDatasetIdHitTestingResponse) + +export const hitTesting = { + post: post18, +} + +/** + * Get dataset 
indexing status + */ +export const get28 = oc + .route({ + description: 'Get dataset indexing status', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdIndexingStatus', + path: '/datasets/{dataset_id}/indexing-status', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdIndexingStatusPath })) + .output(zGetDatasetsByDatasetIdIndexingStatusResponse) + +export const indexingStatus3 = { + get: get28, +} + +export const post19 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdMetadataBuiltInByAction', + path: '/datasets/{dataset_id}/metadata/built-in/{action}', + tags: ['console'], + }) + .input(z.object({ params: zPostDatasetsByDatasetIdMetadataBuiltInByActionPath })) + .output(zPostDatasetsByDatasetIdMetadataBuiltInByActionResponse) + +export const byAction4 = { + post: post19, +} + +export const builtIn2 = { + byAction: byAction4, +} + +export const delete8 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsByDatasetIdMetadataByMetadataId', + path: '/datasets/{dataset_id}/metadata/{metadata_id}', + tags: ['console'], + }) + .input(z.object({ params: zDeleteDatasetsByDatasetIdMetadataByMetadataIdPath })) + .output(zDeleteDatasetsByDatasetIdMetadataByMetadataIdResponse) + +export const patch10 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsByDatasetIdMetadataByMetadataId', + path: '/datasets/{dataset_id}/metadata/{metadata_id}', + tags: ['console'], + }) + .input( + z.object({ + body: zPatchDatasetsByDatasetIdMetadataByMetadataIdBody, + params: zPatchDatasetsByDatasetIdMetadataByMetadataIdPath, + }), + ) + .output(zPatchDatasetsByDatasetIdMetadataByMetadataIdResponse) + +export const byMetadataId = { + delete: delete8, + patch: patch10, +} + +export const get29 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 
'getDatasetsByDatasetIdMetadata', + path: '/datasets/{dataset_id}/metadata', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdMetadataPath })) + .output(zGetDatasetsByDatasetIdMetadataResponse) + +export const post20 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdMetadata', + path: '/datasets/{dataset_id}/metadata', + tags: ['console'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdMetadataBody, + params: zPostDatasetsByDatasetIdMetadataPath, + }), + ) + .output(zPostDatasetsByDatasetIdMetadataResponse) + +export const metadata4 = { + get: get29, + post: post20, + builtIn: builtIn2, + byMetadataId, +} + +export const get30 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdNotionSync', + path: '/datasets/{dataset_id}/notion/sync', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdNotionSyncPath })) + .output(zGetDatasetsByDatasetIdNotionSyncResponse) + +export const sync2 = { + get: get30, +} + +export const notion2 = { + sync: sync2, +} + +/** + * Get dataset permission user list + */ +export const get31 = oc + .route({ + description: 'Get dataset permission user list', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdPermissionPartUsers', + path: '/datasets/{dataset_id}/permission-part-users', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdPermissionPartUsersPath })) + .output(zGetDatasetsByDatasetIdPermissionPartUsersResponse) + +export const permissionPartUsers = { + get: get31, +} + +/** + * Get dataset query history + */ +export const get32 = oc + .route({ + description: 'Get dataset query history', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdQueries', + path: '/datasets/{dataset_id}/queries', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdQueriesPath 
})) + .output(zGetDatasetsByDatasetIdQueriesResponse) + +export const queries = { + get: get32, +} + +/** + * Get applications related to dataset + */ +export const get33 = oc + .route({ + description: 'Get applications related to dataset', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdRelatedApps', + path: '/datasets/{dataset_id}/related-apps', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdRelatedAppsPath })) + .output(zGetDatasetsByDatasetIdRelatedAppsResponse) + +export const relatedApps = { + get: get33, +} + +/** + * retry document + */ +export const post21 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdRetry', + path: '/datasets/{dataset_id}/retry', + summary: 'retry document', + tags: ['console'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdRetryBody, + params: zPostDatasetsByDatasetIdRetryPath, + }), + ) + .output(zPostDatasetsByDatasetIdRetryResponse) + +export const retry = { + post: post21, +} + +/** + * Check if dataset is in use + */ +export const get34 = oc + .route({ + description: 'Check if dataset is in use', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdUseCheck', + path: '/datasets/{dataset_id}/use-check', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdUseCheckPath })) + .output(zGetDatasetsByDatasetIdUseCheckResponse) + +export const useCheck2 = { + get: get34, +} + +export const delete9 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsByDatasetId', + path: '/datasets/{dataset_id}', + tags: ['console'], + }) + .input(z.object({ params: zDeleteDatasetsByDatasetIdPath })) + .output(zDeleteDatasetsByDatasetIdResponse) + +/** + * Get dataset details + */ +export const get35 = oc + .route({ + description: 'Get dataset details', + inputStructure: 'detailed', + method: 'GET', + operationId: 
'getDatasetsByDatasetId', + path: '/datasets/{dataset_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdPath })) + .output(zGetDatasetsByDatasetIdResponse) + +/** + * Update dataset details + */ +export const patch11 = oc + .route({ + description: 'Update dataset details', + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsByDatasetId', + path: '/datasets/{dataset_id}', + tags: ['console'], + }) + .input(z.object({ body: zPatchDatasetsByDatasetIdBody, params: zPatchDatasetsByDatasetIdPath })) + .output(zPatchDatasetsByDatasetIdResponse) + +export const byDatasetId = { + delete: delete9, + get: get35, + patch: patch11, + apiKeys: apiKeys2, + autoDisableLogs, + batch, + documents, + errorDocs, + externalHitTesting, + hitTesting, + indexingStatus: indexingStatus3, + metadata: metadata4, + notion: notion2, + permissionPartUsers, + queries, + relatedApps, + retry, + useCheck: useCheck2, +} + +/** + * Delete an API key for a dataset + * + * Delete an API key for a dataset + */ +export const delete10 = oc + .route({ + description: 'Delete an API key for a dataset', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsByResourceIdApiKeysByApiKeyId', + path: '/datasets/{resource_id}/api-keys/{api_key_id}', + successStatus: 204, + summary: 'Delete an API key for a dataset', + tags: ['console'], + }) + .input(z.object({ params: zDeleteDatasetsByResourceIdApiKeysByApiKeyIdPath })) + .output(zDeleteDatasetsByResourceIdApiKeysByApiKeyIdResponse) + +export const byApiKeyId2 = { + delete: delete10, +} + +/** + * Get all API keys for a dataset + * + * Get all API keys for a dataset + */ +export const get36 = oc + .route({ + description: 'Get all API keys for a dataset', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByResourceIdApiKeys', + path: '/datasets/{resource_id}/api-keys', + summary: 'Get all API keys for a dataset', + tags: ['console'], + }) + .input(z.object({ 
params: zGetDatasetsByResourceIdApiKeysPath })) + .output(zGetDatasetsByResourceIdApiKeysResponse) + +/** + * Create a new API key for a dataset + * + * Create a new API key for a dataset + */ +export const post22 = oc + .route({ + description: 'Create a new API key for a dataset', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByResourceIdApiKeys', + path: '/datasets/{resource_id}/api-keys', + successStatus: 201, + summary: 'Create a new API key for a dataset', + tags: ['console'], + }) + .input(z.object({ params: zPostDatasetsByResourceIdApiKeysPath })) + .output(zPostDatasetsByResourceIdApiKeysResponse) + +export const apiKeys3 = { + get: get36, + post: post22, + byApiKeyId: byApiKeyId2, +} + +export const byResourceId = { + apiKeys: apiKeys3, +} + +/** + * Get list of datasets + */ +export const get37 = oc + .route({ + description: 'Get list of datasets', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasets', + path: '/datasets', + tags: ['console'], + }) + .input(z.object({ query: zGetDatasetsQuery.optional() })) + .output(zGetDatasetsResponse) + +/** + * Create a new dataset + */ +export const post23 = oc + .route({ + description: 'Create a new dataset', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasets', + path: '/datasets', + successStatus: 201, + tags: ['console'], + }) + .input(z.object({ body: zPostDatasetsBody })) + .output(zPostDatasetsResponse) + +export const datasets = { + get: get37, + post: post23, + apiBaseInfo, + apiKeys, + batchImportStatus, + external, + externalKnowledgeApi, + indexingEstimate, + init, + metadata, + notionIndexingEstimate, + processRule, + retrievalSetting, + byDatasetId, + byResourceId, +} + +export const contract = { + datasets, +} diff --git a/packages/contracts/generated/api/console/datasets/types.gen.ts b/packages/contracts/generated/api/console/datasets/types.gen.ts new file mode 100644 index 0000000000..61d380d686 --- /dev/null +++ 
b/packages/contracts/generated/api/console/datasets/types.gen.ts @@ -0,0 +1,2165 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type DatasetCreatePayload = { + description?: string + external_knowledge_api_id?: string | null + external_knowledge_id?: string | null + indexing_technique?: string | null + name: string + permission?: DatasetPermissionEnum + provider?: string +} + +export type ApiKeyList = { + data: Array +} + +export type ApiKeyItem = { + created_at?: number | null + id: string + last_used_at?: number | null + token: string + type: string +} + +export type BatchImportPayload = { + upload_file_id: string +} + +export type ExternalDatasetCreatePayload = { + description?: string | null + external_knowledge_api_id: string + external_knowledge_id: string + external_retrieval_model?: { + [key: string]: unknown + } | null + name: string +} + +export type DatasetDetail = { + [key: string]: unknown +} + +export type ExternalKnowledgeApiPayload = { + name: string + settings: { + [key: string]: unknown + } +} + +export type IndexingEstimatePayload = { + dataset_id?: string | null + doc_form?: string + doc_language?: string + indexing_technique: string + info_list: { + [key: string]: unknown + } + process_rule: { + [key: string]: unknown + } +} + +export type KnowledgeConfig = { + data_source?: DataSource + doc_form?: string + doc_language?: string + duplicate?: boolean + embedding_model?: string | null + embedding_model_provider?: string | null + indexing_technique: 'high_quality' | 'economy' + is_multimodal?: boolean + name?: string | null + original_document_id?: string | null + process_rule?: ProcessRule + retrieval_model?: RetrievalModel + summary_index_setting?: { + [key: string]: unknown + } | null +} + +export type DatasetAndDocumentResponse = { + batch: string + dataset: DatasetResponse + documents: Array +} + +export type 
NotionEstimatePayload = { + doc_form?: string + doc_language?: string + notion_info_list: Array<{ + [key: string]: unknown + }> + process_rule: { + [key: string]: unknown + } +} + +export type DatasetUpdatePayload = { + description?: string | null + embedding_model?: string | null + embedding_model_provider?: string | null + external_knowledge_api_id?: string | null + external_knowledge_id?: string | null + external_retrieval_model?: { + [key: string]: unknown + } | null + icon_info?: { + [key: string]: unknown + } | null + indexing_technique?: string | null + is_multimodal?: boolean | null + name?: string | null + partial_member_list?: Array<{ + [key: string]: string + }> | null + permission?: DatasetPermissionEnum + retrieval_model?: { + [key: string]: unknown + } | null + summary_index_setting?: { + [key: string]: unknown + } | null +} + +export type DocumentBatchDownloadZipPayload = { + document_ids: Array +} + +export type GenerateSummaryPayload = { + document_list: Array +} + +export type MetadataOperationData = { + operation_data: Array +} + +export type DocumentMetadataUpdatePayload = { + doc_metadata?: unknown + doc_type?: string | null +} + +export type DocumentRenamePayload = { + name: string +} + +export type DocumentResponse = { + archived?: boolean | null + created_at?: number | null + created_by?: string | null + created_from?: string | null + data_source_detail_dict?: unknown + data_source_info_dict?: unknown + data_source_type?: string | null + dataset_process_rule_id?: string | null + disabled_at?: number | null + disabled_by?: string | null + display_status?: string | null + doc_form?: string | null + doc_metadata_details?: Array + enabled?: boolean | null + error?: string | null + hit_count?: number | null + id: string + indexing_status?: string | null + name: string + need_summary?: boolean | null + position?: number | null + summary_index_status?: string | null + tokens?: number | null + word_count?: number | null +} + +export type 
SegmentCreatePayload = { + answer?: string | null + attachment_ids?: Array | null + content: string + keywords?: Array | null +} + +export type SegmentUpdatePayload = { + answer?: string | null + attachment_ids?: Array | null + content: string + keywords?: Array | null + regenerate_child_chunks?: boolean + summary?: string | null +} + +export type ChildChunkCreatePayload = { + content: string +} + +export type ChildChunkUpdatePayload = { + content: string +} + +export type ExternalHitTestingPayload = { + external_retrieval_model?: { + [key: string]: unknown + } | null + metadata_filtering_conditions?: { + [key: string]: unknown + } | null + query: string +} + +export type HitTestingPayload = { + attachment_ids?: Array | null + external_retrieval_model?: { + [key: string]: unknown + } | null + query: string + retrieval_model?: RetrievalModel +} + +export type HitTestingResponse = { + query: string + records?: Array +} + +export type MetadataArgs = { + name: string + type: 'string' | 'number' | 'time' +} + +export type MetadataUpdatePayload = { + name: string +} + +export type DatasetQueryDetail = { + [key: string]: unknown +} + +export type RelatedAppList = { + [key: string]: unknown +} + +export type DocumentRetryPayload = { + document_ids: Array +} + +export type DatasetPermissionEnum = 'only_me' | 'all_team_members' | 'partial_members' + +export type DataSource = { + info_list: InfoList +} + +export type ProcessRule = { + mode: 'automatic' | 'custom' | 'hierarchical' + rules?: Rule +} + +export type RetrievalModel = { + metadata_filtering_conditions?: MetadataFilteringCondition + reranking_enable: boolean + reranking_mode?: string | null + reranking_model?: RerankingModel + score_threshold?: number | null + score_threshold_enabled: boolean + search_method: RetrievalMethod + top_k: number + weights?: WeightModel +} + +export type DatasetResponse = { + created_at?: number | null + created_by?: string | null + data_source_type?: string | null + description?: string 
| null + id: string + indexing_technique?: string | null + name: string + permission?: string | null +} + +export type DocumentMetadataOperation = { + document_id: string + metadata_list: Array + partial_update?: boolean +} + +export type DocumentMetadataResponse = { + id: string + name: string + type: string + value?: string | null +} + +export type HitTestingRecord = { + child_chunks?: Array + files?: Array + score?: number | null + segment?: HitTestingSegment + summary?: string | null + tsne_position?: unknown +} + +export type InfoList = { + data_source_type: 'upload_file' | 'notion_import' | 'website_crawl' + file_info_list?: FileInfo + notion_info_list?: Array | null + website_info_list?: WebsiteInfo +} + +export type Rule = { + parent_mode?: 'full-doc' | 'paragraph' | null + pre_processing_rules?: Array | null + segmentation?: Segmentation + subchunk_segmentation?: Segmentation +} + +export type MetadataFilteringCondition = { + conditions?: Array | null + logical_operator?: 'and' | 'or' | null +} + +export type RerankingModel = { + reranking_model_name?: string | null + reranking_provider_name?: string | null +} + +export type RetrievalMethod + = | 'semantic_search' + | 'full_text_search' + | 'hybrid_search' + | 'keyword_search' + +export type WeightModel = { + keyword_setting?: WeightKeywordSetting + vector_setting?: WeightVectorSetting + weight_type?: 'semantic_first' | 'keyword_first' | 'customized' | null +} + +export type MetadataDetail = { + id: string + name: string + value?: unknown +} + +export type HitTestingChildChunk = { + content?: string | null + id?: string | null + position?: number | null + score?: number | null +} + +export type HitTestingFile = { + extension?: string | null + id?: string | null + mime_type?: string | null + name?: string | null + size?: number | null + source_url?: string | null +} + +export type HitTestingSegment = { + answer?: string | null + completed_at?: number | null + content?: string | null + created_at?: number | 
null + created_by?: string | null + disabled_at?: number | null + disabled_by?: string | null + document?: HitTestingDocument + document_id?: string | null + enabled?: boolean | null + error?: string | null + hit_count?: number | null + id?: string | null + index_node_hash?: string | null + index_node_id?: string | null + indexing_at?: number | null + keywords?: Array + position?: number | null + sign_content?: string | null + status?: string | null + stopped_at?: number | null + tokens?: number | null + word_count?: number | null +} + +export type FileInfo = { + file_ids: Array +} + +export type NotionInfo = { + credential_id: string + pages: Array + workspace_id: string +} + +export type WebsiteInfo = { + job_id: string + only_main_content?: boolean + provider: string + urls: Array +} + +export type PreProcessingRule = { + enabled: boolean + id: string +} + +export type Segmentation = { + chunk_overlap?: number + max_tokens: number + separator?: string +} + +export type Condition = { + comparison_operator: + | 'contains' + | 'not contains' + | 'start with' + | 'end with' + | 'is' + | 'is not' + | 'empty' + | 'not empty' + | 'in' + | 'not in' + | '=' + | '≠' + | '>' + | '<' + | '≥' + | '≤' + | 'before' + | 'after' + name: string + value?: unknown +} + +export type WeightKeywordSetting = { + keyword_weight: number +} + +export type WeightVectorSetting = { + embedding_model_name: string + embedding_provider_name: string + vector_weight: number +} + +export type HitTestingDocument = { + data_source_type?: string | null + doc_metadata?: unknown + doc_type?: string | null + id?: string | null + name?: string | null +} + +export type NotionPage = { + page_icon?: NotionIcon + page_id: string + page_name: string + type: string +} + +export type NotionIcon = { + emoji?: string | null + type: string + url?: string | null +} + +export type GetDatasetsData = { + body?: never + path?: never + query?: { + page?: string + limit?: string + ids?: string + keyword?: string + 
tag_ids?: string + include_all?: string + } + url: '/datasets' +} + +export type GetDatasetsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsResponse = GetDatasetsResponses[keyof GetDatasetsResponses] + +export type PostDatasetsData = { + body: DatasetCreatePayload + path?: never + query?: never + url: '/datasets' +} + +export type PostDatasetsErrors = { + 400: { + [key: string]: unknown + } +} + +export type PostDatasetsError = PostDatasetsErrors[keyof PostDatasetsErrors] + +export type PostDatasetsResponses = { + 201: { + [key: string]: unknown + } +} + +export type PostDatasetsResponse = PostDatasetsResponses[keyof PostDatasetsResponses] + +export type GetDatasetsApiBaseInfoData = { + body?: never + path?: never + query?: never + url: '/datasets/api-base-info' +} + +export type GetDatasetsApiBaseInfoResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsApiBaseInfoResponse + = GetDatasetsApiBaseInfoResponses[keyof GetDatasetsApiBaseInfoResponses] + +export type GetDatasetsApiKeysData = { + body?: never + path?: never + query?: never + url: '/datasets/api-keys' +} + +export type GetDatasetsApiKeysResponses = { + 200: ApiKeyList +} + +export type GetDatasetsApiKeysResponse + = GetDatasetsApiKeysResponses[keyof GetDatasetsApiKeysResponses] + +export type PostDatasetsApiKeysData = { + body?: never + path?: never + query?: never + url: '/datasets/api-keys' +} + +export type PostDatasetsApiKeysErrors = { + 400: { + [key: string]: unknown + } +} + +export type PostDatasetsApiKeysError = PostDatasetsApiKeysErrors[keyof PostDatasetsApiKeysErrors] + +export type PostDatasetsApiKeysResponses = { + 200: ApiKeyItem +} + +export type PostDatasetsApiKeysResponse + = PostDatasetsApiKeysResponses[keyof PostDatasetsApiKeysResponses] + +export type DeleteDatasetsApiKeysByApiKeyIdData = { + body?: never + path: { + api_key_id: string + } + query?: never + url: '/datasets/api-keys/{api_key_id}' +} + +export type 
DeleteDatasetsApiKeysByApiKeyIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteDatasetsApiKeysByApiKeyIdResponse + = DeleteDatasetsApiKeysByApiKeyIdResponses[keyof DeleteDatasetsApiKeysByApiKeyIdResponses] + +export type GetDatasetsBatchImportStatusByJobIdData = { + body?: never + path: { + job_id: string + } + query?: never + url: '/datasets/batch_import_status/{job_id}' +} + +export type GetDatasetsBatchImportStatusByJobIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsBatchImportStatusByJobIdResponse + = GetDatasetsBatchImportStatusByJobIdResponses[keyof GetDatasetsBatchImportStatusByJobIdResponses] + +export type PostDatasetsBatchImportStatusByJobIdData = { + body: BatchImportPayload + path: { + job_id: string + } + query?: never + url: '/datasets/batch_import_status/{job_id}' +} + +export type PostDatasetsBatchImportStatusByJobIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsBatchImportStatusByJobIdResponse + = PostDatasetsBatchImportStatusByJobIdResponses[keyof PostDatasetsBatchImportStatusByJobIdResponses] + +export type PostDatasetsExternalData = { + body: ExternalDatasetCreatePayload + path?: never + query?: never + url: '/datasets/external' +} + +export type PostDatasetsExternalErrors = { + 400: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } +} + +export type PostDatasetsExternalError = PostDatasetsExternalErrors[keyof PostDatasetsExternalErrors] + +export type PostDatasetsExternalResponses = { + 201: DatasetDetail +} + +export type PostDatasetsExternalResponse + = PostDatasetsExternalResponses[keyof PostDatasetsExternalResponses] + +export type GetDatasetsExternalKnowledgeApiData = { + body?: never + path?: never + query?: { + page?: string + limit?: string + keyword?: string + } + url: '/datasets/external-knowledge-api' +} + +export type GetDatasetsExternalKnowledgeApiResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
GetDatasetsExternalKnowledgeApiResponse + = GetDatasetsExternalKnowledgeApiResponses[keyof GetDatasetsExternalKnowledgeApiResponses] + +export type PostDatasetsExternalKnowledgeApiData = { + body: ExternalKnowledgeApiPayload + path?: never + query?: never + url: '/datasets/external-knowledge-api' +} + +export type PostDatasetsExternalKnowledgeApiResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsExternalKnowledgeApiResponse + = PostDatasetsExternalKnowledgeApiResponses[keyof PostDatasetsExternalKnowledgeApiResponses] + +export type DeleteDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdData = { + body?: never + path: { + external_knowledge_api_id: string + } + query?: never + url: '/datasets/external-knowledge-api/{external_knowledge_api_id}' +} + +export type DeleteDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponse + = DeleteDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponses[keyof DeleteDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponses] + +export type GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdData = { + body?: never + path: { + external_knowledge_api_id: string + } + query?: never + url: '/datasets/external-knowledge-api/{external_knowledge_api_id}' +} + +export type GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdError + = GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdErrors[keyof GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdErrors] + +export type GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponse + = GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponses[keyof 
GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponses] + +export type PatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdData = { + body: ExternalKnowledgeApiPayload + path: { + external_knowledge_api_id: string + } + query?: never + url: '/datasets/external-knowledge-api/{external_knowledge_api_id}' +} + +export type PatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponse + = PatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponses[keyof PatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponses] + +export type GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdUseCheckData = { + body?: never + path: { + external_knowledge_api_id: string + } + query?: never + url: '/datasets/external-knowledge-api/{external_knowledge_api_id}/use-check' +} + +export type GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdUseCheckResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdUseCheckResponse + = GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdUseCheckResponses[keyof GetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdUseCheckResponses] + +export type PostDatasetsIndexingEstimateData = { + body: IndexingEstimatePayload + path?: never + query?: never + url: '/datasets/indexing-estimate' +} + +export type PostDatasetsIndexingEstimateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsIndexingEstimateResponse + = PostDatasetsIndexingEstimateResponses[keyof PostDatasetsIndexingEstimateResponses] + +export type PostDatasetsInitData = { + body: KnowledgeConfig + path?: never + query?: never + url: '/datasets/init' +} + +export type PostDatasetsInitErrors = { + 400: { + [key: string]: unknown + } +} + +export type PostDatasetsInitError = PostDatasetsInitErrors[keyof PostDatasetsInitErrors] + 
+export type PostDatasetsInitResponses = { + 201: DatasetAndDocumentResponse +} + +export type PostDatasetsInitResponse = PostDatasetsInitResponses[keyof PostDatasetsInitResponses] + +export type GetDatasetsMetadataBuiltInData = { + body?: never + path?: never + query?: never + url: '/datasets/metadata/built-in' +} + +export type GetDatasetsMetadataBuiltInResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsMetadataBuiltInResponse + = GetDatasetsMetadataBuiltInResponses[keyof GetDatasetsMetadataBuiltInResponses] + +export type GetDatasetsNotionIndexingEstimateData = { + body?: never + path?: never + query?: never + url: '/datasets/notion-indexing-estimate' +} + +export type GetDatasetsNotionIndexingEstimateResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsNotionIndexingEstimateResponse + = GetDatasetsNotionIndexingEstimateResponses[keyof GetDatasetsNotionIndexingEstimateResponses] + +export type PostDatasetsNotionIndexingEstimateData = { + body: NotionEstimatePayload + path?: never + query?: never + url: '/datasets/notion-indexing-estimate' +} + +export type PostDatasetsNotionIndexingEstimateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsNotionIndexingEstimateResponse + = PostDatasetsNotionIndexingEstimateResponses[keyof PostDatasetsNotionIndexingEstimateResponses] + +export type GetDatasetsProcessRuleData = { + body?: never + path?: never + query?: { + document_id?: string + } + url: '/datasets/process-rule' +} + +export type GetDatasetsProcessRuleResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsProcessRuleResponse + = GetDatasetsProcessRuleResponses[keyof GetDatasetsProcessRuleResponses] + +export type GetDatasetsRetrievalSettingData = { + body?: never + path?: never + query?: never + url: '/datasets/retrieval-setting' +} + +export type GetDatasetsRetrievalSettingResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
GetDatasetsRetrievalSettingResponse + = GetDatasetsRetrievalSettingResponses[keyof GetDatasetsRetrievalSettingResponses] + +export type GetDatasetsRetrievalSettingByVectorTypeData = { + body?: never + path: { + vector_type: string + } + query?: never + url: '/datasets/retrieval-setting/{vector_type}' +} + +export type GetDatasetsRetrievalSettingByVectorTypeResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsRetrievalSettingByVectorTypeResponse + = GetDatasetsRetrievalSettingByVectorTypeResponses[keyof GetDatasetsRetrievalSettingByVectorTypeResponses] + +export type DeleteDatasetsByDatasetIdData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}' +} + +export type DeleteDatasetsByDatasetIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdResponse + = DeleteDatasetsByDatasetIdResponses[keyof DeleteDatasetsByDatasetIdResponses] + +export type GetDatasetsByDatasetIdData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}' +} + +export type GetDatasetsByDatasetIdErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdError + = GetDatasetsByDatasetIdErrors[keyof GetDatasetsByDatasetIdErrors] + +export type GetDatasetsByDatasetIdResponses = { + 200: DatasetDetail +} + +export type GetDatasetsByDatasetIdResponse + = GetDatasetsByDatasetIdResponses[keyof GetDatasetsByDatasetIdResponses] + +export type PatchDatasetsByDatasetIdData = { + body: DatasetUpdatePayload + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}' +} + +export type PatchDatasetsByDatasetIdErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdError + = PatchDatasetsByDatasetIdErrors[keyof PatchDatasetsByDatasetIdErrors] + +export type 
PatchDatasetsByDatasetIdResponses = { + 200: DatasetDetail +} + +export type PatchDatasetsByDatasetIdResponse + = PatchDatasetsByDatasetIdResponses[keyof PatchDatasetsByDatasetIdResponses] + +export type PostDatasetsByDatasetIdApiKeysByStatusData = { + body?: never + path: { + dataset_id: string + status: string + } + query?: never + url: '/datasets/{dataset_id}/api-keys/{status}' +} + +export type PostDatasetsByDatasetIdApiKeysByStatusResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdApiKeysByStatusResponse + = PostDatasetsByDatasetIdApiKeysByStatusResponses[keyof PostDatasetsByDatasetIdApiKeysByStatusResponses] + +export type GetDatasetsByDatasetIdAutoDisableLogsData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/auto-disable-logs' +} + +export type GetDatasetsByDatasetIdAutoDisableLogsErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdAutoDisableLogsError + = GetDatasetsByDatasetIdAutoDisableLogsErrors[keyof GetDatasetsByDatasetIdAutoDisableLogsErrors] + +export type GetDatasetsByDatasetIdAutoDisableLogsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdAutoDisableLogsResponse + = GetDatasetsByDatasetIdAutoDisableLogsResponses[keyof GetDatasetsByDatasetIdAutoDisableLogsResponses] + +export type GetDatasetsByDatasetIdBatchByBatchIndexingEstimateData = { + body?: never + path: { + dataset_id: string + batch: string + } + query?: never + url: '/datasets/{dataset_id}/batch/{batch}/indexing-estimate' +} + +export type GetDatasetsByDatasetIdBatchByBatchIndexingEstimateResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdBatchByBatchIndexingEstimateResponse + = GetDatasetsByDatasetIdBatchByBatchIndexingEstimateResponses[keyof GetDatasetsByDatasetIdBatchByBatchIndexingEstimateResponses] + +export type GetDatasetsByDatasetIdBatchByBatchIndexingStatusData = { + 
body?: never + path: { + dataset_id: string + batch: string + } + query?: never + url: '/datasets/{dataset_id}/batch/{batch}/indexing-status' +} + +export type GetDatasetsByDatasetIdBatchByBatchIndexingStatusResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdBatchByBatchIndexingStatusResponse + = GetDatasetsByDatasetIdBatchByBatchIndexingStatusResponses[keyof GetDatasetsByDatasetIdBatchByBatchIndexingStatusResponses] + +export type DeleteDatasetsByDatasetIdDocumentsData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents' +} + +export type DeleteDatasetsByDatasetIdDocumentsResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdDocumentsResponse + = DeleteDatasetsByDatasetIdDocumentsResponses[keyof DeleteDatasetsByDatasetIdDocumentsResponses] + +export type GetDatasetsByDatasetIdDocumentsData = { + body?: never + path: { + dataset_id: string + } + query?: { + page?: string + limit?: string + keyword?: string + sort?: string + fetch?: string + status?: string + } + url: '/datasets/{dataset_id}/documents' +} + +export type GetDatasetsByDatasetIdDocumentsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsResponse + = GetDatasetsByDatasetIdDocumentsResponses[keyof GetDatasetsByDatasetIdDocumentsResponses] + +export type PostDatasetsByDatasetIdDocumentsData = { + body: KnowledgeConfig + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents' +} + +export type PostDatasetsByDatasetIdDocumentsResponses = { + 200: DatasetAndDocumentResponse +} + +export type PostDatasetsByDatasetIdDocumentsResponse + = PostDatasetsByDatasetIdDocumentsResponses[keyof PostDatasetsByDatasetIdDocumentsResponses] + +export type PostDatasetsByDatasetIdDocumentsDownloadZipData = { + body: DocumentBatchDownloadZipPayload + path: { + dataset_id: string + } + query?: never + url: 
'/datasets/{dataset_id}/documents/download-zip' +} + +export type PostDatasetsByDatasetIdDocumentsDownloadZipResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsDownloadZipResponse + = PostDatasetsByDatasetIdDocumentsDownloadZipResponses[keyof PostDatasetsByDatasetIdDocumentsDownloadZipResponses] + +export type PostDatasetsByDatasetIdDocumentsGenerateSummaryData = { + body: GenerateSummaryPayload + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/generate-summary' +} + +export type PostDatasetsByDatasetIdDocumentsGenerateSummaryErrors = { + 400: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsGenerateSummaryError + = PostDatasetsByDatasetIdDocumentsGenerateSummaryErrors[keyof PostDatasetsByDatasetIdDocumentsGenerateSummaryErrors] + +export type PostDatasetsByDatasetIdDocumentsGenerateSummaryResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsGenerateSummaryResponse + = PostDatasetsByDatasetIdDocumentsGenerateSummaryResponses[keyof PostDatasetsByDatasetIdDocumentsGenerateSummaryResponses] + +export type PostDatasetsByDatasetIdDocumentsMetadataData = { + body: MetadataOperationData + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/metadata' +} + +export type PostDatasetsByDatasetIdDocumentsMetadataResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsMetadataResponse + = PostDatasetsByDatasetIdDocumentsMetadataResponses[keyof PostDatasetsByDatasetIdDocumentsMetadataResponses] + +export type PatchDatasetsByDatasetIdDocumentsStatusByActionBatchData = { + body?: never + path: { + dataset_id: string + action: string + } + query?: never + url: '/datasets/{dataset_id}/documents/status/{action}/batch' +} + +export type 
PatchDatasetsByDatasetIdDocumentsStatusByActionBatchResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdDocumentsStatusByActionBatchResponse + = PatchDatasetsByDatasetIdDocumentsStatusByActionBatchResponses[keyof PatchDatasetsByDatasetIdDocumentsStatusByActionBatchResponses] + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdData = { + body?: never + path: { + document_id: string + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}' +} + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdResponse + = DeleteDatasetsByDatasetIdDocumentsByDocumentIdResponses[keyof DeleteDatasetsByDatasetIdDocumentsByDocumentIdResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: { + metadata?: string + } + url: '/datasets/{dataset_id}/documents/{document_id}' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdError + = GetDatasetsByDatasetIdDocumentsByDocumentIdErrors[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdErrors] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/download' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponses = { + 200: { + [key: string]: 
unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimateData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/indexing-estimate' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimateErrors = { + 400: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimateError + = GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimateErrors[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimateErrors] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimateResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimateResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimateResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimateResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/indexing-status' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusError + = GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusErrors[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusErrors] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusResponses] + +export type PutDatasetsByDatasetIdDocumentsByDocumentIdMetadataData = { + body: DocumentMetadataUpdatePayload + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/metadata' +} + +export type PutDatasetsByDatasetIdDocumentsByDocumentIdMetadataErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PutDatasetsByDatasetIdDocumentsByDocumentIdMetadataError + = PutDatasetsByDatasetIdDocumentsByDocumentIdMetadataErrors[keyof PutDatasetsByDatasetIdDocumentsByDocumentIdMetadataErrors] + +export type PutDatasetsByDatasetIdDocumentsByDocumentIdMetadataResponses = { + 200: { + [key: string]: unknown + } +} + +export type PutDatasetsByDatasetIdDocumentsByDocumentIdMetadataResponse + = PutDatasetsByDatasetIdDocumentsByDocumentIdMetadataResponses[keyof PutDatasetsByDatasetIdDocumentsByDocumentIdMetadataResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdNotionSyncData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/notion/sync' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdNotionSyncResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdNotionSyncResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdNotionSyncResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdNotionSyncResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdPipelineExecutionLogData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/pipeline-execution-log' +} + +export type 
GetDatasetsByDatasetIdDocumentsByDocumentIdPipelineExecutionLogResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdPipelineExecutionLogResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdPipelineExecutionLogResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdPipelineExecutionLogResponses] + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingPauseData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/processing/pause' +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingPauseResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingPauseResponse + = PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingPauseResponses[keyof PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingPauseResponses] + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingResumeData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/processing/resume' +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingResumeResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingResumeResponse + = PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingResumeResponses[keyof PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingResumeResponses] + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionData = { + body?: never + path: { + dataset_id: string + document_id: string + action: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/processing/{action}' +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionErrors = { + 400: { + [key: string]: unknown + } + 404: 
{ + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionError + = PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionErrors[keyof PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionErrors] + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionResponse + = PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionResponses[keyof PatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionResponses] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdRenameData = { + body: DocumentRenamePayload + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/rename' +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdRenameResponses = { + 200: DocumentResponse +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdRenameResponse + = PostDatasetsByDatasetIdDocumentsByDocumentIdRenameResponses[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdRenameResponses] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentData = { + body: SegmentCreatePayload + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segment' +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentResponse + = PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentResponses[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentResponses] + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentByActionData = { + body?: never + path: { + dataset_id: string + document_id: string + action: string + } + query?: never + url: 
'/datasets/{dataset_id}/documents/{document_id}/segment/{action}' +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentByActionResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentByActionResponse + = PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentByActionResponses[keyof PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentByActionResponses] + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments' +} + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse + = DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponses[keyof DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/batch_import' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponses] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportData = { + body: BatchImportPayload + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/batch_import' +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponse + = PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponses[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponses] + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdData = { + body?: never + path: { + dataset_id: string + document_id: string + segment_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}' +} + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse + = DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses[keyof DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses] + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdData = { + body: SegmentUpdatePayload + path: { + dataset_id: string + document_id: string + segment_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}' +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse + = PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses[keyof PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksData = { + body?: never + path: { + dataset_id: string + document_id: string + segment_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses] + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksData = { + body?: never + path: { + dataset_id: string + document_id: string + segment_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks' +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse + = PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses[keyof PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksData = { + body: ChildChunkCreatePayload + path: { + dataset_id: string + document_id: string + segment_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks' +} + 
+export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse + = PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses] + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdData + = { + body?: never + path: { + dataset_id: string + document_id: string + segment_id: string + child_chunk_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id}' + } + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponses + = { + 200: { + [key: string]: unknown + } + } + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse + = DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponses[keyof DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponses] + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdData + = { + body: ChildChunkUpdatePayload + path: { + dataset_id: string + document_id: string + segment_id: string + child_chunk_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id}' + } + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponses + = { + 200: { + [key: string]: unknown + } + } + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse + = 
PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponses[keyof PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/summary-status' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusErrors = { + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusError + = GetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusErrors[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusErrors] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdWebsiteSyncData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/website-sync' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdWebsiteSyncResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdWebsiteSyncResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdWebsiteSyncResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdWebsiteSyncResponses] + +export type GetDatasetsByDatasetIdErrorDocsData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/error-docs' +} + +export type GetDatasetsByDatasetIdErrorDocsErrors = { + 404: { + [key: string]: unknown + } +} + +export type 
GetDatasetsByDatasetIdErrorDocsError + = GetDatasetsByDatasetIdErrorDocsErrors[keyof GetDatasetsByDatasetIdErrorDocsErrors] + +export type GetDatasetsByDatasetIdErrorDocsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdErrorDocsResponse + = GetDatasetsByDatasetIdErrorDocsResponses[keyof GetDatasetsByDatasetIdErrorDocsResponses] + +export type PostDatasetsByDatasetIdExternalHitTestingData = { + body: ExternalHitTestingPayload + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/external-hit-testing' +} + +export type PostDatasetsByDatasetIdExternalHitTestingErrors = { + 400: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdExternalHitTestingError + = PostDatasetsByDatasetIdExternalHitTestingErrors[keyof PostDatasetsByDatasetIdExternalHitTestingErrors] + +export type PostDatasetsByDatasetIdExternalHitTestingResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdExternalHitTestingResponse + = PostDatasetsByDatasetIdExternalHitTestingResponses[keyof PostDatasetsByDatasetIdExternalHitTestingResponses] + +export type PostDatasetsByDatasetIdHitTestingData = { + body: HitTestingPayload + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/hit-testing' +} + +export type PostDatasetsByDatasetIdHitTestingErrors = { + 400: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdHitTestingError + = PostDatasetsByDatasetIdHitTestingErrors[keyof PostDatasetsByDatasetIdHitTestingErrors] + +export type PostDatasetsByDatasetIdHitTestingResponses = { + 200: HitTestingResponse +} + +export type PostDatasetsByDatasetIdHitTestingResponse + = PostDatasetsByDatasetIdHitTestingResponses[keyof PostDatasetsByDatasetIdHitTestingResponses] + +export type GetDatasetsByDatasetIdIndexingStatusData = { + body?: never + path: { + dataset_id: string + 
} + query?: never + url: '/datasets/{dataset_id}/indexing-status' +} + +export type GetDatasetsByDatasetIdIndexingStatusResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdIndexingStatusResponse + = GetDatasetsByDatasetIdIndexingStatusResponses[keyof GetDatasetsByDatasetIdIndexingStatusResponses] + +export type GetDatasetsByDatasetIdMetadataData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/metadata' +} + +export type GetDatasetsByDatasetIdMetadataResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdMetadataResponse + = GetDatasetsByDatasetIdMetadataResponses[keyof GetDatasetsByDatasetIdMetadataResponses] + +export type PostDatasetsByDatasetIdMetadataData = { + body: MetadataArgs + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/metadata' +} + +export type PostDatasetsByDatasetIdMetadataResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdMetadataResponse + = PostDatasetsByDatasetIdMetadataResponses[keyof PostDatasetsByDatasetIdMetadataResponses] + +export type PostDatasetsByDatasetIdMetadataBuiltInByActionData = { + body?: never + path: { + dataset_id: string + action: string + } + query?: never + url: '/datasets/{dataset_id}/metadata/built-in/{action}' +} + +export type PostDatasetsByDatasetIdMetadataBuiltInByActionResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdMetadataBuiltInByActionResponse + = PostDatasetsByDatasetIdMetadataBuiltInByActionResponses[keyof PostDatasetsByDatasetIdMetadataBuiltInByActionResponses] + +export type DeleteDatasetsByDatasetIdMetadataByMetadataIdData = { + body?: never + path: { + dataset_id: string + metadata_id: string + } + query?: never + url: '/datasets/{dataset_id}/metadata/{metadata_id}' +} + +export type DeleteDatasetsByDatasetIdMetadataByMetadataIdResponses = { + 200: { + [key: 
string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdMetadataByMetadataIdResponse + = DeleteDatasetsByDatasetIdMetadataByMetadataIdResponses[keyof DeleteDatasetsByDatasetIdMetadataByMetadataIdResponses] + +export type PatchDatasetsByDatasetIdMetadataByMetadataIdData = { + body: MetadataUpdatePayload + path: { + dataset_id: string + metadata_id: string + } + query?: never + url: '/datasets/{dataset_id}/metadata/{metadata_id}' +} + +export type PatchDatasetsByDatasetIdMetadataByMetadataIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdMetadataByMetadataIdResponse + = PatchDatasetsByDatasetIdMetadataByMetadataIdResponses[keyof PatchDatasetsByDatasetIdMetadataByMetadataIdResponses] + +export type GetDatasetsByDatasetIdNotionSyncData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/notion/sync' +} + +export type GetDatasetsByDatasetIdNotionSyncResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdNotionSyncResponse + = GetDatasetsByDatasetIdNotionSyncResponses[keyof GetDatasetsByDatasetIdNotionSyncResponses] + +export type GetDatasetsByDatasetIdPermissionPartUsersData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/permission-part-users' +} + +export type GetDatasetsByDatasetIdPermissionPartUsersErrors = { + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdPermissionPartUsersError + = GetDatasetsByDatasetIdPermissionPartUsersErrors[keyof GetDatasetsByDatasetIdPermissionPartUsersErrors] + +export type GetDatasetsByDatasetIdPermissionPartUsersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdPermissionPartUsersResponse + = GetDatasetsByDatasetIdPermissionPartUsersResponses[keyof GetDatasetsByDatasetIdPermissionPartUsersResponses] + +export type 
GetDatasetsByDatasetIdQueriesData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/queries' +} + +export type GetDatasetsByDatasetIdQueriesResponses = { + 200: DatasetQueryDetail +} + +export type GetDatasetsByDatasetIdQueriesResponse + = GetDatasetsByDatasetIdQueriesResponses[keyof GetDatasetsByDatasetIdQueriesResponses] + +export type GetDatasetsByDatasetIdRelatedAppsData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/related-apps' +} + +export type GetDatasetsByDatasetIdRelatedAppsResponses = { + 200: RelatedAppList +} + +export type GetDatasetsByDatasetIdRelatedAppsResponse + = GetDatasetsByDatasetIdRelatedAppsResponses[keyof GetDatasetsByDatasetIdRelatedAppsResponses] + +export type PostDatasetsByDatasetIdRetryData = { + body: DocumentRetryPayload + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/retry' +} + +export type PostDatasetsByDatasetIdRetryResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdRetryResponse + = PostDatasetsByDatasetIdRetryResponses[keyof PostDatasetsByDatasetIdRetryResponses] + +export type GetDatasetsByDatasetIdUseCheckData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/use-check' +} + +export type GetDatasetsByDatasetIdUseCheckResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdUseCheckResponse + = GetDatasetsByDatasetIdUseCheckResponses[keyof GetDatasetsByDatasetIdUseCheckResponses] + +export type GetDatasetsByResourceIdApiKeysData = { + body?: never + path: { + resource_id: string + } + query?: never + url: '/datasets/{resource_id}/api-keys' +} + +export type GetDatasetsByResourceIdApiKeysResponses = { + 200: ApiKeyList +} + +export type GetDatasetsByResourceIdApiKeysResponse + = GetDatasetsByResourceIdApiKeysResponses[keyof GetDatasetsByResourceIdApiKeysResponses] 
+ +export type PostDatasetsByResourceIdApiKeysData = { + body?: never + path: { + resource_id: string + } + query?: never + url: '/datasets/{resource_id}/api-keys' +} + +export type PostDatasetsByResourceIdApiKeysErrors = { + 400: { + [key: string]: unknown + } +} + +export type PostDatasetsByResourceIdApiKeysError + = PostDatasetsByResourceIdApiKeysErrors[keyof PostDatasetsByResourceIdApiKeysErrors] + +export type PostDatasetsByResourceIdApiKeysResponses = { + 201: ApiKeyItem +} + +export type PostDatasetsByResourceIdApiKeysResponse + = PostDatasetsByResourceIdApiKeysResponses[keyof PostDatasetsByResourceIdApiKeysResponses] + +export type DeleteDatasetsByResourceIdApiKeysByApiKeyIdData = { + body?: never + path: { + resource_id: string + api_key_id: string + } + query?: never + url: '/datasets/{resource_id}/api-keys/{api_key_id}' +} + +export type DeleteDatasetsByResourceIdApiKeysByApiKeyIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByResourceIdApiKeysByApiKeyIdResponse + = DeleteDatasetsByResourceIdApiKeysByApiKeyIdResponses[keyof DeleteDatasetsByResourceIdApiKeysByApiKeyIdResponses] diff --git a/packages/contracts/generated/api/console/datasets/zod.gen.ts b/packages/contracts/generated/api/console/datasets/zod.gen.ts new file mode 100644 index 0000000000..76491c52a0 --- /dev/null +++ b/packages/contracts/generated/api/console/datasets/zod.gen.ts @@ -0,0 +1,1531 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * ApiKeyItem + */ +export const zApiKeyItem = z.object({ + created_at: z.int().nullish(), + id: z.string(), + last_used_at: z.int().nullish(), + token: z.string(), + type: z.string(), +}) + +/** + * ApiKeyList + */ +export const zApiKeyList = z.object({ + data: z.array(zApiKeyItem), +}) + +/** + * BatchImportPayload + */ +export const zBatchImportPayload = z.object({ + upload_file_id: z.string(), +}) + +/** + * ExternalDatasetCreatePayload + */ +export const 
zExternalDatasetCreatePayload = z.object({ + description: z.string().max(400).nullish(), + external_knowledge_api_id: z.string(), + external_knowledge_id: z.string(), + external_retrieval_model: z.record(z.string(), z.unknown()).nullish(), + name: z.string().min(1).max(100), +}) + +export const zDatasetDetail = z.record(z.string(), z.unknown()) + +/** + * ExternalKnowledgeApiPayload + */ +export const zExternalKnowledgeApiPayload = z.object({ + name: z.string().min(1).max(40), + settings: z.record(z.string(), z.unknown()), +}) + +/** + * IndexingEstimatePayload + */ +export const zIndexingEstimatePayload = z.object({ + dataset_id: z.string().nullish(), + doc_form: z.string().optional().default('text_model'), + doc_language: z.string().optional().default('English'), + indexing_technique: z.string(), + info_list: z.record(z.string(), z.unknown()), + process_rule: z.record(z.string(), z.unknown()), +}) + +/** + * NotionEstimatePayload + */ +export const zNotionEstimatePayload = z.object({ + doc_form: z.string().optional().default('text_model'), + doc_language: z.string().optional().default('English'), + notion_info_list: z.array(z.record(z.string(), z.unknown())), + process_rule: z.record(z.string(), z.unknown()), +}) + +/** + * DocumentBatchDownloadZipPayload + * + * Request payload for bulk downloading documents as a zip archive. 
+ */ +export const zDocumentBatchDownloadZipPayload = z.object({ + document_ids: z.array(z.uuid()).min(1).max(100), +}) + +/** + * GenerateSummaryPayload + */ +export const zGenerateSummaryPayload = z.object({ + document_list: z.array(z.string()), +}) + +/** + * DocumentMetadataUpdatePayload + */ +export const zDocumentMetadataUpdatePayload = z.object({ + doc_metadata: z.unknown().optional(), + doc_type: z.string().nullish(), +}) + +/** + * DocumentRenamePayload + */ +export const zDocumentRenamePayload = z.object({ + name: z.string(), +}) + +/** + * SegmentCreatePayload + */ +export const zSegmentCreatePayload = z.object({ + answer: z.string().nullish(), + attachment_ids: z.array(z.string()).nullish(), + content: z.string(), + keywords: z.array(z.string()).nullish(), +}) + +/** + * SegmentUpdatePayload + */ +export const zSegmentUpdatePayload = z.object({ + answer: z.string().nullish(), + attachment_ids: z.array(z.string()).nullish(), + content: z.string(), + keywords: z.array(z.string()).nullish(), + regenerate_child_chunks: z.boolean().optional().default(false), + summary: z.string().nullish(), +}) + +/** + * ChildChunkCreatePayload + */ +export const zChildChunkCreatePayload = z.object({ + content: z.string(), +}) + +/** + * ChildChunkUpdatePayload + */ +export const zChildChunkUpdatePayload = z.object({ + content: z.string(), +}) + +/** + * ExternalHitTestingPayload + */ +export const zExternalHitTestingPayload = z.object({ + external_retrieval_model: z.record(z.string(), z.unknown()).nullish(), + metadata_filtering_conditions: z.record(z.string(), z.unknown()).nullish(), + query: z.string(), +}) + +/** + * MetadataArgs + */ +export const zMetadataArgs = z.object({ + name: z.string(), + type: z.enum(['string', 'number', 'time']), +}) + +/** + * MetadataUpdatePayload + */ +export const zMetadataUpdatePayload = z.object({ + name: z.string(), +}) + +export const zDatasetQueryDetail = z.record(z.string(), z.unknown()) + +export const zRelatedAppList = 
z.record(z.string(), z.unknown()) + +/** + * DocumentRetryPayload + */ +export const zDocumentRetryPayload = z.object({ + document_ids: z.array(z.string()), +}) + +/** + * DatasetPermissionEnum + */ +export const zDatasetPermissionEnum = z.enum(['only_me', 'all_team_members', 'partial_members']) + +/** + * DatasetCreatePayload + */ +export const zDatasetCreatePayload = z.object({ + description: z.string().max(400).optional().default(''), + external_knowledge_api_id: z.string().nullish(), + external_knowledge_id: z.string().nullish(), + indexing_technique: z.string().nullish(), + name: z.string().min(1).max(40), + permission: zDatasetPermissionEnum.optional(), + provider: z.string().optional().default('vendor'), +}) + +/** + * DatasetUpdatePayload + */ +export const zDatasetUpdatePayload = z.object({ + description: z.string().max(400).nullish(), + embedding_model: z.string().nullish(), + embedding_model_provider: z.string().nullish(), + external_knowledge_api_id: z.string().nullish(), + external_knowledge_id: z.string().nullish(), + external_retrieval_model: z.record(z.string(), z.unknown()).nullish(), + icon_info: z.record(z.string(), z.unknown()).nullish(), + indexing_technique: z.string().nullish(), + is_multimodal: z.boolean().nullish().default(false), + name: z.string().min(1).max(40).nullish(), + partial_member_list: z.array(z.record(z.string(), z.string())).nullish(), + permission: zDatasetPermissionEnum.optional(), + retrieval_model: z.record(z.string(), z.unknown()).nullish(), + summary_index_setting: z.record(z.string(), z.unknown()).nullish(), +}) + +/** + * DatasetResponse + */ +export const zDatasetResponse = z.object({ + created_at: z.int().nullish(), + created_by: z.string().nullish(), + data_source_type: z.string().nullish(), + description: z.string().nullish(), + id: z.string(), + indexing_technique: z.string().nullish(), + name: z.string(), + permission: z.string().nullish(), +}) + +/** + * DocumentMetadataResponse + */ +export const 
zDocumentMetadataResponse = z.object({ + id: z.string(), + name: z.string(), + type: z.string(), + value: z.string().nullish(), +}) + +/** + * DocumentResponse + */ +export const zDocumentResponse = z.object({ + archived: z.boolean().nullish(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + created_from: z.string().nullish(), + data_source_detail_dict: z.unknown().optional(), + data_source_info_dict: z.unknown().optional(), + data_source_type: z.string().nullish(), + dataset_process_rule_id: z.string().nullish(), + disabled_at: z.int().nullish(), + disabled_by: z.string().nullish(), + display_status: z.string().nullish(), + doc_form: z.string().nullish(), + doc_metadata_details: z.array(zDocumentMetadataResponse).optional(), + enabled: z.boolean().nullish(), + error: z.string().nullish(), + hit_count: z.int().nullish(), + id: z.string(), + indexing_status: z.string().nullish(), + name: z.string(), + need_summary: z.boolean().nullish(), + position: z.int().nullish(), + summary_index_status: z.string().nullish(), + tokens: z.int().nullish(), + word_count: z.int().nullish(), +}) + +/** + * DatasetAndDocumentResponse + */ +export const zDatasetAndDocumentResponse = z.object({ + batch: z.string(), + dataset: zDatasetResponse, + documents: z.array(zDocumentResponse), +}) + +/** + * RerankingModel + */ +export const zRerankingModel = z.object({ + reranking_model_name: z.string().nullish(), + reranking_provider_name: z.string().nullish(), +}) + +/** + * RetrievalMethod + */ +export const zRetrievalMethod = z.enum([ + 'semantic_search', + 'full_text_search', + 'hybrid_search', + 'keyword_search', +]) + +/** + * MetadataDetail + */ +export const zMetadataDetail = z.object({ + id: z.string(), + name: z.string(), + value: z.unknown().optional(), +}) + +/** + * DocumentMetadataOperation + */ +export const zDocumentMetadataOperation = z.object({ + document_id: z.string(), + metadata_list: z.array(zMetadataDetail), + partial_update: 
z.boolean().optional().default(false), +}) + +/** + * MetadataOperationData + * + * Metadata operation data + */ +export const zMetadataOperationData = z.object({ + operation_data: z.array(zDocumentMetadataOperation), +}) + +/** + * HitTestingChildChunk + */ +export const zHitTestingChildChunk = z.object({ + content: z.string().nullish(), + id: z.string().nullish(), + position: z.int().nullish(), + score: z.number().nullish(), +}) + +/** + * HitTestingFile + */ +export const zHitTestingFile = z.object({ + extension: z.string().nullish(), + id: z.string().nullish(), + mime_type: z.string().nullish(), + name: z.string().nullish(), + size: z.int().nullish(), + source_url: z.string().nullish(), +}) + +/** + * FileInfo + */ +export const zFileInfo = z.object({ + file_ids: z.array(z.string()), +}) + +/** + * WebsiteInfo + */ +export const zWebsiteInfo = z.object({ + job_id: z.string(), + only_main_content: z.boolean().optional().default(true), + provider: z.string(), + urls: z.array(z.string()), +}) + +/** + * PreProcessingRule + */ +export const zPreProcessingRule = z.object({ + enabled: z.boolean(), + id: z.string(), +}) + +/** + * Segmentation + */ +export const zSegmentation = z.object({ + chunk_overlap: z.int().optional().default(0), + max_tokens: z.int(), + separator: z.string().optional().default('\n'), +}) + +/** + * Rule + */ +export const zRule = z.object({ + parent_mode: z.enum(['full-doc', 'paragraph']).nullish(), + pre_processing_rules: z.array(zPreProcessingRule).nullish(), + segmentation: zSegmentation.optional(), + subchunk_segmentation: zSegmentation.optional(), +}) + +/** + * ProcessRule + */ +export const zProcessRule = z.object({ + mode: z.enum(['automatic', 'custom', 'hierarchical']), + rules: zRule.optional(), +}) + +/** + * Condition + * + * Condition detail + */ +export const zCondition = z.object({ + comparison_operator: z.enum([ + 'contains', + 'not contains', + 'start with', + 'end with', + 'is', + 'is not', + 'empty', + 'not empty', + 'in', + 
'not in', + '=', + '≠', + '>', + '<', + '≥', + '≤', + 'before', + 'after', + ]), + name: z.string(), + value: z.unknown().optional(), +}) + +/** + * MetadataFilteringCondition + * + * Metadata Filtering Condition. + */ +export const zMetadataFilteringCondition = z.object({ + conditions: z.array(zCondition).nullish(), + logical_operator: z.enum(['and', 'or']).nullish().default('and'), +}) + +/** + * WeightKeywordSetting + */ +export const zWeightKeywordSetting = z.object({ + keyword_weight: z.number(), +}) + +/** + * WeightVectorSetting + */ +export const zWeightVectorSetting = z.object({ + embedding_model_name: z.string(), + embedding_provider_name: z.string(), + vector_weight: z.number(), +}) + +/** + * WeightModel + */ +export const zWeightModel = z.object({ + keyword_setting: zWeightKeywordSetting.optional(), + vector_setting: zWeightVectorSetting.optional(), + weight_type: z.enum(['semantic_first', 'keyword_first', 'customized']).nullish(), +}) + +/** + * RetrievalModel + */ +export const zRetrievalModel = z.object({ + metadata_filtering_conditions: zMetadataFilteringCondition.optional(), + reranking_enable: z.boolean(), + reranking_mode: z.string().nullish(), + reranking_model: zRerankingModel.optional(), + score_threshold: z.number().nullish(), + score_threshold_enabled: z.boolean(), + search_method: zRetrievalMethod, + top_k: z.int(), + weights: zWeightModel.optional(), +}) + +/** + * HitTestingPayload + */ +export const zHitTestingPayload = z.object({ + attachment_ids: z.array(z.string()).nullish(), + external_retrieval_model: z.record(z.string(), z.unknown()).nullish(), + query: z.string().max(250), + retrieval_model: zRetrievalModel.optional(), +}) + +/** + * HitTestingDocument + */ +export const zHitTestingDocument = z.object({ + data_source_type: z.string().nullish(), + doc_metadata: z.unknown().optional(), + doc_type: z.string().nullish(), + id: z.string().nullish(), + name: z.string().nullish(), +}) + +/** + * HitTestingSegment + */ +export const 
zHitTestingSegment = z.object({ + answer: z.string().nullish(), + completed_at: z.int().nullish(), + content: z.string().nullish(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + disabled_at: z.int().nullish(), + disabled_by: z.string().nullish(), + document: zHitTestingDocument.optional(), + document_id: z.string().nullish(), + enabled: z.boolean().nullish(), + error: z.string().nullish(), + hit_count: z.int().nullish(), + id: z.string().nullish(), + index_node_hash: z.string().nullish(), + index_node_id: z.string().nullish(), + indexing_at: z.int().nullish(), + keywords: z.array(z.string()).optional(), + position: z.int().nullish(), + sign_content: z.string().nullish(), + status: z.string().nullish(), + stopped_at: z.int().nullish(), + tokens: z.int().nullish(), + word_count: z.int().nullish(), +}) + +/** + * HitTestingRecord + */ +export const zHitTestingRecord = z.object({ + child_chunks: z.array(zHitTestingChildChunk).optional(), + files: z.array(zHitTestingFile).optional(), + score: z.number().nullish(), + segment: zHitTestingSegment.optional(), + summary: z.string().nullish(), + tsne_position: z.unknown().optional(), +}) + +/** + * HitTestingResponse + */ +export const zHitTestingResponse = z.object({ + query: z.string(), + records: z.array(zHitTestingRecord).optional(), +}) + +/** + * NotionIcon + */ +export const zNotionIcon = z.object({ + emoji: z.string().nullish(), + type: z.string(), + url: z.string().nullish(), +}) + +/** + * NotionPage + */ +export const zNotionPage = z.object({ + page_icon: zNotionIcon.optional(), + page_id: z.string(), + page_name: z.string(), + type: z.string(), +}) + +/** + * NotionInfo + */ +export const zNotionInfo = z.object({ + credential_id: z.string(), + pages: z.array(zNotionPage), + workspace_id: z.string(), +}) + +/** + * InfoList + */ +export const zInfoList = z.object({ + data_source_type: z.enum(['upload_file', 'notion_import', 'website_crawl']), + file_info_list: zFileInfo.optional(), + 
notion_info_list: z.array(zNotionInfo).nullish(), + website_info_list: zWebsiteInfo.optional(), +}) + +/** + * DataSource + */ +export const zDataSource = z.object({ + info_list: zInfoList, +}) + +/** + * KnowledgeConfig + */ +export const zKnowledgeConfig = z.object({ + data_source: zDataSource.optional(), + doc_form: z.string().optional().default('text_model'), + doc_language: z.string().optional().default('English'), + duplicate: z.boolean().optional().default(true), + embedding_model: z.string().nullish(), + embedding_model_provider: z.string().nullish(), + indexing_technique: z.enum(['high_quality', 'economy']), + is_multimodal: z.boolean().optional().default(false), + name: z.string().nullish(), + original_document_id: z.string().nullish(), + process_rule: zProcessRule.optional(), + retrieval_model: zRetrievalModel.optional(), + summary_index_setting: z.record(z.string(), z.unknown()).nullish(), +}) + +export const zGetDatasetsQuery = z.object({ + page: z.string().optional(), + limit: z.string().optional(), + ids: z.string().optional(), + keyword: z.string().optional(), + tag_ids: z.string().optional(), + include_all: z.string().optional(), +}) + +/** + * Datasets retrieved successfully + */ +export const zGetDatasetsResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsBody = zDatasetCreatePayload + +/** + * Dataset created successfully + */ +export const zPostDatasetsResponse = z.record(z.string(), z.unknown()) + +/** + * API base info retrieved successfully + */ +export const zGetDatasetsApiBaseInfoResponse = z.record(z.string(), z.unknown()) + +/** + * API keys retrieved successfully + */ +export const zGetDatasetsApiKeysResponse = zApiKeyList + +/** + * API key created successfully + */ +export const zPostDatasetsApiKeysResponse = zApiKeyItem + +export const zDeleteDatasetsApiKeysByApiKeyIdPath = z.object({ + api_key_id: z.string(), +}) + +/** + * API key deleted successfully + */ +export const zDeleteDatasetsApiKeysByApiKeyIdResponse 
= z.record(z.string(), z.unknown()) + +export const zGetDatasetsBatchImportStatusByJobIdPath = z.object({ + job_id: z.string(), +}) + +/** + * Success + */ +export const zGetDatasetsBatchImportStatusByJobIdResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsBatchImportStatusByJobIdBody = zBatchImportPayload + +export const zPostDatasetsBatchImportStatusByJobIdPath = z.object({ + job_id: z.string(), +}) + +/** + * Success + */ +export const zPostDatasetsBatchImportStatusByJobIdResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsExternalBody = zExternalDatasetCreatePayload + +/** + * External dataset created successfully + */ +export const zPostDatasetsExternalResponse = zDatasetDetail + +export const zGetDatasetsExternalKnowledgeApiQuery = z.object({ + page: z.string().optional(), + limit: z.string().optional(), + keyword: z.string().optional(), +}) + +/** + * External API templates retrieved successfully + */ +export const zGetDatasetsExternalKnowledgeApiResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsExternalKnowledgeApiBody = zExternalKnowledgeApiPayload + +/** + * Success + */ +export const zPostDatasetsExternalKnowledgeApiResponse = z.record(z.string(), z.unknown()) + +export const zDeleteDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdPath = z.object({ + external_knowledge_api_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdPath = z.object({ + external_knowledge_api_id: z.string(), +}) + +/** + * External API template retrieved successfully + */ +export const zGetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdBody + = zExternalKnowledgeApiPayload + +export const 
zPatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdPath = z.object({ + external_knowledge_api_id: z.string(), +}) + +/** + * Success + */ +export const zPatchDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdUseCheckPath = z.object({ + external_knowledge_api_id: z.string(), +}) + +/** + * Usage check completed successfully + */ +export const zGetDatasetsExternalKnowledgeApiByExternalKnowledgeApiIdUseCheckResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsIndexingEstimateBody = zIndexingEstimatePayload + +/** + * Indexing estimate calculated successfully + */ +export const zPostDatasetsIndexingEstimateResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsInitBody = zKnowledgeConfig + +/** + * Dataset initialized successfully + */ +export const zPostDatasetsInitResponse = zDatasetAndDocumentResponse + +/** + * Success + */ +export const zGetDatasetsMetadataBuiltInResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetDatasetsNotionIndexingEstimateResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsNotionIndexingEstimateBody = zNotionEstimatePayload + +/** + * Success + */ +export const zPostDatasetsNotionIndexingEstimateResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsProcessRuleQuery = z.object({ + document_id: z.string().optional(), +}) + +/** + * Process rules retrieved successfully + */ +export const zGetDatasetsProcessRuleResponse = z.record(z.string(), z.unknown()) + +/** + * Retrieval settings retrieved successfully + */ +export const zGetDatasetsRetrievalSettingResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsRetrievalSettingByVectorTypePath = z.object({ + vector_type: z.string(), +}) + +/** + * Mock retrieval settings retrieved successfully + */ +export const 
zGetDatasetsRetrievalSettingByVectorTypeResponse = z.record(z.string(), z.unknown()) + +export const zDeleteDatasetsByDatasetIdPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteDatasetsByDatasetIdResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsByDatasetIdPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Dataset retrieved successfully + */ +export const zGetDatasetsByDatasetIdResponse = zDatasetDetail + +export const zPatchDatasetsByDatasetIdBody = zDatasetUpdatePayload + +export const zPatchDatasetsByDatasetIdPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Dataset updated successfully + */ +export const zPatchDatasetsByDatasetIdResponse = zDatasetDetail + +export const zPostDatasetsByDatasetIdApiKeysByStatusPath = z.object({ + dataset_id: z.string(), + status: z.string(), +}) + +/** + * Success + */ +export const zPostDatasetsByDatasetIdApiKeysByStatusResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsByDatasetIdAutoDisableLogsPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Auto disable logs retrieved successfully + */ +export const zGetDatasetsByDatasetIdAutoDisableLogsResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsByDatasetIdBatchByBatchIndexingEstimatePath = z.object({ + dataset_id: z.string(), + batch: z.string(), +}) + +/** + * Success + */ +export const zGetDatasetsByDatasetIdBatchByBatchIndexingEstimateResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdBatchByBatchIndexingStatusPath = z.object({ + dataset_id: z.string(), + batch: z.string(), +}) + +/** + * Success + */ +export const zGetDatasetsByDatasetIdBatchByBatchIndexingStatusResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteDatasetsByDatasetIdDocumentsPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteDatasetsByDatasetIdDocumentsResponse = 
z.record(z.string(), z.unknown()) + +export const zGetDatasetsByDatasetIdDocumentsPath = z.object({ + dataset_id: z.string(), +}) + +export const zGetDatasetsByDatasetIdDocumentsQuery = z.object({ + page: z.string().optional(), + limit: z.string().optional(), + keyword: z.string().optional(), + sort: z.string().optional(), + fetch: z.string().optional(), + status: z.string().optional(), +}) + +/** + * Documents retrieved successfully + */ +export const zGetDatasetsByDatasetIdDocumentsResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsByDatasetIdDocumentsBody = zKnowledgeConfig + +export const zPostDatasetsByDatasetIdDocumentsPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Documents created successfully + */ +export const zPostDatasetsByDatasetIdDocumentsResponse = zDatasetAndDocumentResponse + +export const zPostDatasetsByDatasetIdDocumentsDownloadZipBody = zDocumentBatchDownloadZipPayload + +export const zPostDatasetsByDatasetIdDocumentsDownloadZipPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Success + */ +export const zPostDatasetsByDatasetIdDocumentsDownloadZipResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdDocumentsGenerateSummaryBody = zGenerateSummaryPayload + +export const zPostDatasetsByDatasetIdDocumentsGenerateSummaryPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Summary generation started successfully + */ +export const zPostDatasetsByDatasetIdDocumentsGenerateSummaryResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdDocumentsMetadataBody = zMetadataOperationData + +export const zPostDatasetsByDatasetIdDocumentsMetadataPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Success + */ +export const zPostDatasetsByDatasetIdDocumentsMetadataResponse = z.record(z.string(), z.unknown()) + +export const zPatchDatasetsByDatasetIdDocumentsStatusByActionBatchPath = z.object({ + dataset_id: z.string(), + action: 
z.string(), +}) + +/** + * Success + */ +export const zPatchDatasetsByDatasetIdDocumentsStatusByActionBatchResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdPath = z.object({ + document_id: z.string(), + dataset_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdQuery = z.object({ + metadata: z.string().optional(), +}) + +/** + * Document retrieved successfully + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdDownloadPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Success + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimatePath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Indexing estimate calculated successfully + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdIndexingEstimateResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Indexing status retrieved successfully + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdIndexingStatusResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPutDatasetsByDatasetIdDocumentsByDocumentIdMetadataBody + = zDocumentMetadataUpdatePayload + +export const zPutDatasetsByDatasetIdDocumentsByDocumentIdMetadataPath = z.object({ + 
dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Document metadata updated successfully + */ +export const zPutDatasetsByDatasetIdDocumentsByDocumentIdMetadataResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdNotionSyncPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Success + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdNotionSyncResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdPipelineExecutionLogPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Success + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdPipelineExecutionLogResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingPausePath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Success + */ +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingPauseResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingResumePath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Success + */ +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingResumeResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), + action: z.string(), +}) + +/** + * Processing status updated successfully + */ +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdProcessingByActionResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdRenameBody = zDocumentRenamePayload + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdRenamePath = z.object({ + dataset_id: 
z.string(), + document_id: z.string(), +}) + +/** + * Document renamed successfully + */ +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdRenameResponse = zDocumentResponse + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentBody = zSegmentCreatePayload + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Success + */ +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentByActionPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), + action: z.string(), +}) + +/** + * Success + */ +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentByActionResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Success + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Success + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportBody + = zBatchImportPayload + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportPath = z.object({ + dataset_id: 
z.string(), + document_id: z.string(), +}) + +/** + * Success + */ +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBatchImportResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), + segment_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdBody + = zSegmentUpdatePayload + +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), + segment_id: z.string(), +}) + +/** + * Success + */ +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath + = z.object({ + dataset_id: z.string(), + document_id: z.string(), + segment_id: z.string(), + }) + +/** + * Success + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse + = z.record(z.string(), z.unknown()) + +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath + = z.object({ + dataset_id: z.string(), + document_id: z.string(), + segment_id: z.string(), + }) + +/** + * Success + */ +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse + = z.record(z.string(), z.unknown()) + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksBody + = zChildChunkCreatePayload + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath + = z.object({ + dataset_id: z.string(), + document_id: z.string(), + 
segment_id: z.string(), + }) + +/** + * Success + */ +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse + = z.record(z.string(), z.unknown()) + +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdPath + = z.object({ + dataset_id: z.string(), + document_id: z.string(), + segment_id: z.string(), + child_chunk_id: z.string(), + }) + +/** + * Success + */ +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse + = z.record(z.string(), z.unknown()) + +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdBody + = zChildChunkUpdatePayload + +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdPath + = z.object({ + dataset_id: z.string(), + document_id: z.string(), + segment_id: z.string(), + child_chunk_id: z.string(), + }) + +/** + * Success + */ +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse + = z.record(z.string(), z.unknown()) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Summary status retrieved successfully + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSummaryStatusResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdWebsiteSyncPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Success + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdWebsiteSyncResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdErrorDocsPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Error documents retrieved successfully + */ +export const zGetDatasetsByDatasetIdErrorDocsResponse = 
z.record(z.string(), z.unknown()) + +export const zPostDatasetsByDatasetIdExternalHitTestingBody = zExternalHitTestingPayload + +export const zPostDatasetsByDatasetIdExternalHitTestingPath = z.object({ + dataset_id: z.string(), +}) + +/** + * External hit testing completed successfully + */ +export const zPostDatasetsByDatasetIdExternalHitTestingResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsByDatasetIdHitTestingBody = zHitTestingPayload + +export const zPostDatasetsByDatasetIdHitTestingPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Hit testing completed successfully + */ +export const zPostDatasetsByDatasetIdHitTestingResponse = zHitTestingResponse + +export const zGetDatasetsByDatasetIdIndexingStatusPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Indexing status retrieved successfully + */ +export const zGetDatasetsByDatasetIdIndexingStatusResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsByDatasetIdMetadataPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Success + */ +export const zGetDatasetsByDatasetIdMetadataResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsByDatasetIdMetadataBody = zMetadataArgs + +export const zPostDatasetsByDatasetIdMetadataPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Success + */ +export const zPostDatasetsByDatasetIdMetadataResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsByDatasetIdMetadataBuiltInByActionPath = z.object({ + dataset_id: z.string(), + action: z.string(), +}) + +/** + * Success + */ +export const zPostDatasetsByDatasetIdMetadataBuiltInByActionResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteDatasetsByDatasetIdMetadataByMetadataIdPath = z.object({ + dataset_id: z.string(), + metadata_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteDatasetsByDatasetIdMetadataByMetadataIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const 
zPatchDatasetsByDatasetIdMetadataByMetadataIdBody = zMetadataUpdatePayload + +export const zPatchDatasetsByDatasetIdMetadataByMetadataIdPath = z.object({ + dataset_id: z.string(), + metadata_id: z.string(), +}) + +/** + * Success + */ +export const zPatchDatasetsByDatasetIdMetadataByMetadataIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdNotionSyncPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Success + */ +export const zGetDatasetsByDatasetIdNotionSyncResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsByDatasetIdPermissionPartUsersPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Permission users retrieved successfully + */ +export const zGetDatasetsByDatasetIdPermissionPartUsersResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsByDatasetIdQueriesPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Query history retrieved successfully + */ +export const zGetDatasetsByDatasetIdQueriesResponse = zDatasetQueryDetail + +export const zGetDatasetsByDatasetIdRelatedAppsPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Related apps retrieved successfully + */ +export const zGetDatasetsByDatasetIdRelatedAppsResponse = zRelatedAppList + +export const zPostDatasetsByDatasetIdRetryBody = zDocumentRetryPayload + +export const zPostDatasetsByDatasetIdRetryPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Success + */ +export const zPostDatasetsByDatasetIdRetryResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsByDatasetIdUseCheckPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Dataset use status retrieved successfully + */ +export const zGetDatasetsByDatasetIdUseCheckResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsByResourceIdApiKeysPath = z.object({ + resource_id: z.string(), +}) + +/** + * API keys retrieved successfully + */ +export const zGetDatasetsByResourceIdApiKeysResponse = 
zApiKeyList + +export const zPostDatasetsByResourceIdApiKeysPath = z.object({ + resource_id: z.string(), +}) + +/** + * API key created successfully + */ +export const zPostDatasetsByResourceIdApiKeysResponse = zApiKeyItem + +export const zDeleteDatasetsByResourceIdApiKeysByApiKeyIdPath = z.object({ + resource_id: z.string(), + api_key_id: z.string(), +}) + +/** + * API key deleted successfully + */ +export const zDeleteDatasetsByResourceIdApiKeysByApiKeyIdResponse = z.record( + z.string(), + z.unknown(), +) diff --git a/packages/contracts/generated/api/console/email-code-login/orpc.gen.ts b/packages/contracts/generated/api/console/email-code-login/orpc.gen.ts new file mode 100644 index 0000000000..54edabc29f --- /dev/null +++ b/packages/contracts/generated/api/console/email-code-login/orpc.gen.ts @@ -0,0 +1,46 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zPostEmailCodeLoginBody, + zPostEmailCodeLoginResponse, + zPostEmailCodeLoginValidityBody, + zPostEmailCodeLoginValidityResponse, +} from './zod.gen' + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postEmailCodeLoginValidity', + path: '/email-code-login/validity', + tags: ['console'], + }) + .input(z.object({ body: zPostEmailCodeLoginValidityBody })) + .output(zPostEmailCodeLoginValidityResponse) + +export const validity = { + post, +} + +export const post2 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postEmailCodeLogin', + path: '/email-code-login', + tags: ['console'], + }) + .input(z.object({ body: zPostEmailCodeLoginBody })) + .output(zPostEmailCodeLoginResponse) + +export const emailCodeLogin = { + post: post2, + validity, +} + +export const contract = { + emailCodeLogin, +} diff --git a/packages/contracts/generated/api/console/email-code-login/types.gen.ts b/packages/contracts/generated/api/console/email-code-login/types.gen.ts new 
file mode 100644 index 0000000000..851a8d568d --- /dev/null +++ b/packages/contracts/generated/api/console/email-code-login/types.gen.ts @@ -0,0 +1,49 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type EmailPayload = { + email: string + language?: string | null +} + +export type EmailCodeLoginPayload = { + code: string + email: string + language?: string | null + token: string +} + +export type PostEmailCodeLoginData = { + body: EmailPayload + path?: never + query?: never + url: '/email-code-login' +} + +export type PostEmailCodeLoginResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostEmailCodeLoginResponse + = PostEmailCodeLoginResponses[keyof PostEmailCodeLoginResponses] + +export type PostEmailCodeLoginValidityData = { + body: EmailCodeLoginPayload + path?: never + query?: never + url: '/email-code-login/validity' +} + +export type PostEmailCodeLoginValidityResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostEmailCodeLoginValidityResponse + = PostEmailCodeLoginValidityResponses[keyof PostEmailCodeLoginValidityResponses] diff --git a/packages/contracts/generated/api/console/email-code-login/zod.gen.ts b/packages/contracts/generated/api/console/email-code-login/zod.gen.ts new file mode 100644 index 0000000000..0ff511c722 --- /dev/null +++ b/packages/contracts/generated/api/console/email-code-login/zod.gen.ts @@ -0,0 +1,35 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * EmailPayload + */ +export const zEmailPayload = z.object({ + email: z.string(), + language: z.string().nullish(), +}) + +/** + * EmailCodeLoginPayload + */ +export const zEmailCodeLoginPayload = z.object({ + code: z.string(), + email: z.string(), + language: z.string().nullish(), + token: z.string(), +}) + +export const zPostEmailCodeLoginBody = zEmailPayload + +/** + * Success + */ 
+export const zPostEmailCodeLoginResponse = z.record(z.string(), z.unknown()) + +export const zPostEmailCodeLoginValidityBody = zEmailCodeLoginPayload + +/** + * Success + */ +export const zPostEmailCodeLoginValidityResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/email-register/orpc.gen.ts b/packages/contracts/generated/api/console/email-register/orpc.gen.ts new file mode 100644 index 0000000000..0bd724aba9 --- /dev/null +++ b/packages/contracts/generated/api/console/email-register/orpc.gen.ts @@ -0,0 +1,57 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' + +import { + zPostEmailRegisterResponse, + zPostEmailRegisterSendEmailResponse, + zPostEmailRegisterValidityResponse, +} from './zod.gen' + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postEmailRegisterSendEmail', + path: '/email-register/send-email', + tags: ['console'], + }) + .output(zPostEmailRegisterSendEmailResponse) + +export const sendEmail = { + post, +} + +export const post2 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postEmailRegisterValidity', + path: '/email-register/validity', + tags: ['console'], + }) + .output(zPostEmailRegisterValidityResponse) + +export const validity = { + post: post2, +} + +export const post3 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postEmailRegister', + path: '/email-register', + tags: ['console'], + }) + .output(zPostEmailRegisterResponse) + +export const emailRegister = { + post: post3, + sendEmail, + validity, +} + +export const contract = { + emailRegister, +} diff --git a/packages/contracts/generated/api/console/email-register/types.gen.ts b/packages/contracts/generated/api/console/email-register/types.gen.ts new file mode 100644 index 0000000000..3fe5129fbf --- /dev/null +++ b/packages/contracts/generated/api/console/email-register/types.gen.ts 
@@ -0,0 +1,52 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type PostEmailRegisterData = { + body?: never + path?: never + query?: never + url: '/email-register' +} + +export type PostEmailRegisterResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostEmailRegisterResponse = PostEmailRegisterResponses[keyof PostEmailRegisterResponses] + +export type PostEmailRegisterSendEmailData = { + body?: never + path?: never + query?: never + url: '/email-register/send-email' +} + +export type PostEmailRegisterSendEmailResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostEmailRegisterSendEmailResponse + = PostEmailRegisterSendEmailResponses[keyof PostEmailRegisterSendEmailResponses] + +export type PostEmailRegisterValidityData = { + body?: never + path?: never + query?: never + url: '/email-register/validity' +} + +export type PostEmailRegisterValidityResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostEmailRegisterValidityResponse + = PostEmailRegisterValidityResponses[keyof PostEmailRegisterValidityResponses] diff --git a/packages/contracts/generated/api/console/email-register/zod.gen.ts b/packages/contracts/generated/api/console/email-register/zod.gen.ts new file mode 100644 index 0000000000..11720317f2 --- /dev/null +++ b/packages/contracts/generated/api/console/email-register/zod.gen.ts @@ -0,0 +1,18 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * Success + */ +export const zPostEmailRegisterResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zPostEmailRegisterSendEmailResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zPostEmailRegisterValidityResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/explore/orpc.gen.ts 
b/packages/contracts/generated/api/console/explore/orpc.gen.ts new file mode 100644 index 0000000000..4b37a0a4fd --- /dev/null +++ b/packages/contracts/generated/api/console/explore/orpc.gen.ts @@ -0,0 +1,70 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zGetExploreAppsByAppIdPath, + zGetExploreAppsByAppIdResponse, + zGetExploreAppsQuery, + zGetExploreAppsResponse, + zGetExploreBannersResponse, +} from './zod.gen' + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getExploreAppsByAppId', + path: '/explore/apps/{app_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetExploreAppsByAppIdPath })) + .output(zGetExploreAppsByAppIdResponse) + +export const byAppId = { + get, +} + +export const get2 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getExploreApps', + path: '/explore/apps', + tags: ['console'], + }) + .input(z.object({ query: zGetExploreAppsQuery.optional() })) + .output(zGetExploreAppsResponse) + +export const apps = { + get: get2, + byAppId, +} + +/** + * Get banner list + */ +export const get3 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getExploreBanners', + path: '/explore/banners', + summary: 'Get banner list', + tags: ['default'], + }) + .output(zGetExploreBannersResponse) + +export const banners = { + get: get3, +} + +export const explore = { + apps, + banners, +} + +export const contract = { + explore, +} diff --git a/packages/contracts/generated/api/console/explore/types.gen.ts b/packages/contracts/generated/api/console/explore/types.gen.ts new file mode 100644 index 0000000000..db56dbdac0 --- /dev/null +++ b/packages/contracts/generated/api/console/explore/types.gen.ts @@ -0,0 +1,80 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} 
+ +export type RecommendedAppListResponse = { + categories: Array + recommended_apps: Array +} + +export type RecommendedAppResponse = { + app?: RecommendedAppInfoResponse + app_id: string + can_trial?: boolean | null + category?: string | null + copyright?: string | null + custom_disclaimer?: string | null + description?: string | null + is_listed?: boolean | null + position?: number | null + privacy_policy?: string | null +} + +export type RecommendedAppInfoResponse = { + icon?: string | null + icon_background?: string | null + icon_type?: string | null + id: string + mode?: string | null + name?: string | null +} + +export type GetExploreAppsData = { + body?: never + path?: never + query?: { + language?: string | null + } + url: '/explore/apps' +} + +export type GetExploreAppsResponses = { + 200: RecommendedAppListResponse +} + +export type GetExploreAppsResponse = GetExploreAppsResponses[keyof GetExploreAppsResponses] + +export type GetExploreAppsByAppIdData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/explore/apps/{app_id}' +} + +export type GetExploreAppsByAppIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetExploreAppsByAppIdResponse + = GetExploreAppsByAppIdResponses[keyof GetExploreAppsByAppIdResponses] + +export type GetExploreBannersData = { + body?: never + path?: never + query?: never + url: '/explore/banners' +} + +export type GetExploreBannersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetExploreBannersResponse = GetExploreBannersResponses[keyof GetExploreBannersResponses] diff --git a/packages/contracts/generated/api/console/explore/zod.gen.ts b/packages/contracts/generated/api/console/explore/zod.gen.ts new file mode 100644 index 0000000000..2ceb54e7bd --- /dev/null +++ b/packages/contracts/generated/api/console/explore/zod.gen.ts @@ -0,0 +1,62 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * RecommendedAppInfoResponse + 
*/ +export const zRecommendedAppInfoResponse = z.object({ + icon: z.string().nullish(), + icon_background: z.string().nullish(), + icon_type: z.string().nullish(), + id: z.string(), + mode: z.string().nullish(), + name: z.string().nullish(), +}) + +/** + * RecommendedAppResponse + */ +export const zRecommendedAppResponse = z.object({ + app: zRecommendedAppInfoResponse.optional(), + app_id: z.string(), + can_trial: z.boolean().nullish(), + category: z.string().nullish(), + copyright: z.string().nullish(), + custom_disclaimer: z.string().nullish(), + description: z.string().nullish(), + is_listed: z.boolean().nullish(), + position: z.int().nullish(), + privacy_policy: z.string().nullish(), +}) + +/** + * RecommendedAppListResponse + */ +export const zRecommendedAppListResponse = z.object({ + categories: z.array(z.string()), + recommended_apps: z.array(zRecommendedAppResponse), +}) + +export const zGetExploreAppsQuery = z.object({ + language: z.string().nullish(), +}) + +/** + * Success + */ +export const zGetExploreAppsResponse = zRecommendedAppListResponse + +export const zGetExploreAppsByAppIdPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zGetExploreAppsByAppIdResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetExploreBannersResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/features/orpc.gen.ts b/packages/contracts/generated/api/console/features/orpc.gen.ts new file mode 100644 index 0000000000..e24ec3d964 --- /dev/null +++ b/packages/contracts/generated/api/console/features/orpc.gen.ts @@ -0,0 +1,30 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' + +import { zGetFeaturesResponse } from './zod.gen' + +/** + * Get feature configuration for current tenant + * + * Get feature configuration for current tenant + */ +export const get = oc + .route({ + description: 'Get feature configuration for current 
tenant', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getFeatures', + path: '/features', + summary: 'Get feature configuration for current tenant', + tags: ['console'], + }) + .output(zGetFeaturesResponse) + +export const features = { + get, +} + +export const contract = { + features, +} diff --git a/packages/contracts/generated/api/console/features/types.gen.ts b/packages/contracts/generated/api/console/features/types.gen.ts new file mode 100644 index 0000000000..eed18d5344 --- /dev/null +++ b/packages/contracts/generated/api/console/features/types.gen.ts @@ -0,0 +1,22 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type FeatureResponse = { + [key: string]: unknown +} + +export type GetFeaturesData = { + body?: never + path?: never + query?: never + url: '/features' +} + +export type GetFeaturesResponses = { + 200: FeatureResponse +} + +export type GetFeaturesResponse = GetFeaturesResponses[keyof GetFeaturesResponses] diff --git a/packages/contracts/generated/api/console/features/zod.gen.ts b/packages/contracts/generated/api/console/features/zod.gen.ts new file mode 100644 index 0000000000..1e967fb879 --- /dev/null +++ b/packages/contracts/generated/api/console/features/zod.gen.ts @@ -0,0 +1,10 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +export const zFeatureResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetFeaturesResponse = zFeatureResponse diff --git a/packages/contracts/generated/api/console/files/orpc.gen.ts b/packages/contracts/generated/api/console/files/orpc.gen.ts new file mode 100644 index 0000000000..2ee949edc2 --- /dev/null +++ b/packages/contracts/generated/api/console/files/orpc.gen.ts @@ -0,0 +1,81 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + 
zGetFilesByFileIdPreviewPath, + zGetFilesByFileIdPreviewResponse, + zGetFilesSupportTypeResponse, + zGetFilesUploadResponse, + zPostFilesUploadResponse, +} from './zod.gen' + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getFilesSupportType', + path: '/files/support-type', + tags: ['console'], + }) + .output(zGetFilesSupportTypeResponse) + +export const supportType = { + get, +} + +export const get2 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getFilesUpload', + path: '/files/upload', + tags: ['console'], + }) + .output(zGetFilesUploadResponse) + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postFilesUpload', + path: '/files/upload', + successStatus: 201, + tags: ['console'], + }) + .output(zPostFilesUploadResponse) + +export const upload = { + get: get2, + post, +} + +export const get3 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getFilesByFileIdPreview', + path: '/files/{file_id}/preview', + tags: ['console'], + }) + .input(z.object({ params: zGetFilesByFileIdPreviewPath })) + .output(zGetFilesByFileIdPreviewResponse) + +export const preview = { + get: get3, +} + +export const byFileId = { + preview, +} + +export const files = { + supportType, + upload, + byFileId, +} + +export const contract = { + files, +} diff --git a/packages/contracts/generated/api/console/files/types.gen.ts b/packages/contracts/generated/api/console/files/types.gen.ts new file mode 100644 index 0000000000..5620235461 --- /dev/null +++ b/packages/contracts/generated/api/console/files/types.gen.ts @@ -0,0 +1,95 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type UploadConfig = { + attachment_image_file_size_limit?: number | null + audio_file_size_limit: number + batch_count_limit: number + file_size_limit: 
number + file_upload_limit?: number | null + image_file_batch_limit: number + image_file_size_limit: number + single_chunk_attachment_limit: number + video_file_size_limit: number + workflow_file_upload_limit: number +} + +export type FileResponse = { + conversation_id?: string | null + created_at?: number | null + created_by?: string | null + extension?: string | null + file_key?: string | null + id: string + mime_type?: string | null + name: string + original_url?: string | null + preview_url?: string | null + size: number + source_url?: string | null + tenant_id?: string | null + user_id?: string | null +} + +export type GetFilesSupportTypeData = { + body?: never + path?: never + query?: never + url: '/files/support-type' +} + +export type GetFilesSupportTypeResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetFilesSupportTypeResponse + = GetFilesSupportTypeResponses[keyof GetFilesSupportTypeResponses] + +export type GetFilesUploadData = { + body?: never + path?: never + query?: never + url: '/files/upload' +} + +export type GetFilesUploadResponses = { + 200: UploadConfig +} + +export type GetFilesUploadResponse = GetFilesUploadResponses[keyof GetFilesUploadResponses] + +export type PostFilesUploadData = { + body?: never + path?: never + query?: never + url: '/files/upload' +} + +export type PostFilesUploadResponses = { + 201: FileResponse +} + +export type PostFilesUploadResponse = PostFilesUploadResponses[keyof PostFilesUploadResponses] + +export type GetFilesByFileIdPreviewData = { + body?: never + path: { + file_id: string + } + query?: never + url: '/files/{file_id}/preview' +} + +export type GetFilesByFileIdPreviewResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetFilesByFileIdPreviewResponse + = GetFilesByFileIdPreviewResponses[keyof GetFilesByFileIdPreviewResponses] diff --git a/packages/contracts/generated/api/console/files/zod.gen.ts b/packages/contracts/generated/api/console/files/zod.gen.ts new file mode 
100644 index 0000000000..d61e7795ce --- /dev/null +++ b/packages/contracts/generated/api/console/files/zod.gen.ts @@ -0,0 +1,63 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * UploadConfig + */ +export const zUploadConfig = z.object({ + attachment_image_file_size_limit: z.int().nullish(), + audio_file_size_limit: z.int(), + batch_count_limit: z.int(), + file_size_limit: z.int(), + file_upload_limit: z.int().nullish(), + image_file_batch_limit: z.int(), + image_file_size_limit: z.int(), + single_chunk_attachment_limit: z.int(), + video_file_size_limit: z.int(), + workflow_file_upload_limit: z.int(), +}) + +/** + * FileResponse + */ +export const zFileResponse = z.object({ + conversation_id: z.string().nullish(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + extension: z.string().nullish(), + file_key: z.string().nullish(), + id: z.string(), + mime_type: z.string().nullish(), + name: z.string(), + original_url: z.string().nullish(), + preview_url: z.string().nullish(), + size: z.int(), + source_url: z.string().nullish(), + tenant_id: z.string().nullish(), + user_id: z.string().nullish(), +}) + +/** + * Success + */ +export const zGetFilesSupportTypeResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetFilesUploadResponse = zUploadConfig + +/** + * File uploaded successfully + */ +export const zPostFilesUploadResponse = zFileResponse + +export const zGetFilesByFileIdPreviewPath = z.object({ + file_id: z.string(), +}) + +/** + * Success + */ +export const zGetFilesByFileIdPreviewResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/forgot-password/orpc.gen.ts b/packages/contracts/generated/api/console/forgot-password/orpc.gen.ts new file mode 100644 index 0000000000..a5a33f407b --- /dev/null +++ b/packages/contracts/generated/api/console/forgot-password/orpc.gen.ts @@ -0,0 +1,76 @@ +// This file is auto-generated by 
@hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zPostForgotPasswordBody, + zPostForgotPasswordResetsBody, + zPostForgotPasswordResetsResponse, + zPostForgotPasswordResponse, + zPostForgotPasswordValidityBody, + zPostForgotPasswordValidityResponse, +} from './zod.gen' + +/** + * Reset password with verification token + */ +export const post = oc + .route({ + description: 'Reset password with verification token', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postForgotPasswordResets', + path: '/forgot-password/resets', + tags: ['console'], + }) + .input(z.object({ body: zPostForgotPasswordResetsBody })) + .output(zPostForgotPasswordResetsResponse) + +export const resets = { + post, +} + +/** + * Verify password reset code + */ +export const post2 = oc + .route({ + description: 'Verify password reset code', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postForgotPasswordValidity', + path: '/forgot-password/validity', + tags: ['console'], + }) + .input(z.object({ body: zPostForgotPasswordValidityBody })) + .output(zPostForgotPasswordValidityResponse) + +export const validity = { + post: post2, +} + +/** + * Send password reset email + */ +export const post3 = oc + .route({ + description: 'Send password reset email', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postForgotPassword', + path: '/forgot-password', + tags: ['console'], + }) + .input(z.object({ body: zPostForgotPasswordBody })) + .output(zPostForgotPasswordResponse) + +export const forgotPassword = { + post: post3, + resets, + validity, +} + +export const contract = { + forgotPassword, +} diff --git a/packages/contracts/generated/api/console/forgot-password/types.gen.ts b/packages/contracts/generated/api/console/forgot-password/types.gen.ts new file mode 100644 index 0000000000..b58165c8eb --- /dev/null +++ b/packages/contracts/generated/api/console/forgot-password/types.gen.ts @@ -0,0 +1,106 @@ +// This 
file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type ForgotPasswordSendPayload = { + email: string + language?: string | null +} + +export type ForgotPasswordEmailResponse = { + code?: string | null + data?: string | null + result: string +} + +export type ForgotPasswordResetPayload = { + new_password: string + password_confirm: string + token: string +} + +export type ForgotPasswordResetResponse = { + result: string +} + +export type ForgotPasswordCheckPayload = { + code: string + email: string + token: string +} + +export type ForgotPasswordCheckResponse = { + email: string + is_valid: boolean + token: string +} + +export type PostForgotPasswordData = { + body: ForgotPasswordSendPayload + path?: never + query?: never + url: '/forgot-password' +} + +export type PostForgotPasswordErrors = { + 400: { + [key: string]: unknown + } +} + +export type PostForgotPasswordError = PostForgotPasswordErrors[keyof PostForgotPasswordErrors] + +export type PostForgotPasswordResponses = { + 200: ForgotPasswordEmailResponse +} + +export type PostForgotPasswordResponse + = PostForgotPasswordResponses[keyof PostForgotPasswordResponses] + +export type PostForgotPasswordResetsData = { + body: ForgotPasswordResetPayload + path?: never + query?: never + url: '/forgot-password/resets' +} + +export type PostForgotPasswordResetsErrors = { + 400: { + [key: string]: unknown + } +} + +export type PostForgotPasswordResetsError + = PostForgotPasswordResetsErrors[keyof PostForgotPasswordResetsErrors] + +export type PostForgotPasswordResetsResponses = { + 200: ForgotPasswordResetResponse +} + +export type PostForgotPasswordResetsResponse + = PostForgotPasswordResetsResponses[keyof PostForgotPasswordResetsResponses] + +export type PostForgotPasswordValidityData = { + body: ForgotPasswordCheckPayload + path?: never + query?: never + url: '/forgot-password/validity' +} + +export type 
PostForgotPasswordValidityErrors = { + 400: { + [key: string]: unknown + } +} + +export type PostForgotPasswordValidityError + = PostForgotPasswordValidityErrors[keyof PostForgotPasswordValidityErrors] + +export type PostForgotPasswordValidityResponses = { + 200: ForgotPasswordCheckResponse +} + +export type PostForgotPasswordValidityResponse + = PostForgotPasswordValidityResponses[keyof PostForgotPasswordValidityResponses] diff --git a/packages/contracts/generated/api/console/forgot-password/zod.gen.ts b/packages/contracts/generated/api/console/forgot-password/zod.gen.ts new file mode 100644 index 0000000000..fdbb7b033d --- /dev/null +++ b/packages/contracts/generated/api/console/forgot-password/zod.gen.ts @@ -0,0 +1,75 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * ForgotPasswordSendPayload + */ +export const zForgotPasswordSendPayload = z.object({ + email: z.string(), + language: z.string().nullish(), +}) + +/** + * ForgotPasswordEmailResponse + */ +export const zForgotPasswordEmailResponse = z.object({ + code: z.string().nullish(), + data: z.string().nullish(), + result: z.string(), +}) + +/** + * ForgotPasswordResetPayload + */ +export const zForgotPasswordResetPayload = z.object({ + new_password: z.string(), + password_confirm: z.string(), + token: z.string().min(1), +}) + +/** + * ForgotPasswordResetResponse + */ +export const zForgotPasswordResetResponse = z.object({ + result: z.string(), +}) + +/** + * ForgotPasswordCheckPayload + */ +export const zForgotPasswordCheckPayload = z.object({ + code: z.string(), + email: z.string(), + token: z.string().min(1), +}) + +/** + * ForgotPasswordCheckResponse + */ +export const zForgotPasswordCheckResponse = z.object({ + email: z.string(), + is_valid: z.boolean(), + token: z.string(), +}) + +export const zPostForgotPasswordBody = zForgotPasswordSendPayload + +/** + * Email sent successfully + */ +export const zPostForgotPasswordResponse = zForgotPasswordEmailResponse + 
+export const zPostForgotPasswordResetsBody = zForgotPasswordResetPayload + +/** + * Password reset successfully + */ +export const zPostForgotPasswordResetsResponse = zForgotPasswordResetResponse + +export const zPostForgotPasswordValidityBody = zForgotPasswordCheckPayload + +/** + * Code verified successfully + */ +export const zPostForgotPasswordValidityResponse = zForgotPasswordCheckResponse diff --git a/packages/contracts/generated/api/console/form/orpc.gen.ts b/packages/contracts/generated/api/console/form/orpc.gen.ts new file mode 100644 index 0000000000..f6d76b28c0 --- /dev/null +++ b/packages/contracts/generated/api/console/form/orpc.gen.ts @@ -0,0 +1,73 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zGetFormHumanInputByFormTokenPath, + zGetFormHumanInputByFormTokenResponse, + zPostFormHumanInputByFormTokenPath, + zPostFormHumanInputByFormTokenResponse, +} from './zod.gen' + +/** + * Get human input form definition by form token + * + * GET /console/api/form/human_input/ + */ +export const get = oc + .route({ + description: 'GET /console/api/form/human_input/', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getFormHumanInputByFormToken', + path: '/form/human_input/{form_token}', + summary: 'Get human input form definition by form token', + tags: ['console'], + }) + .input(z.object({ params: zGetFormHumanInputByFormTokenPath })) + .output(zGetFormHumanInputByFormTokenResponse) + +/** + * Submit human input form by form token + * + * POST /console/api/form/human_input/ + * + * Request body: + * { + * "inputs": { + * "content": "User input content" + * }, + * "action": "Approve" + * } + */ +export const post = oc + .route({ + description: + 'POST /console/api/form/human_input/\n\nRequest body:\n{\n "inputs": {\n "content": "User input content"\n },\n "action": "Approve"\n}', + inputStructure: 'detailed', + method: 'POST', + operationId: 
'postFormHumanInputByFormToken', + path: '/form/human_input/{form_token}', + summary: 'Submit human input form by form token', + tags: ['console'], + }) + .input(z.object({ params: zPostFormHumanInputByFormTokenPath })) + .output(zPostFormHumanInputByFormTokenResponse) + +export const byFormToken = { + get, + post, +} + +export const humanInput = { + byFormToken, +} + +export const form = { + humanInput, +} + +export const contract = { + form, +} diff --git a/packages/contracts/generated/api/console/form/types.gen.ts b/packages/contracts/generated/api/console/form/types.gen.ts new file mode 100644 index 0000000000..80c0c1a474 --- /dev/null +++ b/packages/contracts/generated/api/console/form/types.gen.ts @@ -0,0 +1,41 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type GetFormHumanInputByFormTokenData = { + body?: never + path: { + form_token: string + } + query?: never + url: '/form/human_input/{form_token}' +} + +export type GetFormHumanInputByFormTokenResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetFormHumanInputByFormTokenResponse + = GetFormHumanInputByFormTokenResponses[keyof GetFormHumanInputByFormTokenResponses] + +export type PostFormHumanInputByFormTokenData = { + body?: never + path: { + form_token: string + } + query?: never + url: '/form/human_input/{form_token}' +} + +export type PostFormHumanInputByFormTokenResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostFormHumanInputByFormTokenResponse + = PostFormHumanInputByFormTokenResponses[keyof PostFormHumanInputByFormTokenResponses] diff --git a/packages/contracts/generated/api/console/form/zod.gen.ts b/packages/contracts/generated/api/console/form/zod.gen.ts new file mode 100644 index 0000000000..840b04383e --- /dev/null +++ b/packages/contracts/generated/api/console/form/zod.gen.ts @@ -0,0 +1,21 @@ +// This file is auto-generated by 
@hey-api/openapi-ts + +import * as z from 'zod' + +export const zGetFormHumanInputByFormTokenPath = z.object({ + form_token: z.string(), +}) + +/** + * Success + */ +export const zGetFormHumanInputByFormTokenResponse = z.record(z.string(), z.unknown()) + +export const zPostFormHumanInputByFormTokenPath = z.object({ + form_token: z.string(), +}) + +/** + * Success + */ +export const zPostFormHumanInputByFormTokenResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/info/orpc.gen.ts b/packages/contracts/generated/api/console/info/orpc.gen.ts new file mode 100644 index 0000000000..4eb342e9cf --- /dev/null +++ b/packages/contracts/generated/api/console/info/orpc.gen.ts @@ -0,0 +1,23 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' + +import { zPostInfoResponse } from './zod.gen' + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInfo', + path: '/info', + tags: ['console'], + }) + .output(zPostInfoResponse) + +export const info = { + post, +} + +export const contract = { + info, +} diff --git a/packages/contracts/generated/api/console/info/types.gen.ts b/packages/contracts/generated/api/console/info/types.gen.ts new file mode 100644 index 0000000000..975f887a99 --- /dev/null +++ b/packages/contracts/generated/api/console/info/types.gen.ts @@ -0,0 +1,35 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type TenantInfoResponse = { + created_at?: number | null + custom_config?: { + [key: string]: unknown + } | null + id: string + in_trial?: boolean | null + name?: string | null + next_credit_reset_date?: number | null + plan?: string | null + role?: string | null + status?: string | null + trial_credits?: number | null + trial_credits_used?: number | null + trial_end_reason?: string | null +} + +export type 
PostInfoData = { + body?: never + path?: never + query?: never + url: '/info' +} + +export type PostInfoResponses = { + 200: TenantInfoResponse +} + +export type PostInfoResponse = PostInfoResponses[keyof PostInfoResponses] diff --git a/packages/contracts/generated/api/console/info/zod.gen.ts b/packages/contracts/generated/api/console/info/zod.gen.ts new file mode 100644 index 0000000000..adb1ea23f2 --- /dev/null +++ b/packages/contracts/generated/api/console/info/zod.gen.ts @@ -0,0 +1,26 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * TenantInfoResponse + */ +export const zTenantInfoResponse = z.object({ + created_at: z.int().nullish(), + custom_config: z.record(z.string(), z.unknown()).nullish(), + id: z.string(), + in_trial: z.boolean().nullish(), + name: z.string().nullish(), + next_credit_reset_date: z.int().nullish(), + plan: z.string().nullish(), + role: z.string().nullish(), + status: z.string().nullish(), + trial_credits: z.int().nullish(), + trial_credits_used: z.int().nullish(), + trial_end_reason: z.string().nullish(), +}) + +/** + * Success + */ +export const zPostInfoResponse = zTenantInfoResponse diff --git a/packages/contracts/generated/api/console/installed-apps/orpc.gen.ts b/packages/contracts/generated/api/console/installed-apps/orpc.gen.ts new file mode 100644 index 0000000000..c2b0b2eb37 --- /dev/null +++ b/packages/contracts/generated/api/console/installed-apps/orpc.gen.ts @@ -0,0 +1,572 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zDeleteInstalledAppsByInstalledAppIdConversationsByCIdPath, + zDeleteInstalledAppsByInstalledAppIdConversationsByCIdResponse, + zDeleteInstalledAppsByInstalledAppIdPath, + zDeleteInstalledAppsByInstalledAppIdResponse, + zDeleteInstalledAppsByInstalledAppIdSavedMessagesByMessageIdPath, + zDeleteInstalledAppsByInstalledAppIdSavedMessagesByMessageIdResponse, + 
zGetInstalledAppsByInstalledAppIdConversationsPath, + zGetInstalledAppsByInstalledAppIdConversationsQuery, + zGetInstalledAppsByInstalledAppIdConversationsResponse, + zGetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisPath, + zGetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisQuery, + zGetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisResponse, + zGetInstalledAppsByInstalledAppIdMessagesByMessageIdSuggestedQuestionsPath, + zGetInstalledAppsByInstalledAppIdMessagesByMessageIdSuggestedQuestionsResponse, + zGetInstalledAppsByInstalledAppIdMessagesPath, + zGetInstalledAppsByInstalledAppIdMessagesQuery, + zGetInstalledAppsByInstalledAppIdMessagesResponse, + zGetInstalledAppsByInstalledAppIdMetaPath, + zGetInstalledAppsByInstalledAppIdMetaResponse, + zGetInstalledAppsByInstalledAppIdParametersPath, + zGetInstalledAppsByInstalledAppIdParametersResponse, + zGetInstalledAppsByInstalledAppIdSavedMessagesPath, + zGetInstalledAppsByInstalledAppIdSavedMessagesQuery, + zGetInstalledAppsByInstalledAppIdSavedMessagesResponse, + zGetInstalledAppsResponse, + zPatchInstalledAppsByInstalledAppIdConversationsByCIdPinPath, + zPatchInstalledAppsByInstalledAppIdConversationsByCIdPinResponse, + zPatchInstalledAppsByInstalledAppIdConversationsByCIdUnpinPath, + zPatchInstalledAppsByInstalledAppIdConversationsByCIdUnpinResponse, + zPatchInstalledAppsByInstalledAppIdPath, + zPatchInstalledAppsByInstalledAppIdResponse, + zPostInstalledAppsByInstalledAppIdAudioToTextPath, + zPostInstalledAppsByInstalledAppIdAudioToTextResponse, + zPostInstalledAppsByInstalledAppIdChatMessagesBody, + zPostInstalledAppsByInstalledAppIdChatMessagesByTaskIdStopPath, + zPostInstalledAppsByInstalledAppIdChatMessagesByTaskIdStopResponse, + zPostInstalledAppsByInstalledAppIdChatMessagesPath, + zPostInstalledAppsByInstalledAppIdChatMessagesResponse, + zPostInstalledAppsByInstalledAppIdCompletionMessagesBody, + zPostInstalledAppsByInstalledAppIdCompletionMessagesByTaskIdStopPath, + 
zPostInstalledAppsByInstalledAppIdCompletionMessagesByTaskIdStopResponse, + zPostInstalledAppsByInstalledAppIdCompletionMessagesPath, + zPostInstalledAppsByInstalledAppIdCompletionMessagesResponse, + zPostInstalledAppsByInstalledAppIdConversationsByCIdNameBody, + zPostInstalledAppsByInstalledAppIdConversationsByCIdNamePath, + zPostInstalledAppsByInstalledAppIdConversationsByCIdNameResponse, + zPostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksBody, + zPostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksPath, + zPostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksResponse, + zPostInstalledAppsByInstalledAppIdSavedMessagesBody, + zPostInstalledAppsByInstalledAppIdSavedMessagesPath, + zPostInstalledAppsByInstalledAppIdSavedMessagesResponse, + zPostInstalledAppsByInstalledAppIdTextToAudioBody, + zPostInstalledAppsByInstalledAppIdTextToAudioPath, + zPostInstalledAppsByInstalledAppIdTextToAudioResponse, + zPostInstalledAppsByInstalledAppIdWorkflowsRunBody, + zPostInstalledAppsByInstalledAppIdWorkflowsRunPath, + zPostInstalledAppsByInstalledAppIdWorkflowsRunResponse, + zPostInstalledAppsByInstalledAppIdWorkflowsTasksByTaskIdStopPath, + zPostInstalledAppsByInstalledAppIdWorkflowsTasksByTaskIdStopResponse, + zPostInstalledAppsResponse, +} from './zod.gen' + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInstalledAppsByInstalledAppIdAudioToText', + path: '/installed-apps/{installed_app_id}/audio-to-text', + tags: ['console'], + }) + .input(z.object({ params: zPostInstalledAppsByInstalledAppIdAudioToTextPath })) + .output(zPostInstalledAppsByInstalledAppIdAudioToTextResponse) + +export const audioToText = { + post, +} + +export const post2 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInstalledAppsByInstalledAppIdChatMessagesByTaskIdStop', + path: '/installed-apps/{installed_app_id}/chat-messages/{task_id}/stop', + tags: ['console'], + }) + 
.input(z.object({ params: zPostInstalledAppsByInstalledAppIdChatMessagesByTaskIdStopPath })) + .output(zPostInstalledAppsByInstalledAppIdChatMessagesByTaskIdStopResponse) + +export const stop = { + post: post2, +} + +export const byTaskId = { + stop, +} + +export const post3 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInstalledAppsByInstalledAppIdChatMessages', + path: '/installed-apps/{installed_app_id}/chat-messages', + tags: ['console'], + }) + .input( + z.object({ + body: zPostInstalledAppsByInstalledAppIdChatMessagesBody, + params: zPostInstalledAppsByInstalledAppIdChatMessagesPath, + }), + ) + .output(zPostInstalledAppsByInstalledAppIdChatMessagesResponse) + +export const chatMessages = { + post: post3, + byTaskId, +} + +export const post4 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInstalledAppsByInstalledAppIdCompletionMessagesByTaskIdStop', + path: '/installed-apps/{installed_app_id}/completion-messages/{task_id}/stop', + tags: ['console'], + }) + .input(z.object({ params: zPostInstalledAppsByInstalledAppIdCompletionMessagesByTaskIdStopPath })) + .output(zPostInstalledAppsByInstalledAppIdCompletionMessagesByTaskIdStopResponse) + +export const stop2 = { + post: post4, +} + +export const byTaskId2 = { + stop: stop2, +} + +export const post5 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInstalledAppsByInstalledAppIdCompletionMessages', + path: '/installed-apps/{installed_app_id}/completion-messages', + tags: ['console'], + }) + .input( + z.object({ + body: zPostInstalledAppsByInstalledAppIdCompletionMessagesBody, + params: zPostInstalledAppsByInstalledAppIdCompletionMessagesPath, + }), + ) + .output(zPostInstalledAppsByInstalledAppIdCompletionMessagesResponse) + +export const completionMessages = { + post: post5, + byTaskId: byTaskId2, +} + +export const post6 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 
'postInstalledAppsByInstalledAppIdConversationsByCIdName', + path: '/installed-apps/{installed_app_id}/conversations/{c_id}/name', + tags: ['console'], + }) + .input( + z.object({ + body: zPostInstalledAppsByInstalledAppIdConversationsByCIdNameBody, + params: zPostInstalledAppsByInstalledAppIdConversationsByCIdNamePath, + }), + ) + .output(zPostInstalledAppsByInstalledAppIdConversationsByCIdNameResponse) + +export const name = { + post: post6, +} + +export const patch = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchInstalledAppsByInstalledAppIdConversationsByCIdPin', + path: '/installed-apps/{installed_app_id}/conversations/{c_id}/pin', + tags: ['console'], + }) + .input(z.object({ params: zPatchInstalledAppsByInstalledAppIdConversationsByCIdPinPath })) + .output(zPatchInstalledAppsByInstalledAppIdConversationsByCIdPinResponse) + +export const pin = { + patch, +} + +export const patch2 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchInstalledAppsByInstalledAppIdConversationsByCIdUnpin', + path: '/installed-apps/{installed_app_id}/conversations/{c_id}/unpin', + tags: ['console'], + }) + .input(z.object({ params: zPatchInstalledAppsByInstalledAppIdConversationsByCIdUnpinPath })) + .output(zPatchInstalledAppsByInstalledAppIdConversationsByCIdUnpinResponse) + +export const unpin = { + patch: patch2, +} + +export const delete_ = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteInstalledAppsByInstalledAppIdConversationsByCId', + path: '/installed-apps/{installed_app_id}/conversations/{c_id}', + tags: ['console'], + }) + .input(z.object({ params: zDeleteInstalledAppsByInstalledAppIdConversationsByCIdPath })) + .output(zDeleteInstalledAppsByInstalledAppIdConversationsByCIdResponse) + +export const byCId = { + delete: delete_, + name, + pin, + unpin, +} + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 
'getInstalledAppsByInstalledAppIdConversations', + path: '/installed-apps/{installed_app_id}/conversations', + tags: ['console'], + }) + .input( + z.object({ + params: zGetInstalledAppsByInstalledAppIdConversationsPath, + query: zGetInstalledAppsByInstalledAppIdConversationsQuery.optional(), + }), + ) + .output(zGetInstalledAppsByInstalledAppIdConversationsResponse) + +export const conversations = { + get, + byCId, +} + +export const post7 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacks', + path: '/installed-apps/{installed_app_id}/messages/{message_id}/feedbacks', + tags: ['console'], + }) + .input( + z.object({ + body: zPostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksBody, + params: zPostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksPath, + }), + ) + .output(zPostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksResponse) + +export const feedbacks = { + post: post7, +} + +export const get2 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThis', + path: '/installed-apps/{installed_app_id}/messages/{message_id}/more-like-this', + tags: ['console'], + }) + .input( + z.object({ + params: zGetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisPath, + query: zGetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisQuery, + }), + ) + .output(zGetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisResponse) + +export const moreLikeThis = { + get: get2, +} + +export const get3 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getInstalledAppsByInstalledAppIdMessagesByMessageIdSuggestedQuestions', + path: '/installed-apps/{installed_app_id}/messages/{message_id}/suggested-questions', + tags: ['console'], + }) + .input( + z.object({ + params: 
zGetInstalledAppsByInstalledAppIdMessagesByMessageIdSuggestedQuestionsPath, + }), + ) + .output(zGetInstalledAppsByInstalledAppIdMessagesByMessageIdSuggestedQuestionsResponse) + +export const suggestedQuestions = { + get: get3, +} + +export const byMessageId = { + feedbacks, + moreLikeThis, + suggestedQuestions, +} + +export const get4 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getInstalledAppsByInstalledAppIdMessages', + path: '/installed-apps/{installed_app_id}/messages', + tags: ['console'], + }) + .input( + z.object({ + params: zGetInstalledAppsByInstalledAppIdMessagesPath, + query: zGetInstalledAppsByInstalledAppIdMessagesQuery, + }), + ) + .output(zGetInstalledAppsByInstalledAppIdMessagesResponse) + +export const messages = { + get: get4, + byMessageId, +} + +/** + * Get app meta + */ +export const get5 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getInstalledAppsByInstalledAppIdMeta', + path: '/installed-apps/{installed_app_id}/meta', + summary: 'Get app meta', + tags: ['console'], + }) + .input(z.object({ params: zGetInstalledAppsByInstalledAppIdMetaPath })) + .output(zGetInstalledAppsByInstalledAppIdMetaResponse) + +export const meta = { + get: get5, +} + +/** + * Retrieve app parameters + */ +export const get6 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getInstalledAppsByInstalledAppIdParameters', + path: '/installed-apps/{installed_app_id}/parameters', + summary: 'Retrieve app parameters', + tags: ['console'], + }) + .input(z.object({ params: zGetInstalledAppsByInstalledAppIdParametersPath })) + .output(zGetInstalledAppsByInstalledAppIdParametersResponse) + +export const parameters = { + get: get6, +} + +export const delete2 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteInstalledAppsByInstalledAppIdSavedMessagesByMessageId', + path: '/installed-apps/{installed_app_id}/saved-messages/{message_id}', + tags: 
['console'], + }) + .input(z.object({ params: zDeleteInstalledAppsByInstalledAppIdSavedMessagesByMessageIdPath })) + .output(zDeleteInstalledAppsByInstalledAppIdSavedMessagesByMessageIdResponse) + +export const byMessageId2 = { + delete: delete2, +} + +export const get7 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getInstalledAppsByInstalledAppIdSavedMessages', + path: '/installed-apps/{installed_app_id}/saved-messages', + tags: ['console'], + }) + .input( + z.object({ + params: zGetInstalledAppsByInstalledAppIdSavedMessagesPath, + query: zGetInstalledAppsByInstalledAppIdSavedMessagesQuery.optional(), + }), + ) + .output(zGetInstalledAppsByInstalledAppIdSavedMessagesResponse) + +export const post8 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInstalledAppsByInstalledAppIdSavedMessages', + path: '/installed-apps/{installed_app_id}/saved-messages', + tags: ['console'], + }) + .input( + z.object({ + body: zPostInstalledAppsByInstalledAppIdSavedMessagesBody, + params: zPostInstalledAppsByInstalledAppIdSavedMessagesPath, + }), + ) + .output(zPostInstalledAppsByInstalledAppIdSavedMessagesResponse) + +export const savedMessages = { + get: get7, + post: post8, + byMessageId: byMessageId2, +} + +export const post9 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInstalledAppsByInstalledAppIdTextToAudio', + path: '/installed-apps/{installed_app_id}/text-to-audio', + tags: ['console'], + }) + .input( + z.object({ + body: zPostInstalledAppsByInstalledAppIdTextToAudioBody, + params: zPostInstalledAppsByInstalledAppIdTextToAudioPath, + }), + ) + .output(zPostInstalledAppsByInstalledAppIdTextToAudioResponse) + +export const textToAudio = { + post: post9, +} + +/** + * Run workflow + */ +export const post10 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInstalledAppsByInstalledAppIdWorkflowsRun', + path: 
'/installed-apps/{installed_app_id}/workflows/run', + summary: 'Run workflow', + tags: ['console'], + }) + .input( + z.object({ + body: zPostInstalledAppsByInstalledAppIdWorkflowsRunBody, + params: zPostInstalledAppsByInstalledAppIdWorkflowsRunPath, + }), + ) + .output(zPostInstalledAppsByInstalledAppIdWorkflowsRunResponse) + +export const run = { + post: post10, +} + +/** + * Stop workflow task + */ +export const post11 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInstalledAppsByInstalledAppIdWorkflowsTasksByTaskIdStop', + path: '/installed-apps/{installed_app_id}/workflows/tasks/{task_id}/stop', + summary: 'Stop workflow task', + tags: ['console'], + }) + .input(z.object({ params: zPostInstalledAppsByInstalledAppIdWorkflowsTasksByTaskIdStopPath })) + .output(zPostInstalledAppsByInstalledAppIdWorkflowsTasksByTaskIdStopResponse) + +export const stop3 = { + post: post11, +} + +export const byTaskId3 = { + stop: stop3, +} + +export const tasks = { + byTaskId: byTaskId3, +} + +export const workflows = { + run, + tasks, +} + +export const delete3 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteInstalledAppsByInstalledAppId', + path: '/installed-apps/{installed_app_id}', + tags: ['console'], + }) + .input(z.object({ params: zDeleteInstalledAppsByInstalledAppIdPath })) + .output(zDeleteInstalledAppsByInstalledAppIdResponse) + +export const patch3 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchInstalledAppsByInstalledAppId', + path: '/installed-apps/{installed_app_id}', + tags: ['console'], + }) + .input(z.object({ params: zPatchInstalledAppsByInstalledAppIdPath })) + .output(zPatchInstalledAppsByInstalledAppIdResponse) + +export const byInstalledAppId = { + delete: delete3, + patch: patch3, + audioToText, + chatMessages, + completionMessages, + conversations, + messages, + meta, + parameters, + savedMessages, + textToAudio, + workflows, +} + +export const 
get8 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getInstalledApps', + path: '/installed-apps', + tags: ['console'], + }) + .output(zGetInstalledAppsResponse) + +export const post12 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInstalledApps', + path: '/installed-apps', + tags: ['console'], + }) + .output(zPostInstalledAppsResponse) + +export const installedApps = { + get: get8, + post: post12, + byInstalledAppId, +} + +export const contract = { + installedApps, +} diff --git a/packages/contracts/generated/api/console/installed-apps/types.gen.ts b/packages/contracts/generated/api/console/installed-apps/types.gen.ts new file mode 100644 index 0000000000..897fc29b8b --- /dev/null +++ b/packages/contracts/generated/api/console/installed-apps/types.gen.ts @@ -0,0 +1,571 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type InstalledAppListResponse = { + installed_apps: Array +} + +export type ChatMessagePayload = { + conversation_id?: string | null + files?: Array | null + inputs: { + [key: string]: unknown + } + model_config: { + [key: string]: unknown + } + parent_message_id?: string | null + query: string + response_mode?: 'blocking' | 'streaming' + retriever_from?: string +} + +export type CompletionMessageExplorePayload = { + files?: Array<{ + [key: string]: unknown + }> | null + inputs: { + [key: string]: unknown + } + query?: string + response_mode?: 'blocking' | 'streaming' | null + retriever_from?: string +} + +export type ConversationRenamePayload = { + auto_generate?: boolean + name?: string | null +} + +export type MessageFeedbackPayload = { + content?: string | null + message_id: string + rating?: 'like' | 'dislike' | null +} + +export type SavedMessageCreatePayload = { + message_id: string +} + +export type TextToAudioPayload = { + message_id?: string | null + 
streaming?: boolean | null + text?: string | null + voice?: string | null +} + +export type WorkflowRunPayload = { + files?: Array<{ + [key: string]: unknown + }> | null + inputs: { + [key: string]: unknown + } +} + +export type InstalledAppResponse = { + app: InstalledAppInfoResponse + app_owner_tenant_id: string + editable: boolean + id: string + is_pinned: boolean + last_used_at?: number | null + uninstallable: boolean +} + +export type InstalledAppInfoResponse = { + icon?: string | null + icon_background?: string | null + icon_type?: string | null + id: string + mode?: string | null + name?: string | null + use_icon_as_answer_icon?: boolean | null +} + +export type GetInstalledAppsData = { + body?: never + path?: never + query?: never + url: '/installed-apps' +} + +export type GetInstalledAppsResponses = { + 200: InstalledAppListResponse +} + +export type GetInstalledAppsResponse = GetInstalledAppsResponses[keyof GetInstalledAppsResponses] + +export type PostInstalledAppsData = { + body?: never + path?: never + query?: never + url: '/installed-apps' +} + +export type PostInstalledAppsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostInstalledAppsResponse = PostInstalledAppsResponses[keyof PostInstalledAppsResponses] + +export type DeleteInstalledAppsByInstalledAppIdData = { + body?: never + path: { + installed_app_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}' +} + +export type DeleteInstalledAppsByInstalledAppIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteInstalledAppsByInstalledAppIdResponse + = DeleteInstalledAppsByInstalledAppIdResponses[keyof DeleteInstalledAppsByInstalledAppIdResponses] + +export type PatchInstalledAppsByInstalledAppIdData = { + body?: never + path: { + installed_app_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}' +} + +export type PatchInstalledAppsByInstalledAppIdResponses = { + 200: { + [key: string]: unknown + } +} + +export 
type PatchInstalledAppsByInstalledAppIdResponse + = PatchInstalledAppsByInstalledAppIdResponses[keyof PatchInstalledAppsByInstalledAppIdResponses] + +export type PostInstalledAppsByInstalledAppIdAudioToTextData = { + body?: never + path: { + installed_app_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/audio-to-text' +} + +export type PostInstalledAppsByInstalledAppIdAudioToTextResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostInstalledAppsByInstalledAppIdAudioToTextResponse + = PostInstalledAppsByInstalledAppIdAudioToTextResponses[keyof PostInstalledAppsByInstalledAppIdAudioToTextResponses] + +export type PostInstalledAppsByInstalledAppIdChatMessagesData = { + body: ChatMessagePayload + path: { + installed_app_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/chat-messages' +} + +export type PostInstalledAppsByInstalledAppIdChatMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostInstalledAppsByInstalledAppIdChatMessagesResponse + = PostInstalledAppsByInstalledAppIdChatMessagesResponses[keyof PostInstalledAppsByInstalledAppIdChatMessagesResponses] + +export type PostInstalledAppsByInstalledAppIdChatMessagesByTaskIdStopData = { + body?: never + path: { + installed_app_id: string + task_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/chat-messages/{task_id}/stop' +} + +export type PostInstalledAppsByInstalledAppIdChatMessagesByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostInstalledAppsByInstalledAppIdChatMessagesByTaskIdStopResponse + = PostInstalledAppsByInstalledAppIdChatMessagesByTaskIdStopResponses[keyof PostInstalledAppsByInstalledAppIdChatMessagesByTaskIdStopResponses] + +export type PostInstalledAppsByInstalledAppIdCompletionMessagesData = { + body: CompletionMessageExplorePayload + path: { + installed_app_id: string + } + query?: never + url: 
'/installed-apps/{installed_app_id}/completion-messages' +} + +export type PostInstalledAppsByInstalledAppIdCompletionMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostInstalledAppsByInstalledAppIdCompletionMessagesResponse + = PostInstalledAppsByInstalledAppIdCompletionMessagesResponses[keyof PostInstalledAppsByInstalledAppIdCompletionMessagesResponses] + +export type PostInstalledAppsByInstalledAppIdCompletionMessagesByTaskIdStopData = { + body?: never + path: { + installed_app_id: string + task_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/completion-messages/{task_id}/stop' +} + +export type PostInstalledAppsByInstalledAppIdCompletionMessagesByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostInstalledAppsByInstalledAppIdCompletionMessagesByTaskIdStopResponse + = PostInstalledAppsByInstalledAppIdCompletionMessagesByTaskIdStopResponses[keyof PostInstalledAppsByInstalledAppIdCompletionMessagesByTaskIdStopResponses] + +export type GetInstalledAppsByInstalledAppIdConversationsData = { + body?: never + path: { + installed_app_id: string + } + query?: { + last_id?: string | null + limit?: number + pinned?: boolean | null + } + url: '/installed-apps/{installed_app_id}/conversations' +} + +export type GetInstalledAppsByInstalledAppIdConversationsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetInstalledAppsByInstalledAppIdConversationsResponse + = GetInstalledAppsByInstalledAppIdConversationsResponses[keyof GetInstalledAppsByInstalledAppIdConversationsResponses] + +export type DeleteInstalledAppsByInstalledAppIdConversationsByCIdData = { + body?: never + path: { + installed_app_id: string + c_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/conversations/{c_id}' +} + +export type DeleteInstalledAppsByInstalledAppIdConversationsByCIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
DeleteInstalledAppsByInstalledAppIdConversationsByCIdResponse + = DeleteInstalledAppsByInstalledAppIdConversationsByCIdResponses[keyof DeleteInstalledAppsByInstalledAppIdConversationsByCIdResponses] + +export type PostInstalledAppsByInstalledAppIdConversationsByCIdNameData = { + body: ConversationRenamePayload + path: { + installed_app_id: string + c_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/conversations/{c_id}/name' +} + +export type PostInstalledAppsByInstalledAppIdConversationsByCIdNameResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostInstalledAppsByInstalledAppIdConversationsByCIdNameResponse + = PostInstalledAppsByInstalledAppIdConversationsByCIdNameResponses[keyof PostInstalledAppsByInstalledAppIdConversationsByCIdNameResponses] + +export type PatchInstalledAppsByInstalledAppIdConversationsByCIdPinData = { + body?: never + path: { + installed_app_id: string + c_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/conversations/{c_id}/pin' +} + +export type PatchInstalledAppsByInstalledAppIdConversationsByCIdPinResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchInstalledAppsByInstalledAppIdConversationsByCIdPinResponse + = PatchInstalledAppsByInstalledAppIdConversationsByCIdPinResponses[keyof PatchInstalledAppsByInstalledAppIdConversationsByCIdPinResponses] + +export type PatchInstalledAppsByInstalledAppIdConversationsByCIdUnpinData = { + body?: never + path: { + installed_app_id: string + c_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/conversations/{c_id}/unpin' +} + +export type PatchInstalledAppsByInstalledAppIdConversationsByCIdUnpinResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchInstalledAppsByInstalledAppIdConversationsByCIdUnpinResponse + = PatchInstalledAppsByInstalledAppIdConversationsByCIdUnpinResponses[keyof PatchInstalledAppsByInstalledAppIdConversationsByCIdUnpinResponses] + +export type 
GetInstalledAppsByInstalledAppIdMessagesData = { + body?: never + path: { + installed_app_id: string + } + query: { + conversation_id: string + first_id?: string | null + limit?: number + } + url: '/installed-apps/{installed_app_id}/messages' +} + +export type GetInstalledAppsByInstalledAppIdMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetInstalledAppsByInstalledAppIdMessagesResponse + = GetInstalledAppsByInstalledAppIdMessagesResponses[keyof GetInstalledAppsByInstalledAppIdMessagesResponses] + +export type PostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksData = { + body: MessageFeedbackPayload + path: { + installed_app_id: string + message_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/messages/{message_id}/feedbacks' +} + +export type PostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksResponse + = PostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksResponses[keyof PostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksResponses] + +export type GetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisData = { + body?: never + path: { + installed_app_id: string + message_id: string + } + query: { + response_mode: 'blocking' | 'streaming' + } + url: '/installed-apps/{installed_app_id}/messages/{message_id}/more-like-this' +} + +export type GetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisResponse + = GetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisResponses[keyof GetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisResponses] + +export type GetInstalledAppsByInstalledAppIdMessagesByMessageIdSuggestedQuestionsData = { + body?: never + path: { + installed_app_id: string 
+ message_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/messages/{message_id}/suggested-questions' +} + +export type GetInstalledAppsByInstalledAppIdMessagesByMessageIdSuggestedQuestionsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetInstalledAppsByInstalledAppIdMessagesByMessageIdSuggestedQuestionsResponse + = GetInstalledAppsByInstalledAppIdMessagesByMessageIdSuggestedQuestionsResponses[keyof GetInstalledAppsByInstalledAppIdMessagesByMessageIdSuggestedQuestionsResponses] + +export type GetInstalledAppsByInstalledAppIdMetaData = { + body?: never + path: { + installed_app_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/meta' +} + +export type GetInstalledAppsByInstalledAppIdMetaResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetInstalledAppsByInstalledAppIdMetaResponse + = GetInstalledAppsByInstalledAppIdMetaResponses[keyof GetInstalledAppsByInstalledAppIdMetaResponses] + +export type GetInstalledAppsByInstalledAppIdParametersData = { + body?: never + path: { + installed_app_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/parameters' +} + +export type GetInstalledAppsByInstalledAppIdParametersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetInstalledAppsByInstalledAppIdParametersResponse + = GetInstalledAppsByInstalledAppIdParametersResponses[keyof GetInstalledAppsByInstalledAppIdParametersResponses] + +export type GetInstalledAppsByInstalledAppIdSavedMessagesData = { + body?: never + path: { + installed_app_id: string + } + query?: { + last_id?: string | null + limit?: number + } + url: '/installed-apps/{installed_app_id}/saved-messages' +} + +export type GetInstalledAppsByInstalledAppIdSavedMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetInstalledAppsByInstalledAppIdSavedMessagesResponse + = GetInstalledAppsByInstalledAppIdSavedMessagesResponses[keyof 
GetInstalledAppsByInstalledAppIdSavedMessagesResponses] + +export type PostInstalledAppsByInstalledAppIdSavedMessagesData = { + body: SavedMessageCreatePayload + path: { + installed_app_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/saved-messages' +} + +export type PostInstalledAppsByInstalledAppIdSavedMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostInstalledAppsByInstalledAppIdSavedMessagesResponse + = PostInstalledAppsByInstalledAppIdSavedMessagesResponses[keyof PostInstalledAppsByInstalledAppIdSavedMessagesResponses] + +export type DeleteInstalledAppsByInstalledAppIdSavedMessagesByMessageIdData = { + body?: never + path: { + installed_app_id: string + message_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/saved-messages/{message_id}' +} + +export type DeleteInstalledAppsByInstalledAppIdSavedMessagesByMessageIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteInstalledAppsByInstalledAppIdSavedMessagesByMessageIdResponse + = DeleteInstalledAppsByInstalledAppIdSavedMessagesByMessageIdResponses[keyof DeleteInstalledAppsByInstalledAppIdSavedMessagesByMessageIdResponses] + +export type PostInstalledAppsByInstalledAppIdTextToAudioData = { + body: TextToAudioPayload + path: { + installed_app_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/text-to-audio' +} + +export type PostInstalledAppsByInstalledAppIdTextToAudioResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostInstalledAppsByInstalledAppIdTextToAudioResponse + = PostInstalledAppsByInstalledAppIdTextToAudioResponses[keyof PostInstalledAppsByInstalledAppIdTextToAudioResponses] + +export type PostInstalledAppsByInstalledAppIdWorkflowsRunData = { + body: WorkflowRunPayload + path: { + installed_app_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/workflows/run' +} + +export type PostInstalledAppsByInstalledAppIdWorkflowsRunResponses = 
{ + 200: { + [key: string]: unknown + } +} + +export type PostInstalledAppsByInstalledAppIdWorkflowsRunResponse + = PostInstalledAppsByInstalledAppIdWorkflowsRunResponses[keyof PostInstalledAppsByInstalledAppIdWorkflowsRunResponses] + +export type PostInstalledAppsByInstalledAppIdWorkflowsTasksByTaskIdStopData = { + body?: never + path: { + installed_app_id: string + task_id: string + } + query?: never + url: '/installed-apps/{installed_app_id}/workflows/tasks/{task_id}/stop' +} + +export type PostInstalledAppsByInstalledAppIdWorkflowsTasksByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostInstalledAppsByInstalledAppIdWorkflowsTasksByTaskIdStopResponse + = PostInstalledAppsByInstalledAppIdWorkflowsTasksByTaskIdStopResponses[keyof PostInstalledAppsByInstalledAppIdWorkflowsTasksByTaskIdStopResponses] diff --git a/packages/contracts/generated/api/console/installed-apps/zod.gen.ts b/packages/contracts/generated/api/console/installed-apps/zod.gen.ts new file mode 100644 index 0000000000..c8683e092c --- /dev/null +++ b/packages/contracts/generated/api/console/installed-apps/zod.gen.ts @@ -0,0 +1,433 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * ChatMessagePayload + */ +export const zChatMessagePayload = z.object({ + conversation_id: z.string().nullish(), + files: z.array(z.unknown()).nullish(), + inputs: z.record(z.string(), z.unknown()), + model_config: z.record(z.string(), z.unknown()), + parent_message_id: z.string().nullish(), + query: z.string(), + response_mode: z.enum(['blocking', 'streaming']).optional().default('blocking'), + retriever_from: z.string().optional().default('dev'), +}) + +/** + * CompletionMessageExplorePayload + */ +export const zCompletionMessageExplorePayload = z.object({ + files: z.array(z.record(z.string(), z.unknown())).nullish(), + inputs: z.record(z.string(), z.unknown()), + query: z.string().optional().default(''), + response_mode: z.enum(['blocking', 
'streaming']).nullish(), + retriever_from: z.string().optional().default('explore_app'), +}) + +/** + * ConversationRenamePayload + */ +export const zConversationRenamePayload = z.object({ + auto_generate: z.boolean().optional().default(false), + name: z.string().nullish(), +}) + +/** + * MessageFeedbackPayload + */ +export const zMessageFeedbackPayload = z.object({ + content: z.string().nullish(), + message_id: z.string(), + rating: z.enum(['like', 'dislike']).nullish(), +}) + +/** + * SavedMessageCreatePayload + */ +export const zSavedMessageCreatePayload = z.object({ + message_id: z.string(), +}) + +/** + * TextToAudioPayload + */ +export const zTextToAudioPayload = z.object({ + message_id: z.string().nullish(), + streaming: z.boolean().nullish(), + text: z.string().nullish(), + voice: z.string().nullish(), +}) + +/** + * WorkflowRunPayload + */ +export const zWorkflowRunPayload = z.object({ + files: z.array(z.record(z.string(), z.unknown())).nullish(), + inputs: z.record(z.string(), z.unknown()), +}) + +/** + * InstalledAppInfoResponse + */ +export const zInstalledAppInfoResponse = z.object({ + icon: z.string().nullish(), + icon_background: z.string().nullish(), + icon_type: z.string().nullish(), + id: z.string(), + mode: z.string().nullish(), + name: z.string().nullish(), + use_icon_as_answer_icon: z.boolean().nullish(), +}) + +/** + * InstalledAppResponse + */ +export const zInstalledAppResponse = z.object({ + app: zInstalledAppInfoResponse, + app_owner_tenant_id: z.string(), + editable: z.boolean(), + id: z.string(), + is_pinned: z.boolean(), + last_used_at: z.int().nullish(), + uninstallable: z.boolean(), +}) + +/** + * InstalledAppListResponse + */ +export const zInstalledAppListResponse = z.object({ + installed_apps: z.array(zInstalledAppResponse), +}) + +/** + * Success + */ +export const zGetInstalledAppsResponse = zInstalledAppListResponse + +/** + * Success + */ +export const zPostInstalledAppsResponse = z.record(z.string(), z.unknown()) + +export 
const zDeleteInstalledAppsByInstalledAppIdPath = z.object({ + installed_app_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteInstalledAppsByInstalledAppIdResponse = z.record(z.string(), z.unknown()) + +export const zPatchInstalledAppsByInstalledAppIdPath = z.object({ + installed_app_id: z.string(), +}) + +/** + * Success + */ +export const zPatchInstalledAppsByInstalledAppIdResponse = z.record(z.string(), z.unknown()) + +export const zPostInstalledAppsByInstalledAppIdAudioToTextPath = z.object({ + installed_app_id: z.string(), +}) + +/** + * Success + */ +export const zPostInstalledAppsByInstalledAppIdAudioToTextResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostInstalledAppsByInstalledAppIdChatMessagesBody = zChatMessagePayload + +export const zPostInstalledAppsByInstalledAppIdChatMessagesPath = z.object({ + installed_app_id: z.string(), +}) + +/** + * Success + */ +export const zPostInstalledAppsByInstalledAppIdChatMessagesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostInstalledAppsByInstalledAppIdChatMessagesByTaskIdStopPath = z.object({ + installed_app_id: z.string(), + task_id: z.string(), +}) + +/** + * Success + */ +export const zPostInstalledAppsByInstalledAppIdChatMessagesByTaskIdStopResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostInstalledAppsByInstalledAppIdCompletionMessagesBody + = zCompletionMessageExplorePayload + +export const zPostInstalledAppsByInstalledAppIdCompletionMessagesPath = z.object({ + installed_app_id: z.string(), +}) + +/** + * Success + */ +export const zPostInstalledAppsByInstalledAppIdCompletionMessagesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostInstalledAppsByInstalledAppIdCompletionMessagesByTaskIdStopPath = z.object({ + installed_app_id: z.string(), + task_id: z.string(), +}) + +/** + * Success + */ +export const zPostInstalledAppsByInstalledAppIdCompletionMessagesByTaskIdStopResponse = z.record( + z.string(), 
+ z.unknown(), +) + +export const zGetInstalledAppsByInstalledAppIdConversationsPath = z.object({ + installed_app_id: z.string(), +}) + +export const zGetInstalledAppsByInstalledAppIdConversationsQuery = z.object({ + last_id: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), + pinned: z.boolean().nullish(), +}) + +/** + * Success + */ +export const zGetInstalledAppsByInstalledAppIdConversationsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteInstalledAppsByInstalledAppIdConversationsByCIdPath = z.object({ + installed_app_id: z.string(), + c_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteInstalledAppsByInstalledAppIdConversationsByCIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostInstalledAppsByInstalledAppIdConversationsByCIdNameBody + = zConversationRenamePayload + +export const zPostInstalledAppsByInstalledAppIdConversationsByCIdNamePath = z.object({ + installed_app_id: z.string(), + c_id: z.string(), +}) + +/** + * Success + */ +export const zPostInstalledAppsByInstalledAppIdConversationsByCIdNameResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchInstalledAppsByInstalledAppIdConversationsByCIdPinPath = z.object({ + installed_app_id: z.string(), + c_id: z.string(), +}) + +/** + * Success + */ +export const zPatchInstalledAppsByInstalledAppIdConversationsByCIdPinResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchInstalledAppsByInstalledAppIdConversationsByCIdUnpinPath = z.object({ + installed_app_id: z.string(), + c_id: z.string(), +}) + +/** + * Success + */ +export const zPatchInstalledAppsByInstalledAppIdConversationsByCIdUnpinResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetInstalledAppsByInstalledAppIdMessagesPath = z.object({ + installed_app_id: z.string(), +}) + +export const zGetInstalledAppsByInstalledAppIdMessagesQuery = z.object({ + conversation_id: z.string(), + first_id: 
z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), +}) + +/** + * Success + */ +export const zGetInstalledAppsByInstalledAppIdMessagesResponse = z.record(z.string(), z.unknown()) + +export const zPostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksBody + = zMessageFeedbackPayload + +export const zPostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksPath = z.object({ + installed_app_id: z.string(), + message_id: z.string(), +}) + +/** + * Success + */ +export const zPostInstalledAppsByInstalledAppIdMessagesByMessageIdFeedbacksResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisPath = z.object({ + installed_app_id: z.string(), + message_id: z.string(), +}) + +export const zGetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisQuery = z.object({ + response_mode: z.enum(['blocking', 'streaming']), +}) + +/** + * Success + */ +export const zGetInstalledAppsByInstalledAppIdMessagesByMessageIdMoreLikeThisResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetInstalledAppsByInstalledAppIdMessagesByMessageIdSuggestedQuestionsPath = z.object({ + installed_app_id: z.string(), + message_id: z.string(), +}) + +/** + * Success + */ +export const zGetInstalledAppsByInstalledAppIdMessagesByMessageIdSuggestedQuestionsResponse + = z.record(z.string(), z.unknown()) + +export const zGetInstalledAppsByInstalledAppIdMetaPath = z.object({ + installed_app_id: z.string(), +}) + +/** + * Success + */ +export const zGetInstalledAppsByInstalledAppIdMetaResponse = z.record(z.string(), z.unknown()) + +export const zGetInstalledAppsByInstalledAppIdParametersPath = z.object({ + installed_app_id: z.string(), +}) + +/** + * Success + */ +export const zGetInstalledAppsByInstalledAppIdParametersResponse = z.record(z.string(), z.unknown()) + +export const zGetInstalledAppsByInstalledAppIdSavedMessagesPath = z.object({ + installed_app_id: z.string(), 
+}) + +export const zGetInstalledAppsByInstalledAppIdSavedMessagesQuery = z.object({ + last_id: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), +}) + +/** + * Success + */ +export const zGetInstalledAppsByInstalledAppIdSavedMessagesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostInstalledAppsByInstalledAppIdSavedMessagesBody = zSavedMessageCreatePayload + +export const zPostInstalledAppsByInstalledAppIdSavedMessagesPath = z.object({ + installed_app_id: z.string(), +}) + +/** + * Success + */ +export const zPostInstalledAppsByInstalledAppIdSavedMessagesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteInstalledAppsByInstalledAppIdSavedMessagesByMessageIdPath = z.object({ + installed_app_id: z.string(), + message_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteInstalledAppsByInstalledAppIdSavedMessagesByMessageIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostInstalledAppsByInstalledAppIdTextToAudioBody = zTextToAudioPayload + +export const zPostInstalledAppsByInstalledAppIdTextToAudioPath = z.object({ + installed_app_id: z.string(), +}) + +/** + * Success + */ +export const zPostInstalledAppsByInstalledAppIdTextToAudioResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostInstalledAppsByInstalledAppIdWorkflowsRunBody = zWorkflowRunPayload + +export const zPostInstalledAppsByInstalledAppIdWorkflowsRunPath = z.object({ + installed_app_id: z.string(), +}) + +/** + * Success + */ +export const zPostInstalledAppsByInstalledAppIdWorkflowsRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostInstalledAppsByInstalledAppIdWorkflowsTasksByTaskIdStopPath = z.object({ + installed_app_id: z.string(), + task_id: z.string(), +}) + +/** + * Success + */ +export const zPostInstalledAppsByInstalledAppIdWorkflowsTasksByTaskIdStopResponse = z.record( + z.string(), + z.unknown(), +) diff --git 
a/packages/contracts/generated/api/console/instruction-generate/orpc.gen.ts b/packages/contracts/generated/api/console/instruction-generate/orpc.gen.ts new file mode 100644 index 0000000000..3aff6a9a3b --- /dev/null +++ b/packages/contracts/generated/api/console/instruction-generate/orpc.gen.ts @@ -0,0 +1,54 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zPostInstructionGenerateBody, + zPostInstructionGenerateResponse, + zPostInstructionGenerateTemplateBody, + zPostInstructionGenerateTemplateResponse, +} from './zod.gen' + +/** + * Get instruction generation template + */ +export const post = oc + .route({ + description: 'Get instruction generation template', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInstructionGenerateTemplate', + path: '/instruction-generate/template', + tags: ['console'], + }) + .input(z.object({ body: zPostInstructionGenerateTemplateBody })) + .output(zPostInstructionGenerateTemplateResponse) + +export const template = { + post, +} + +/** + * Generate instruction for workflow nodes or general use + */ +export const post2 = oc + .route({ + description: 'Generate instruction for workflow nodes or general use', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postInstructionGenerate', + path: '/instruction-generate', + tags: ['console'], + }) + .input(z.object({ body: zPostInstructionGenerateBody })) + .output(zPostInstructionGenerateResponse) + +export const instructionGenerate = { + post: post2, + template, +} + +export const contract = { + instructionGenerate, +} diff --git a/packages/contracts/generated/api/console/instruction-generate/types.gen.ts b/packages/contracts/generated/api/console/instruction-generate/types.gen.ts new file mode 100644 index 0000000000..1dd3530d44 --- /dev/null +++ b/packages/contracts/generated/api/console/instruction-generate/types.gen.ts @@ -0,0 +1,101 @@ +// This file is auto-generated 
by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type InstructionGeneratePayload = { + current?: string + flow_id: string + ideal_output?: string + instruction: string + language?: string + model_config: ModelConfig + node_id?: string +} + +export type InstructionTemplatePayload = { + type: string +} + +export type ModelConfig = { + agent_mode_dict?: JsonValue + annotation_reply_dict?: JsonValue + chat_prompt_config_dict?: JsonValue + completion_prompt_config_dict?: JsonValue + created_at?: number | null + created_by?: string | null + dataset_configs_dict?: JsonValue + dataset_query_variable?: string | null + external_data_tools_list?: JsonValue + file_upload_dict?: JsonValue + model_dict?: JsonValue + more_like_this_dict?: JsonValue + opening_statement?: string | null + pre_prompt?: string | null + prompt_type?: string | null + retriever_resource_dict?: JsonValue + sensitive_word_avoidance_dict?: JsonValue + speech_to_text_dict?: JsonValue + suggested_questions_after_answer_dict?: JsonValue + suggested_questions_list?: JsonValue + text_to_speech_dict?: JsonValue + updated_at?: number | null + updated_by?: string | null + user_input_form_list?: JsonValue +} + +export type JsonValue = unknown + +export type PostInstructionGenerateData = { + body: InstructionGeneratePayload + path?: never + query?: never + url: '/instruction-generate' +} + +export type PostInstructionGenerateErrors = { + 400: { + [key: string]: unknown + } + 402: { + [key: string]: unknown + } +} + +export type PostInstructionGenerateError + = PostInstructionGenerateErrors[keyof PostInstructionGenerateErrors] + +export type PostInstructionGenerateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostInstructionGenerateResponse + = PostInstructionGenerateResponses[keyof PostInstructionGenerateResponses] + +export type PostInstructionGenerateTemplateData = { + body: InstructionTemplatePayload + path?: 
never + query?: never + url: '/instruction-generate/template' +} + +export type PostInstructionGenerateTemplateErrors = { + 400: { + [key: string]: unknown + } +} + +export type PostInstructionGenerateTemplateError + = PostInstructionGenerateTemplateErrors[keyof PostInstructionGenerateTemplateErrors] + +export type PostInstructionGenerateTemplateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostInstructionGenerateTemplateResponse + = PostInstructionGenerateTemplateResponses[keyof PostInstructionGenerateTemplateResponses] diff --git a/packages/contracts/generated/api/console/instruction-generate/zod.gen.ts b/packages/contracts/generated/api/console/instruction-generate/zod.gen.ts new file mode 100644 index 0000000000..35135fdcf7 --- /dev/null +++ b/packages/contracts/generated/api/console/instruction-generate/zod.gen.ts @@ -0,0 +1,69 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * InstructionTemplatePayload + */ +export const zInstructionTemplatePayload = z.object({ + type: z.string(), +}) + +export const zJsonValue = z.unknown() + +/** + * ModelConfig + */ +export const zModelConfig = z.object({ + agent_mode_dict: zJsonValue.optional(), + annotation_reply_dict: zJsonValue.optional(), + chat_prompt_config_dict: zJsonValue.optional(), + completion_prompt_config_dict: zJsonValue.optional(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + dataset_configs_dict: zJsonValue.optional(), + dataset_query_variable: z.string().nullish(), + external_data_tools_list: zJsonValue.optional(), + file_upload_dict: zJsonValue.optional(), + model_dict: zJsonValue.optional(), + more_like_this_dict: zJsonValue.optional(), + opening_statement: z.string().nullish(), + pre_prompt: z.string().nullish(), + prompt_type: z.string().nullish(), + retriever_resource_dict: zJsonValue.optional(), + sensitive_word_avoidance_dict: zJsonValue.optional(), + speech_to_text_dict: zJsonValue.optional(), + 
suggested_questions_after_answer_dict: zJsonValue.optional(), + suggested_questions_list: zJsonValue.optional(), + text_to_speech_dict: zJsonValue.optional(), + updated_at: z.int().nullish(), + updated_by: z.string().nullish(), + user_input_form_list: zJsonValue.optional(), +}) + +/** + * InstructionGeneratePayload + */ +export const zInstructionGeneratePayload = z.object({ + current: z.string().optional().default(''), + flow_id: z.string(), + ideal_output: z.string().optional().default(''), + instruction: z.string(), + language: z.string().optional().default('javascript'), + model_config: zModelConfig, + node_id: z.string().optional().default(''), +}) + +export const zPostInstructionGenerateBody = zInstructionGeneratePayload + +/** + * Instruction generated successfully + */ +export const zPostInstructionGenerateResponse = z.record(z.string(), z.unknown()) + +export const zPostInstructionGenerateTemplateBody = zInstructionTemplatePayload + +/** + * Template retrieved successfully + */ +export const zPostInstructionGenerateTemplateResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/login/orpc.gen.ts b/packages/contracts/generated/api/console/login/orpc.gen.ts new file mode 100644 index 0000000000..b8e647a11d --- /dev/null +++ b/packages/contracts/generated/api/console/login/orpc.gen.ts @@ -0,0 +1,29 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { zPostLoginBody, zPostLoginResponse } from './zod.gen' + +/** + * Authenticate user and login + */ +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postLogin', + path: '/login', + summary: 'Authenticate user and login', + tags: ['console'], + }) + .input(z.object({ body: zPostLoginBody })) + .output(zPostLoginResponse) + +export const login = { + post, +} + +export const contract = { + login, +} diff --git 
a/packages/contracts/generated/api/console/login/types.gen.ts b/packages/contracts/generated/api/console/login/types.gen.ts new file mode 100644 index 0000000000..8646c56c2a --- /dev/null +++ b/packages/contracts/generated/api/console/login/types.gen.ts @@ -0,0 +1,27 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type LoginPayload = { + email: string + invite_token?: string | null + password: string + remember_me?: boolean +} + +export type PostLoginData = { + body: LoginPayload + path?: never + query?: never + url: '/login' +} + +export type PostLoginResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostLoginResponse = PostLoginResponses[keyof PostLoginResponses] diff --git a/packages/contracts/generated/api/console/login/zod.gen.ts b/packages/contracts/generated/api/console/login/zod.gen.ts new file mode 100644 index 0000000000..612ab8d3be --- /dev/null +++ b/packages/contracts/generated/api/console/login/zod.gen.ts @@ -0,0 +1,20 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * LoginPayload + */ +export const zLoginPayload = z.object({ + email: z.string(), + invite_token: z.string().nullish(), + password: z.string(), + remember_me: z.boolean().optional().default(false), +}) + +export const zPostLoginBody = zLoginPayload + +/** + * Success + */ +export const zPostLoginResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/logout/orpc.gen.ts b/packages/contracts/generated/api/console/logout/orpc.gen.ts new file mode 100644 index 0000000000..02ecd2c82d --- /dev/null +++ b/packages/contracts/generated/api/console/logout/orpc.gen.ts @@ -0,0 +1,23 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' + +import { zPostLogoutResponse } from './zod.gen' + +export const post = oc + .route({ + 
inputStructure: 'detailed', + method: 'POST', + operationId: 'postLogout', + path: '/logout', + tags: ['console'], + }) + .output(zPostLogoutResponse) + +export const logout = { + post, +} + +export const contract = { + logout, +} diff --git a/packages/contracts/generated/api/console/logout/types.gen.ts b/packages/contracts/generated/api/console/logout/types.gen.ts new file mode 100644 index 0000000000..9834d78dc1 --- /dev/null +++ b/packages/contracts/generated/api/console/logout/types.gen.ts @@ -0,0 +1,20 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type PostLogoutData = { + body?: never + path?: never + query?: never + url: '/logout' +} + +export type PostLogoutResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostLogoutResponse = PostLogoutResponses[keyof PostLogoutResponses] diff --git a/packages/contracts/generated/api/console/logout/zod.gen.ts b/packages/contracts/generated/api/console/logout/zod.gen.ts new file mode 100644 index 0000000000..2e2be21264 --- /dev/null +++ b/packages/contracts/generated/api/console/logout/zod.gen.ts @@ -0,0 +1,8 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * Success + */ +export const zPostLogoutResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/mcp/orpc.gen.ts b/packages/contracts/generated/api/console/mcp/orpc.gen.ts new file mode 100644 index 0000000000..211e8cb2f2 --- /dev/null +++ b/packages/contracts/generated/api/console/mcp/orpc.gen.ts @@ -0,0 +1,31 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' + +import { zGetMcpOauthCallbackResponse } from './zod.gen' + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getMcpOauthCallback', + path: '/mcp/oauth/callback', + tags: ['console'], + }) + 
.output(zGetMcpOauthCallbackResponse) + +export const callback = { + get, +} + +export const oauth = { + callback, +} + +export const mcp = { + oauth, +} + +export const contract = { + mcp, +} diff --git a/packages/contracts/generated/api/console/mcp/types.gen.ts b/packages/contracts/generated/api/console/mcp/types.gen.ts new file mode 100644 index 0000000000..4e96a66393 --- /dev/null +++ b/packages/contracts/generated/api/console/mcp/types.gen.ts @@ -0,0 +1,21 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type GetMcpOauthCallbackData = { + body?: never + path?: never + query?: never + url: '/mcp/oauth/callback' +} + +export type GetMcpOauthCallbackResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetMcpOauthCallbackResponse + = GetMcpOauthCallbackResponses[keyof GetMcpOauthCallbackResponses] diff --git a/packages/contracts/generated/api/console/mcp/zod.gen.ts b/packages/contracts/generated/api/console/mcp/zod.gen.ts new file mode 100644 index 0000000000..ade0c01f7a --- /dev/null +++ b/packages/contracts/generated/api/console/mcp/zod.gen.ts @@ -0,0 +1,8 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * Success + */ +export const zGetMcpOauthCallbackResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/notification/orpc.gen.ts b/packages/contracts/generated/api/console/notification/orpc.gen.ts new file mode 100644 index 0000000000..f7125346cf --- /dev/null +++ b/packages/contracts/generated/api/console/notification/orpc.gen.ts @@ -0,0 +1,47 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' + +import { zGetNotificationResponse, zPostNotificationDismissResponse } from './zod.gen' + +/** + * Mark a notification as dismissed for the current user. 
+ */ +export const post = oc + .route({ + description: 'Mark a notification as dismissed for the current user.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postNotificationDismiss', + path: '/notification/dismiss', + tags: ['console'], + }) + .output(zPostNotificationDismissResponse) + +export const dismiss = { + post, +} + +/** + * Return the active in-product notification for the current user in their interface language (falls back to English if unavailable). The notification is NOT marked as seen here; call POST /notification/dismiss when the user explicitly closes the modal. + */ +export const get = oc + .route({ + description: + 'Return the active in-product notification for the current user in their interface language (falls back to English if unavailable). The notification is NOT marked as seen here; call POST /notification/dismiss when the user explicitly closes the modal.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getNotification', + path: '/notification', + tags: ['console'], + }) + .output(zGetNotificationResponse) + +export const notification = { + get, + dismiss, +} + +export const contract = { + notification, +} diff --git a/packages/contracts/generated/api/console/notification/types.gen.ts b/packages/contracts/generated/api/console/notification/types.gen.ts new file mode 100644 index 0000000000..8fe661016b --- /dev/null +++ b/packages/contracts/generated/api/console/notification/types.gen.ts @@ -0,0 +1,53 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type GetNotificationData = { + body?: never + path?: never + query?: never + url: '/notification' +} + +export type GetNotificationErrors = { + 401: { + [key: string]: unknown + } +} + +export type GetNotificationError = GetNotificationErrors[keyof GetNotificationErrors] + +export type GetNotificationResponses = { + 200: { + [key: string]: 
unknown + } +} + +export type GetNotificationResponse = GetNotificationResponses[keyof GetNotificationResponses] + +export type PostNotificationDismissData = { + body?: never + path?: never + query?: never + url: '/notification/dismiss' +} + +export type PostNotificationDismissErrors = { + 401: { + [key: string]: unknown + } +} + +export type PostNotificationDismissError + = PostNotificationDismissErrors[keyof PostNotificationDismissErrors] + +export type PostNotificationDismissResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostNotificationDismissResponse + = PostNotificationDismissResponses[keyof PostNotificationDismissResponses] diff --git a/packages/contracts/generated/api/console/notification/zod.gen.ts b/packages/contracts/generated/api/console/notification/zod.gen.ts new file mode 100644 index 0000000000..c17e436ed9 --- /dev/null +++ b/packages/contracts/generated/api/console/notification/zod.gen.ts @@ -0,0 +1,13 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * Success — inspect should_show to decide whether to render the modal + */ +export const zGetNotificationResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zPostNotificationDismissResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/notion/orpc.gen.ts b/packages/contracts/generated/api/console/notion/orpc.gen.ts new file mode 100644 index 0000000000..b8de9e89f2 --- /dev/null +++ b/packages/contracts/generated/api/console/notion/orpc.gen.ts @@ -0,0 +1,84 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zGetNotionPagesByPageIdByPageTypePreviewPath, + zGetNotionPagesByPageIdByPageTypePreviewResponse, + zGetNotionPreImportPagesResponse, + zPostNotionPagesByPageIdByPageTypePreviewBody, + zPostNotionPagesByPageIdByPageTypePreviewPath, + 
zPostNotionPagesByPageIdByPageTypePreviewResponse, +} from './zod.gen' + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getNotionPagesByPageIdByPageTypePreview', + path: '/notion/pages/{page_id}/{page_type}/preview', + tags: ['console'], + }) + .input(z.object({ params: zGetNotionPagesByPageIdByPageTypePreviewPath })) + .output(zGetNotionPagesByPageIdByPageTypePreviewResponse) + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postNotionPagesByPageIdByPageTypePreview', + path: '/notion/pages/{page_id}/{page_type}/preview', + tags: ['console'], + }) + .input( + z.object({ + body: zPostNotionPagesByPageIdByPageTypePreviewBody, + params: zPostNotionPagesByPageIdByPageTypePreviewPath, + }), + ) + .output(zPostNotionPagesByPageIdByPageTypePreviewResponse) + +export const preview = { + get, + post, +} + +export const byPageType = { + preview, +} + +export const byPageId = { + byPageType, +} + +export const pages = { + byPageId, +} + +export const get2 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getNotionPreImportPages', + path: '/notion/pre-import/pages', + tags: ['console'], + }) + .output(zGetNotionPreImportPagesResponse) + +export const pages2 = { + get: get2, +} + +export const preImport = { + pages: pages2, +} + +export const notion = { + pages, + preImport, +} + +export const contract = { + notion, +} diff --git a/packages/contracts/generated/api/console/notion/types.gen.ts b/packages/contracts/generated/api/console/notion/types.gen.ts new file mode 100644 index 0000000000..c616a22286 --- /dev/null +++ b/packages/contracts/generated/api/console/notion/types.gen.ts @@ -0,0 +1,70 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type NotionEstimatePayload = { + doc_form?: string + doc_language?: string + 
notion_info_list: Array<{ + [key: string]: unknown + }> + process_rule: { + [key: string]: unknown + } +} + +export type GetNotionPagesByPageIdByPageTypePreviewData = { + body?: never + path: { + page_id: string + page_type: string + } + query?: never + url: '/notion/pages/{page_id}/{page_type}/preview' +} + +export type GetNotionPagesByPageIdByPageTypePreviewResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetNotionPagesByPageIdByPageTypePreviewResponse + = GetNotionPagesByPageIdByPageTypePreviewResponses[keyof GetNotionPagesByPageIdByPageTypePreviewResponses] + +export type PostNotionPagesByPageIdByPageTypePreviewData = { + body: NotionEstimatePayload + path: { + page_id: string + page_type: string + } + query?: never + url: '/notion/pages/{page_id}/{page_type}/preview' +} + +export type PostNotionPagesByPageIdByPageTypePreviewResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostNotionPagesByPageIdByPageTypePreviewResponse + = PostNotionPagesByPageIdByPageTypePreviewResponses[keyof PostNotionPagesByPageIdByPageTypePreviewResponses] + +export type GetNotionPreImportPagesData = { + body?: never + path?: never + query?: never + url: '/notion/pre-import/pages' +} + +export type GetNotionPreImportPagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetNotionPreImportPagesResponse + = GetNotionPreImportPagesResponses[keyof GetNotionPreImportPagesResponses] diff --git a/packages/contracts/generated/api/console/notion/zod.gen.ts b/packages/contracts/generated/api/console/notion/zod.gen.ts new file mode 100644 index 0000000000..0aad7b682c --- /dev/null +++ b/packages/contracts/generated/api/console/notion/zod.gen.ts @@ -0,0 +1,40 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * NotionEstimatePayload + */ +export const zNotionEstimatePayload = z.object({ + doc_form: z.string().optional().default('text_model'), + doc_language: 
z.string().optional().default('English'), + notion_info_list: z.array(z.record(z.string(), z.unknown())), + process_rule: z.record(z.string(), z.unknown()), +}) + +export const zGetNotionPagesByPageIdByPageTypePreviewPath = z.object({ + page_id: z.string(), + page_type: z.string(), +}) + +/** + * Success + */ +export const zGetNotionPagesByPageIdByPageTypePreviewResponse = z.record(z.string(), z.unknown()) + +export const zPostNotionPagesByPageIdByPageTypePreviewBody = zNotionEstimatePayload + +export const zPostNotionPagesByPageIdByPageTypePreviewPath = z.object({ + page_id: z.string(), + page_type: z.string(), +}) + +/** + * Success + */ +export const zPostNotionPagesByPageIdByPageTypePreviewResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetNotionPreImportPagesResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/oauth/orpc.gen.ts b/packages/contracts/generated/api/console/oauth/orpc.gen.ts new file mode 100644 index 0000000000..b9a57e3e88 --- /dev/null +++ b/packages/contracts/generated/api/console/oauth/orpc.gen.ts @@ -0,0 +1,376 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zGetOauthAuthorizeByProviderPath, + zGetOauthAuthorizeByProviderQuery, + zGetOauthAuthorizeByProviderResponse, + zGetOauthDataSourceBindingByProviderPath, + zGetOauthDataSourceBindingByProviderQuery, + zGetOauthDataSourceBindingByProviderResponse, + zGetOauthDataSourceByProviderByBindingIdSyncPath, + zGetOauthDataSourceByProviderByBindingIdSyncResponse, + zGetOauthDataSourceByProviderPath, + zGetOauthDataSourceByProviderResponse, + zGetOauthDataSourceCallbackByProviderPath, + zGetOauthDataSourceCallbackByProviderQuery, + zGetOauthDataSourceCallbackByProviderResponse, + zGetOauthLoginByProviderPath, + zGetOauthLoginByProviderQuery, + zGetOauthLoginByProviderResponse, + zGetOauthPluginByProviderIdDatasourceCallbackPath, + 
zGetOauthPluginByProviderIdDatasourceCallbackResponse, + zGetOauthPluginByProviderIdDatasourceGetAuthorizationUrlPath, + zGetOauthPluginByProviderIdDatasourceGetAuthorizationUrlResponse, + zGetOauthPluginByProviderToolAuthorizationUrlPath, + zGetOauthPluginByProviderToolAuthorizationUrlResponse, + zGetOauthPluginByProviderToolCallbackPath, + zGetOauthPluginByProviderToolCallbackResponse, + zGetOauthPluginByProviderTriggerCallbackPath, + zGetOauthPluginByProviderTriggerCallbackResponse, + zPostOauthProviderAccountResponse, + zPostOauthProviderAuthorizeResponse, + zPostOauthProviderResponse, + zPostOauthProviderTokenResponse, +} from './zod.gen' + +/** + * Handle OAuth callback and complete login process + */ +export const get = oc + .route({ + description: 'Handle OAuth callback and complete login process', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getOauthAuthorizeByProvider', + path: '/oauth/authorize/{provider}', + tags: ['console'], + }) + .input( + z.object({ + params: zGetOauthAuthorizeByProviderPath, + query: zGetOauthAuthorizeByProviderQuery.optional(), + }), + ) + .output(zGetOauthAuthorizeByProviderResponse) + +export const byProvider = { + get, +} + +export const authorize = { + byProvider, +} + +/** + * Bind OAuth data source with authorization code + */ +export const get2 = oc + .route({ + description: 'Bind OAuth data source with authorization code', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getOauthDataSourceBindingByProvider', + path: '/oauth/data-source/binding/{provider}', + tags: ['console'], + }) + .input( + z.object({ + params: zGetOauthDataSourceBindingByProviderPath, + query: zGetOauthDataSourceBindingByProviderQuery.optional(), + }), + ) + .output(zGetOauthDataSourceBindingByProviderResponse) + +export const byProvider2 = { + get: get2, +} + +export const binding = { + byProvider: byProvider2, +} + +/** + * Handle OAuth callback from data source provider + */ +export const get3 = oc + .route({ + 
description: 'Handle OAuth callback from data source provider', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getOauthDataSourceCallbackByProvider', + path: '/oauth/data-source/callback/{provider}', + tags: ['console'], + }) + .input( + z.object({ + params: zGetOauthDataSourceCallbackByProviderPath, + query: zGetOauthDataSourceCallbackByProviderQuery.optional(), + }), + ) + .output(zGetOauthDataSourceCallbackByProviderResponse) + +export const byProvider3 = { + get: get3, +} + +export const callback = { + byProvider: byProvider3, +} + +/** + * Sync data from OAuth data source + */ +export const get4 = oc + .route({ + description: 'Sync data from OAuth data source', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getOauthDataSourceByProviderByBindingIdSync', + path: '/oauth/data-source/{provider}/{binding_id}/sync', + tags: ['console'], + }) + .input(z.object({ params: zGetOauthDataSourceByProviderByBindingIdSyncPath })) + .output(zGetOauthDataSourceByProviderByBindingIdSyncResponse) + +export const sync = { + get: get4, +} + +export const byBindingId = { + sync, +} + +/** + * Get OAuth authorization URL for data source provider + */ +export const get5 = oc + .route({ + description: 'Get OAuth authorization URL for data source provider', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getOauthDataSourceByProvider', + path: '/oauth/data-source/{provider}', + tags: ['console'], + }) + .input(z.object({ params: zGetOauthDataSourceByProviderPath })) + .output(zGetOauthDataSourceByProviderResponse) + +export const byProvider4 = { + get: get5, + byBindingId, +} + +export const dataSource = { + binding, + callback, + byProvider: byProvider4, +} + +/** + * Initiate OAuth login process + */ +export const get6 = oc + .route({ + description: 'Initiate OAuth login process', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getOauthLoginByProvider', + path: '/oauth/login/{provider}', + tags: ['console'], + }) + .input( 
+ z.object({ + params: zGetOauthLoginByProviderPath, + query: zGetOauthLoginByProviderQuery.optional(), + }), + ) + .output(zGetOauthLoginByProviderResponse) + +export const byProvider5 = { + get: get6, +} + +export const login = { + byProvider: byProvider5, +} + +export const get7 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getOauthPluginByProviderIdDatasourceCallback', + path: '/oauth/plugin/{provider_id}/datasource/callback', + tags: ['console'], + }) + .input(z.object({ params: zGetOauthPluginByProviderIdDatasourceCallbackPath })) + .output(zGetOauthPluginByProviderIdDatasourceCallbackResponse) + +export const callback2 = { + get: get7, +} + +export const get8 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getOauthPluginByProviderIdDatasourceGetAuthorizationUrl', + path: '/oauth/plugin/{provider_id}/datasource/get-authorization-url', + tags: ['console'], + }) + .input(z.object({ params: zGetOauthPluginByProviderIdDatasourceGetAuthorizationUrlPath })) + .output(zGetOauthPluginByProviderIdDatasourceGetAuthorizationUrlResponse) + +export const getAuthorizationUrl = { + get: get8, +} + +export const datasource = { + callback: callback2, + getAuthorizationUrl, +} + +export const byProviderId = { + datasource, +} + +export const get9 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getOauthPluginByProviderToolAuthorizationUrl', + path: '/oauth/plugin/{provider}/tool/authorization-url', + tags: ['console'], + }) + .input(z.object({ params: zGetOauthPluginByProviderToolAuthorizationUrlPath })) + .output(zGetOauthPluginByProviderToolAuthorizationUrlResponse) + +export const authorizationUrl = { + get: get9, +} + +export const get10 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getOauthPluginByProviderToolCallback', + path: '/oauth/plugin/{provider}/tool/callback', + tags: ['console'], + }) + .input(z.object({ params: 
zGetOauthPluginByProviderToolCallbackPath })) + .output(zGetOauthPluginByProviderToolCallbackResponse) + +export const callback3 = { + get: get10, +} + +export const tool = { + authorizationUrl, + callback: callback3, +} + +/** + * Handle OAuth callback for trigger provider + */ +export const get11 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getOauthPluginByProviderTriggerCallback', + path: '/oauth/plugin/{provider}/trigger/callback', + summary: 'Handle OAuth callback for trigger provider', + tags: ['console'], + }) + .input(z.object({ params: zGetOauthPluginByProviderTriggerCallbackPath })) + .output(zGetOauthPluginByProviderTriggerCallbackResponse) + +export const callback4 = { + get: get11, +} + +export const trigger = { + callback: callback4, +} + +export const byProvider6 = { + tool, + trigger, +} + +export const plugin = { + byProviderId, + byProvider: byProvider6, +} + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postOauthProviderAccount', + path: '/oauth/provider/account', + tags: ['console'], + }) + .output(zPostOauthProviderAccountResponse) + +export const account = { + post, +} + +export const post2 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postOauthProviderAuthorize', + path: '/oauth/provider/authorize', + tags: ['console'], + }) + .output(zPostOauthProviderAuthorizeResponse) + +export const authorize2 = { + post: post2, +} + +export const post3 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postOauthProviderToken', + path: '/oauth/provider/token', + tags: ['console'], + }) + .output(zPostOauthProviderTokenResponse) + +export const token = { + post: post3, +} + +export const post4 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postOauthProvider', + path: '/oauth/provider', + tags: ['console'], + }) + .output(zPostOauthProviderResponse) + +export const provider = { 
+ post: post4, + account, + authorize: authorize2, + token, +} + +export const oauth = { + authorize, + dataSource, + login, + plugin, + provider, +} + +export const contract = { + oauth, +} diff --git a/packages/contracts/generated/api/console/oauth/types.gen.ts b/packages/contracts/generated/api/console/oauth/types.gen.ts new file mode 100644 index 0000000000..7091cdab7c --- /dev/null +++ b/packages/contracts/generated/api/console/oauth/types.gen.ts @@ -0,0 +1,340 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type OAuthDataSourceBindingResponse = { + result: string +} + +export type OAuthDataSourceResponse = { + data: string +} + +export type OAuthDataSourceSyncResponse = { + result: string +} + +export type GetOauthAuthorizeByProviderData = { + body?: never + path: { + provider: string + } + query?: { + code?: string + state?: string + } + url: '/oauth/authorize/{provider}' +} + +export type GetOauthAuthorizeByProviderErrors = { + 400: { + [key: string]: unknown + } +} + +export type GetOauthAuthorizeByProviderError + = GetOauthAuthorizeByProviderErrors[keyof GetOauthAuthorizeByProviderErrors] + +export type GetOauthAuthorizeByProviderResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetOauthAuthorizeByProviderResponse + = GetOauthAuthorizeByProviderResponses[keyof GetOauthAuthorizeByProviderResponses] + +export type GetOauthDataSourceBindingByProviderData = { + body?: never + path: { + provider: string + } + query?: { + code?: string + } + url: '/oauth/data-source/binding/{provider}' +} + +export type GetOauthDataSourceBindingByProviderErrors = { + 400: { + [key: string]: unknown + } +} + +export type GetOauthDataSourceBindingByProviderError + = GetOauthDataSourceBindingByProviderErrors[keyof GetOauthDataSourceBindingByProviderErrors] + +export type GetOauthDataSourceBindingByProviderResponses = { + 200: 
OAuthDataSourceBindingResponse +} + +export type GetOauthDataSourceBindingByProviderResponse + = GetOauthDataSourceBindingByProviderResponses[keyof GetOauthDataSourceBindingByProviderResponses] + +export type GetOauthDataSourceCallbackByProviderData = { + body?: never + path: { + provider: string + } + query?: { + code?: string + error?: string + } + url: '/oauth/data-source/callback/{provider}' +} + +export type GetOauthDataSourceCallbackByProviderErrors = { + 400: { + [key: string]: unknown + } +} + +export type GetOauthDataSourceCallbackByProviderError + = GetOauthDataSourceCallbackByProviderErrors[keyof GetOauthDataSourceCallbackByProviderErrors] + +export type GetOauthDataSourceCallbackByProviderResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetOauthDataSourceCallbackByProviderResponse + = GetOauthDataSourceCallbackByProviderResponses[keyof GetOauthDataSourceCallbackByProviderResponses] + +export type GetOauthDataSourceByProviderData = { + body?: never + path: { + provider: string + } + query?: never + url: '/oauth/data-source/{provider}' +} + +export type GetOauthDataSourceByProviderErrors = { + 400: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } +} + +export type GetOauthDataSourceByProviderError + = GetOauthDataSourceByProviderErrors[keyof GetOauthDataSourceByProviderErrors] + +export type GetOauthDataSourceByProviderResponses = { + 200: OAuthDataSourceResponse +} + +export type GetOauthDataSourceByProviderResponse + = GetOauthDataSourceByProviderResponses[keyof GetOauthDataSourceByProviderResponses] + +export type GetOauthDataSourceByProviderByBindingIdSyncData = { + body?: never + path: { + provider: string + binding_id: string + } + query?: never + url: '/oauth/data-source/{provider}/{binding_id}/sync' +} + +export type GetOauthDataSourceByProviderByBindingIdSyncErrors = { + 400: { + [key: string]: unknown + } +} + +export type GetOauthDataSourceByProviderByBindingIdSyncError + = 
GetOauthDataSourceByProviderByBindingIdSyncErrors[keyof GetOauthDataSourceByProviderByBindingIdSyncErrors] + +export type GetOauthDataSourceByProviderByBindingIdSyncResponses = { + 200: OAuthDataSourceSyncResponse +} + +export type GetOauthDataSourceByProviderByBindingIdSyncResponse + = GetOauthDataSourceByProviderByBindingIdSyncResponses[keyof GetOauthDataSourceByProviderByBindingIdSyncResponses] + +export type GetOauthLoginByProviderData = { + body?: never + path: { + provider: string + } + query?: { + invite_token?: string + } + url: '/oauth/login/{provider}' +} + +export type GetOauthLoginByProviderErrors = { + 400: { + [key: string]: unknown + } +} + +export type GetOauthLoginByProviderError + = GetOauthLoginByProviderErrors[keyof GetOauthLoginByProviderErrors] + +export type GetOauthLoginByProviderResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetOauthLoginByProviderResponse + = GetOauthLoginByProviderResponses[keyof GetOauthLoginByProviderResponses] + +export type GetOauthPluginByProviderIdDatasourceCallbackData = { + body?: never + path: { + provider_id: string + } + query?: never + url: '/oauth/plugin/{provider_id}/datasource/callback' +} + +export type GetOauthPluginByProviderIdDatasourceCallbackResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetOauthPluginByProviderIdDatasourceCallbackResponse + = GetOauthPluginByProviderIdDatasourceCallbackResponses[keyof GetOauthPluginByProviderIdDatasourceCallbackResponses] + +export type GetOauthPluginByProviderIdDatasourceGetAuthorizationUrlData = { + body?: never + path: { + provider_id: string + } + query?: never + url: '/oauth/plugin/{provider_id}/datasource/get-authorization-url' +} + +export type GetOauthPluginByProviderIdDatasourceGetAuthorizationUrlResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetOauthPluginByProviderIdDatasourceGetAuthorizationUrlResponse + = GetOauthPluginByProviderIdDatasourceGetAuthorizationUrlResponses[keyof 
GetOauthPluginByProviderIdDatasourceGetAuthorizationUrlResponses] + +export type GetOauthPluginByProviderToolAuthorizationUrlData = { + body?: never + path: { + provider: string + } + query?: never + url: '/oauth/plugin/{provider}/tool/authorization-url' +} + +export type GetOauthPluginByProviderToolAuthorizationUrlResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetOauthPluginByProviderToolAuthorizationUrlResponse + = GetOauthPluginByProviderToolAuthorizationUrlResponses[keyof GetOauthPluginByProviderToolAuthorizationUrlResponses] + +export type GetOauthPluginByProviderToolCallbackData = { + body?: never + path: { + provider: string + } + query?: never + url: '/oauth/plugin/{provider}/tool/callback' +} + +export type GetOauthPluginByProviderToolCallbackResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetOauthPluginByProviderToolCallbackResponse + = GetOauthPluginByProviderToolCallbackResponses[keyof GetOauthPluginByProviderToolCallbackResponses] + +export type GetOauthPluginByProviderTriggerCallbackData = { + body?: never + path: { + provider: string + } + query?: never + url: '/oauth/plugin/{provider}/trigger/callback' +} + +export type GetOauthPluginByProviderTriggerCallbackResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetOauthPluginByProviderTriggerCallbackResponse + = GetOauthPluginByProviderTriggerCallbackResponses[keyof GetOauthPluginByProviderTriggerCallbackResponses] + +export type PostOauthProviderData = { + body?: never + path?: never + query?: never + url: '/oauth/provider' +} + +export type PostOauthProviderResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostOauthProviderResponse = PostOauthProviderResponses[keyof PostOauthProviderResponses] + +export type PostOauthProviderAccountData = { + body?: never + path?: never + query?: never + url: '/oauth/provider/account' +} + +export type PostOauthProviderAccountResponses = { + 200: { + [key: string]: unknown + } +} + 
+export type PostOauthProviderAccountResponse + = PostOauthProviderAccountResponses[keyof PostOauthProviderAccountResponses] + +export type PostOauthProviderAuthorizeData = { + body?: never + path?: never + query?: never + url: '/oauth/provider/authorize' +} + +export type PostOauthProviderAuthorizeResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostOauthProviderAuthorizeResponse + = PostOauthProviderAuthorizeResponses[keyof PostOauthProviderAuthorizeResponses] + +export type PostOauthProviderTokenData = { + body?: never + path?: never + query?: never + url: '/oauth/provider/token' +} + +export type PostOauthProviderTokenResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostOauthProviderTokenResponse + = PostOauthProviderTokenResponses[keyof PostOauthProviderTokenResponses] diff --git a/packages/contracts/generated/api/console/oauth/zod.gen.ts b/packages/contracts/generated/api/console/oauth/zod.gen.ts new file mode 100644 index 0000000000..22f2c4bd76 --- /dev/null +++ b/packages/contracts/generated/api/console/oauth/zod.gen.ts @@ -0,0 +1,171 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * OAuthDataSourceBindingResponse + */ +export const zOAuthDataSourceBindingResponse = z.object({ + result: z.string(), +}) + +/** + * OAuthDataSourceResponse + */ +export const zOAuthDataSourceResponse = z.object({ + data: z.string(), +}) + +/** + * OAuthDataSourceSyncResponse + */ +export const zOAuthDataSourceSyncResponse = z.object({ + result: z.string(), +}) + +export const zGetOauthAuthorizeByProviderPath = z.object({ + provider: z.string(), +}) + +export const zGetOauthAuthorizeByProviderQuery = z.object({ + code: z.string().optional(), + state: z.string().optional(), +}) + +/** + * Success + */ +export const zGetOauthAuthorizeByProviderResponse = z.record(z.string(), z.unknown()) + +export const zGetOauthDataSourceBindingByProviderPath = z.object({ + provider: z.string(), +}) + +export 
const zGetOauthDataSourceBindingByProviderQuery = z.object({ + code: z.string().optional(), +}) + +/** + * Data source binding success + */ +export const zGetOauthDataSourceBindingByProviderResponse = zOAuthDataSourceBindingResponse + +export const zGetOauthDataSourceCallbackByProviderPath = z.object({ + provider: z.string(), +}) + +export const zGetOauthDataSourceCallbackByProviderQuery = z.object({ + code: z.string().optional(), + error: z.string().optional(), +}) + +/** + * Success + */ +export const zGetOauthDataSourceCallbackByProviderResponse = z.record(z.string(), z.unknown()) + +export const zGetOauthDataSourceByProviderPath = z.object({ + provider: z.string(), +}) + +/** + * Authorization URL or internal setup success + */ +export const zGetOauthDataSourceByProviderResponse = zOAuthDataSourceResponse + +export const zGetOauthDataSourceByProviderByBindingIdSyncPath = z.object({ + provider: z.string(), + binding_id: z.string(), +}) + +/** + * Data source sync success + */ +export const zGetOauthDataSourceByProviderByBindingIdSyncResponse = zOAuthDataSourceSyncResponse + +export const zGetOauthLoginByProviderPath = z.object({ + provider: z.string(), +}) + +export const zGetOauthLoginByProviderQuery = z.object({ + invite_token: z.string().optional(), +}) + +/** + * Success + */ +export const zGetOauthLoginByProviderResponse = z.record(z.string(), z.unknown()) + +export const zGetOauthPluginByProviderIdDatasourceCallbackPath = z.object({ + provider_id: z.string(), +}) + +/** + * Success + */ +export const zGetOauthPluginByProviderIdDatasourceCallbackResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetOauthPluginByProviderIdDatasourceGetAuthorizationUrlPath = z.object({ + provider_id: z.string(), +}) + +/** + * Success + */ +export const zGetOauthPluginByProviderIdDatasourceGetAuthorizationUrlResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetOauthPluginByProviderToolAuthorizationUrlPath = z.object({ + provider: 
z.string(), +}) + +/** + * Success + */ +export const zGetOauthPluginByProviderToolAuthorizationUrlResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetOauthPluginByProviderToolCallbackPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetOauthPluginByProviderToolCallbackResponse = z.record(z.string(), z.unknown()) + +export const zGetOauthPluginByProviderTriggerCallbackPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetOauthPluginByProviderTriggerCallbackResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zPostOauthProviderResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zPostOauthProviderAccountResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zPostOauthProviderAuthorizeResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zPostOauthProviderTokenResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/rag/orpc.gen.ts b/packages/contracts/generated/api/console/rag/orpc.gen.ts new file mode 100644 index 0000000000..a642a91ba1 --- /dev/null +++ b/packages/contracts/generated/api/console/rag/orpc.gen.ts @@ -0,0 +1,1230 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zDeleteRagPipelineCustomizedTemplatesByTemplateIdPath, + zDeleteRagPipelineCustomizedTemplatesByTemplateIdResponse, + zDeleteRagPipelinesByPipelineIdWorkflowsByWorkflowIdPath, + zDeleteRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponse, + zDeleteRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesPath, + zDeleteRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponse, + zDeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdPath, + zDeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponse, + 
zDeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesPath, + zDeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesResponse, + zGetRagPipelinesByPipelineIdExportsPath, + zGetRagPipelinesByPipelineIdExportsResponse, + zGetRagPipelinesByPipelineIdWorkflowRunsByRunIdNodeExecutionsPath, + zGetRagPipelinesByPipelineIdWorkflowRunsByRunIdNodeExecutionsResponse, + zGetRagPipelinesByPipelineIdWorkflowRunsByRunIdPath, + zGetRagPipelinesByPipelineIdWorkflowRunsByRunIdResponse, + zGetRagPipelinesByPipelineIdWorkflowRunsPath, + zGetRagPipelinesByPipelineIdWorkflowRunsResponse, + zGetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypePath, + zGetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponse, + zGetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsPath, + zGetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsResponse, + zGetRagPipelinesByPipelineIdWorkflowsDraftEnvironmentVariablesPath, + zGetRagPipelinesByPipelineIdWorkflowsDraftEnvironmentVariablesResponse, + zGetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdLastRunPath, + zGetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdLastRunResponse, + zGetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesPath, + zGetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponse, + zGetRagPipelinesByPipelineIdWorkflowsDraftPath, + zGetRagPipelinesByPipelineIdWorkflowsDraftPreProcessingParametersPath, + zGetRagPipelinesByPipelineIdWorkflowsDraftPreProcessingParametersResponse, + zGetRagPipelinesByPipelineIdWorkflowsDraftProcessingParametersPath, + zGetRagPipelinesByPipelineIdWorkflowsDraftProcessingParametersResponse, + zGetRagPipelinesByPipelineIdWorkflowsDraftResponse, + zGetRagPipelinesByPipelineIdWorkflowsDraftSystemVariablesPath, + zGetRagPipelinesByPipelineIdWorkflowsDraftSystemVariablesResponse, + zGetRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdPath, + 
zGetRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponse, + zGetRagPipelinesByPipelineIdWorkflowsDraftVariablesPath, + zGetRagPipelinesByPipelineIdWorkflowsDraftVariablesResponse, + zGetRagPipelinesByPipelineIdWorkflowsPath, + zGetRagPipelinesByPipelineIdWorkflowsPublishedPreProcessingParametersPath, + zGetRagPipelinesByPipelineIdWorkflowsPublishedPreProcessingParametersResponse, + zGetRagPipelinesByPipelineIdWorkflowsPublishedProcessingParametersPath, + zGetRagPipelinesByPipelineIdWorkflowsPublishedProcessingParametersResponse, + zGetRagPipelinesByPipelineIdWorkflowsPublishPath, + zGetRagPipelinesByPipelineIdWorkflowsPublishResponse, + zGetRagPipelinesByPipelineIdWorkflowsResponse, + zGetRagPipelinesDatasourcePluginsResponse, + zGetRagPipelinesImportsByPipelineIdCheckDependenciesPath, + zGetRagPipelinesImportsByPipelineIdCheckDependenciesResponse, + zGetRagPipelinesRecommendedPluginsResponse, + zGetRagPipelineTemplatesByTemplateIdPath, + zGetRagPipelineTemplatesByTemplateIdResponse, + zGetRagPipelineTemplatesResponse, + zPatchRagPipelineCustomizedTemplatesByTemplateIdPath, + zPatchRagPipelineCustomizedTemplatesByTemplateIdResponse, + zPatchRagPipelinesByPipelineIdWorkflowsByWorkflowIdPath, + zPatchRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponse, + zPatchRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdPath, + zPatchRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponse, + zPostRagPipelineCustomizedTemplatesByTemplateIdPath, + zPostRagPipelineCustomizedTemplatesByTemplateIdResponse, + zPostRagPipelineDatasetBody, + zPostRagPipelineDatasetResponse, + zPostRagPipelineEmptyDatasetResponse, + zPostRagPipelinesByPipelineIdCustomizedPublishBody, + zPostRagPipelinesByPipelineIdCustomizedPublishPath, + zPostRagPipelinesByPipelineIdCustomizedPublishResponse, + zPostRagPipelinesByPipelineIdWorkflowRunsTasksByTaskIdStopPath, + zPostRagPipelinesByPipelineIdWorkflowRunsTasksByTaskIdStopResponse, + 
zPostRagPipelinesByPipelineIdWorkflowsByWorkflowIdRestorePath, + zPostRagPipelinesByPipelineIdWorkflowsByWorkflowIdRestoreResponse, + zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunBody, + zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunPath, + zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunResponse, + zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectBody, + zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectPath, + zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectResponse, + zPostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunBody, + zPostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunPath, + zPostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunResponse, + zPostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunBody, + zPostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunPath, + zPostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunResponse, + zPostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunBody, + zPostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunPath, + zPostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunResponse, + zPostRagPipelinesByPipelineIdWorkflowsDraftPath, + zPostRagPipelinesByPipelineIdWorkflowsDraftResponse, + zPostRagPipelinesByPipelineIdWorkflowsDraftRunBody, + zPostRagPipelinesByPipelineIdWorkflowsDraftRunPath, + zPostRagPipelinesByPipelineIdWorkflowsDraftRunResponse, + zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewBody, + zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewPath, + zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewResponse, + zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunBody, + zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunPath, + 
zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunResponse, + zPostRagPipelinesByPipelineIdWorkflowsPublishedRunBody, + zPostRagPipelinesByPipelineIdWorkflowsPublishedRunPath, + zPostRagPipelinesByPipelineIdWorkflowsPublishedRunResponse, + zPostRagPipelinesByPipelineIdWorkflowsPublishPath, + zPostRagPipelinesByPipelineIdWorkflowsPublishResponse, + zPostRagPipelinesImportsBody, + zPostRagPipelinesImportsByImportIdConfirmPath, + zPostRagPipelinesImportsByImportIdConfirmResponse, + zPostRagPipelinesImportsResponse, + zPostRagPipelinesTransformDatasetsByDatasetIdPath, + zPostRagPipelinesTransformDatasetsByDatasetIdResponse, + zPutRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResetPath, + zPutRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResetResponse, +} from './zod.gen' + +export const delete_ = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteRagPipelineCustomizedTemplatesByTemplateId', + path: '/rag/pipeline/customized/templates/{template_id}', + tags: ['console'], + }) + .input(z.object({ params: zDeleteRagPipelineCustomizedTemplatesByTemplateIdPath })) + .output(zDeleteRagPipelineCustomizedTemplatesByTemplateIdResponse) + +export const patch = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchRagPipelineCustomizedTemplatesByTemplateId', + path: '/rag/pipeline/customized/templates/{template_id}', + tags: ['console'], + }) + .input(z.object({ params: zPatchRagPipelineCustomizedTemplatesByTemplateIdPath })) + .output(zPatchRagPipelineCustomizedTemplatesByTemplateIdResponse) + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelineCustomizedTemplatesByTemplateId', + path: '/rag/pipeline/customized/templates/{template_id}', + tags: ['console'], + }) + .input(z.object({ params: zPostRagPipelineCustomizedTemplatesByTemplateIdPath })) + 
.output(zPostRagPipelineCustomizedTemplatesByTemplateIdResponse) + +export const byTemplateId = { + delete: delete_, + patch, + post, +} + +export const templates = { + byTemplateId, +} + +export const customized = { + templates, +} + +export const post2 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelineDataset', + path: '/rag/pipeline/dataset', + tags: ['console'], + }) + .input(z.object({ body: zPostRagPipelineDatasetBody })) + .output(zPostRagPipelineDatasetResponse) + +export const dataset = { + post: post2, +} + +export const post3 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelineEmptyDataset', + path: '/rag/pipeline/empty-dataset', + tags: ['console'], + }) + .output(zPostRagPipelineEmptyDatasetResponse) + +export const emptyDataset = { + post: post3, +} + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelineTemplatesByTemplateId', + path: '/rag/pipeline/templates/{template_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelineTemplatesByTemplateIdPath })) + .output(zGetRagPipelineTemplatesByTemplateIdResponse) + +export const byTemplateId2 = { + get, +} + +export const get2 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelineTemplates', + path: '/rag/pipeline/templates', + tags: ['console'], + }) + .output(zGetRagPipelineTemplatesResponse) + +export const templates2 = { + get: get2, + byTemplateId: byTemplateId2, +} + +export const pipeline = { + customized, + dataset, + emptyDataset, + templates: templates2, +} + +export const get3 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesDatasourcePlugins', + path: '/rag/pipelines/datasource-plugins', + tags: ['console'], + }) + .output(zGetRagPipelinesDatasourcePluginsResponse) + +export const datasourcePlugins = { + get: get3, +} + +export const post4 = oc + 
.route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesImportsByImportIdConfirm', + path: '/rag/pipelines/imports/{import_id}/confirm', + tags: ['console'], + }) + .input(z.object({ params: zPostRagPipelinesImportsByImportIdConfirmPath })) + .output(zPostRagPipelinesImportsByImportIdConfirmResponse) + +export const confirm = { + post: post4, +} + +export const byImportId = { + confirm, +} + +export const get4 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesImportsByPipelineIdCheckDependencies', + path: '/rag/pipelines/imports/{pipeline_id}/check-dependencies', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesImportsByPipelineIdCheckDependenciesPath })) + .output(zGetRagPipelinesImportsByPipelineIdCheckDependenciesResponse) + +export const checkDependencies = { + get: get4, +} + +export const byPipelineId = { + checkDependencies, +} + +export const post5 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesImports', + path: '/rag/pipelines/imports', + tags: ['console'], + }) + .input(z.object({ body: zPostRagPipelinesImportsBody })) + .output(zPostRagPipelinesImportsResponse) + +export const imports = { + post: post5, + byImportId, + byPipelineId, +} + +export const get5 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesRecommendedPlugins', + path: '/rag/pipelines/recommended-plugins', + tags: ['console'], + }) + .output(zGetRagPipelinesRecommendedPluginsResponse) + +export const recommendedPlugins = { + get: get5, +} + +export const post6 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesTransformDatasetsByDatasetId', + path: '/rag/pipelines/transform/datasets/{dataset_id}', + tags: ['console'], + }) + .input(z.object({ params: zPostRagPipelinesTransformDatasetsByDatasetIdPath })) + 
.output(zPostRagPipelinesTransformDatasetsByDatasetIdResponse) + +export const byDatasetId = { + post: post6, +} + +export const datasets = { + byDatasetId, +} + +export const transform = { + datasets, +} + +export const post7 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdCustomizedPublish', + path: '/rag/pipelines/{pipeline_id}/customized/publish', + tags: ['console'], + }) + .input( + z.object({ + body: zPostRagPipelinesByPipelineIdCustomizedPublishBody, + params: zPostRagPipelinesByPipelineIdCustomizedPublishPath, + }), + ) + .output(zPostRagPipelinesByPipelineIdCustomizedPublishResponse) + +export const publish = { + post: post7, +} + +export const customized2 = { + publish, +} + +export const get6 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdExports', + path: '/rag/pipelines/{pipeline_id}/exports', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdExportsPath })) + .output(zGetRagPipelinesByPipelineIdExportsResponse) + +export const exports_ = { + get: get6, +} + +/** + * Stop workflow task + */ +export const post8 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdWorkflowRunsTasksByTaskIdStop', + path: '/rag/pipelines/{pipeline_id}/workflow-runs/tasks/{task_id}/stop', + summary: 'Stop workflow task', + tags: ['console'], + }) + .input(z.object({ params: zPostRagPipelinesByPipelineIdWorkflowRunsTasksByTaskIdStopPath })) + .output(zPostRagPipelinesByPipelineIdWorkflowRunsTasksByTaskIdStopResponse) + +export const stop = { + post: post8, +} + +export const byTaskId = { + stop, +} + +export const tasks = { + byTaskId, +} + +/** + * Get workflow run node execution list + */ +export const get7 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowRunsByRunIdNodeExecutions', + path: 
'/rag/pipelines/{pipeline_id}/workflow-runs/{run_id}/node-executions', + summary: 'Get workflow run node execution list', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdWorkflowRunsByRunIdNodeExecutionsPath })) + .output(zGetRagPipelinesByPipelineIdWorkflowRunsByRunIdNodeExecutionsResponse) + +export const nodeExecutions = { + get: get7, +} + +/** + * Get workflow run detail + */ +export const get8 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowRunsByRunId', + path: '/rag/pipelines/{pipeline_id}/workflow-runs/{run_id}', + summary: 'Get workflow run detail', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdWorkflowRunsByRunIdPath })) + .output(zGetRagPipelinesByPipelineIdWorkflowRunsByRunIdResponse) + +export const byRunId = { + get: get8, + nodeExecutions, +} + +/** + * Get workflow run list + */ +export const get9 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowRuns', + path: '/rag/pipelines/{pipeline_id}/workflow-runs', + summary: 'Get workflow run list', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdWorkflowRunsPath })) + .output(zGetRagPipelinesByPipelineIdWorkflowRunsResponse) + +export const workflowRuns = { + get: get9, + tasks, + byRunId, +} + +/** + * Get default block config + */ +export const get10 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsByBlockType', + path: '/rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs/{block_type}', + summary: 'Get default block config', + tags: ['console'], + }) + .input( + z.object({ + params: zGetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypePath, + }), + ) + .output(zGetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponse) 
+ +export const byBlockType = { + get: get10, +} + +/** + * Get default block config + */ +export const get11 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigs', + path: '/rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs', + summary: 'Get default block config', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsPath })) + .output(zGetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsResponse) + +export const defaultWorkflowBlockConfigs = { + get: get11, + byBlockType, +} + +/** + * Run rag pipeline datasource + */ +export const post9 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRun', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/datasource/nodes/{node_id}/run', + summary: 'Run rag pipeline datasource', + tags: ['console'], + }) + .input( + z.object({ + body: zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunBody, + params: zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunPath, + }), + ) + .output(zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunResponse) + +export const run = { + post: post9, +} + +export const byNodeId = { + run, +} + +export const nodes = { + byNodeId, +} + +/** + * Set datasource variables + */ +export const post10 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspect', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/datasource/variables-inspect', + summary: 'Set datasource variables', + tags: ['console'], + }) + .input( + z.object({ + body: zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectBody, + params: 
zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectPath, + }), + ) + .output(zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectResponse) + +export const variablesInspect = { + post: post10, +} + +export const datasource = { + nodes, + variablesInspect, +} + +export const get12 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsDraftEnvironmentVariables', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/environment-variables', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdWorkflowsDraftEnvironmentVariablesPath })) + .output(zGetRagPipelinesByPipelineIdWorkflowsDraftEnvironmentVariablesResponse) + +export const environmentVariables = { + get: get12, +} + +/** + * Run draft workflow iteration node + */ +export const post11 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRun', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/iteration/nodes/{node_id}/run', + summary: 'Run draft workflow iteration node', + tags: ['console'], + }) + .input( + z.object({ + body: zPostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunBody, + params: zPostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunPath, + }), + ) + .output(zPostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunResponse) + +export const run2 = { + post: post11, +} + +export const byNodeId2 = { + run: run2, +} + +export const nodes2 = { + byNodeId: byNodeId2, +} + +export const iteration = { + nodes: nodes2, +} + +/** + * Run draft workflow loop node + */ +export const post12 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRun', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/loop/nodes/{node_id}/run', + summary: 'Run draft workflow loop node', + 
tags: ['console'], + }) + .input( + z.object({ + body: zPostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunBody, + params: zPostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunPath, + }), + ) + .output(zPostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunResponse) + +export const run3 = { + post: post12, +} + +export const byNodeId3 = { + run: run3, +} + +export const nodes3 = { + byNodeId: byNodeId3, +} + +export const loop = { + nodes: nodes3, +} + +export const get13 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdLastRun', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/last-run', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdLastRunPath })) + .output(zGetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdLastRunResponse) + +export const lastRun = { + get: get13, +} + +/** + * Run draft workflow node + */ +export const post13 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRun', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/run', + summary: 'Run draft workflow node', + tags: ['console'], + }) + .input( + z.object({ + body: zPostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunBody, + params: zPostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunPath, + }), + ) + .output(zPostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunResponse) + +export const run4 = { + post: post13, +} + +export const delete2 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariables', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/variables', + tags: ['console'], + }) + .input( + z.object({ params: 
zDeleteRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesPath }), + ) + .output(zDeleteRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponse) + +export const get14 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariables', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/variables', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesPath })) + .output(zGetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponse) + +export const variables = { + delete: delete2, + get: get14, +} + +export const byNodeId4 = { + lastRun, + run: run4, + variables, +} + +export const nodes4 = { + byNodeId: byNodeId4, +} + +/** + * Get first step parameters of rag pipeline + */ +export const get15 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsDraftPreProcessingParameters', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/pre-processing/parameters', + summary: 'Get first step parameters of rag pipeline', + tags: ['console'], + }) + .input( + z.object({ params: zGetRagPipelinesByPipelineIdWorkflowsDraftPreProcessingParametersPath }), + ) + .output(zGetRagPipelinesByPipelineIdWorkflowsDraftPreProcessingParametersResponse) + +export const parameters = { + get: get15, +} + +export const preProcessing = { + parameters, +} + +/** + * Get second step parameters of rag pipeline + */ +export const get16 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsDraftProcessingParameters', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/processing/parameters', + summary: 'Get second step parameters of rag pipeline', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdWorkflowsDraftProcessingParametersPath })) + 
.output(zGetRagPipelinesByPipelineIdWorkflowsDraftProcessingParametersResponse) + +export const parameters2 = { + get: get16, +} + +export const processing = { + parameters: parameters2, +} + +/** + * Run draft workflow + */ +export const post14 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdWorkflowsDraftRun', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/run', + summary: 'Run draft workflow', + tags: ['console'], + }) + .input( + z.object({ + body: zPostRagPipelinesByPipelineIdWorkflowsDraftRunBody, + params: zPostRagPipelinesByPipelineIdWorkflowsDraftRunPath, + }), + ) + .output(zPostRagPipelinesByPipelineIdWorkflowsDraftRunResponse) + +export const run5 = { + post: post14, +} + +export const get17 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsDraftSystemVariables', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/system-variables', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdWorkflowsDraftSystemVariablesPath })) + .output(zGetRagPipelinesByPipelineIdWorkflowsDraftSystemVariablesResponse) + +export const systemVariables = { + get: get17, +} + +export const put = oc + .route({ + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdReset', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id}/reset', + tags: ['console'], + }) + .input( + z.object({ params: zPutRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResetPath }), + ) + .output(zPutRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResetResponse) + +export const reset = { + put, +} + +export const delete3 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableId', + path: 
'/rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id}', + tags: ['console'], + }) + .input( + z.object({ params: zDeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdPath }), + ) + .output(zDeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponse) + +export const get18 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableId', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdPath })) + .output(zGetRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponse) + +export const patch2 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableId', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id}', + tags: ['console'], + }) + .input( + z.object({ params: zPatchRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdPath }), + ) + .output(zPatchRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponse) + +export const byVariableId = { + delete: delete3, + get: get18, + patch: patch2, + reset, +} + +export const delete4 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteRagPipelinesByPipelineIdWorkflowsDraftVariables', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/variables', + tags: ['console'], + }) + .input(z.object({ params: zDeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesPath })) + .output(zDeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesResponse) + +export const get19 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsDraftVariables', + path: '/rag/pipelines/{pipeline_id}/workflows/draft/variables', + tags: ['console'], + }) + .input(z.object({ 
params: zGetRagPipelinesByPipelineIdWorkflowsDraftVariablesPath })) + .output(zGetRagPipelinesByPipelineIdWorkflowsDraftVariablesResponse) + +export const variables2 = { + delete: delete4, + get: get19, + byVariableId, +} + +/** + * Get draft rag pipeline's workflow + */ +export const get20 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsDraft', + path: '/rag/pipelines/{pipeline_id}/workflows/draft', + summary: 'Get draft rag pipeline\'s workflow', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdWorkflowsDraftPath })) + .output(zGetRagPipelinesByPipelineIdWorkflowsDraftResponse) + +/** + * Sync draft workflow + */ +export const post15 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdWorkflowsDraft', + path: '/rag/pipelines/{pipeline_id}/workflows/draft', + summary: 'Sync draft workflow', + tags: ['console'], + }) + .input(z.object({ params: zPostRagPipelinesByPipelineIdWorkflowsDraftPath })) + .output(zPostRagPipelinesByPipelineIdWorkflowsDraftResponse) + +export const draft = { + get: get20, + post: post15, + datasource, + environmentVariables, + iteration, + loop, + nodes: nodes4, + preProcessing, + processing, + run: run5, + systemVariables, + variables: variables2, +} + +/** + * Get published pipeline + */ +export const get21 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsPublish', + path: '/rag/pipelines/{pipeline_id}/workflows/publish', + summary: 'Get published pipeline', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdWorkflowsPublishPath })) + .output(zGetRagPipelinesByPipelineIdWorkflowsPublishResponse) + +/** + * Publish workflow + */ +export const post16 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdWorkflowsPublish', + path: 
'/rag/pipelines/{pipeline_id}/workflows/publish', + summary: 'Publish workflow', + tags: ['console'], + }) + .input(z.object({ params: zPostRagPipelinesByPipelineIdWorkflowsPublishPath })) + .output(zPostRagPipelinesByPipelineIdWorkflowsPublishResponse) + +export const publish2 = { + get: get21, + post: post16, +} + +/** + * Run datasource content preview + */ +export const post17 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreview', + path: '/rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/preview', + summary: 'Run datasource content preview', + tags: ['console'], + }) + .input( + z.object({ + body: zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewBody, + params: zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewPath, + }), + ) + .output(zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewResponse) + +export const preview = { + post: post17, +} + +/** + * Run rag pipeline datasource + */ +export const post18 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRun', + path: '/rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/run', + summary: 'Run rag pipeline datasource', + tags: ['console'], + }) + .input( + z.object({ + body: zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunBody, + params: zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunPath, + }), + ) + .output(zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunResponse) + +export const run6 = { + post: post18, +} + +export const byNodeId5 = { + preview, + run: run6, +} + +export const nodes5 = { + byNodeId: byNodeId5, +} + +export const datasource2 = { + nodes: nodes5, +} + +/** + * Get first step parameters of 
rag pipeline + */ +export const get22 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsPublishedPreProcessingParameters', + path: '/rag/pipelines/{pipeline_id}/workflows/published/pre-processing/parameters', + summary: 'Get first step parameters of rag pipeline', + tags: ['console'], + }) + .input( + z.object({ params: zGetRagPipelinesByPipelineIdWorkflowsPublishedPreProcessingParametersPath }), + ) + .output(zGetRagPipelinesByPipelineIdWorkflowsPublishedPreProcessingParametersResponse) + +export const parameters3 = { + get: get22, +} + +export const preProcessing2 = { + parameters: parameters3, +} + +/** + * Get second step parameters of rag pipeline + */ +export const get23 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflowsPublishedProcessingParameters', + path: '/rag/pipelines/{pipeline_id}/workflows/published/processing/parameters', + summary: 'Get second step parameters of rag pipeline', + tags: ['console'], + }) + .input( + z.object({ params: zGetRagPipelinesByPipelineIdWorkflowsPublishedProcessingParametersPath }), + ) + .output(zGetRagPipelinesByPipelineIdWorkflowsPublishedProcessingParametersResponse) + +export const parameters4 = { + get: get23, +} + +export const processing2 = { + parameters: parameters4, +} + +/** + * Run published workflow + */ +export const post19 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdWorkflowsPublishedRun', + path: '/rag/pipelines/{pipeline_id}/workflows/published/run', + summary: 'Run published workflow', + tags: ['console'], + }) + .input( + z.object({ + body: zPostRagPipelinesByPipelineIdWorkflowsPublishedRunBody, + params: zPostRagPipelinesByPipelineIdWorkflowsPublishedRunPath, + }), + ) + .output(zPostRagPipelinesByPipelineIdWorkflowsPublishedRunResponse) + +export const run7 = { + post: post19, +} + +export const published = { + 
datasource: datasource2, + preProcessing: preProcessing2, + processing: processing2, + run: run7, +} + +export const post20 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRagPipelinesByPipelineIdWorkflowsByWorkflowIdRestore', + path: '/rag/pipelines/{pipeline_id}/workflows/{workflow_id}/restore', + tags: ['console'], + }) + .input(z.object({ params: zPostRagPipelinesByPipelineIdWorkflowsByWorkflowIdRestorePath })) + .output(zPostRagPipelinesByPipelineIdWorkflowsByWorkflowIdRestoreResponse) + +export const restore = { + post: post20, +} + +/** + * Delete a published workflow version that is not currently active on the pipeline + */ +export const delete5 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteRagPipelinesByPipelineIdWorkflowsByWorkflowId', + path: '/rag/pipelines/{pipeline_id}/workflows/{workflow_id}', + summary: 'Delete a published workflow version that is not currently active on the pipeline', + tags: ['console'], + }) + .input(z.object({ params: zDeleteRagPipelinesByPipelineIdWorkflowsByWorkflowIdPath })) + .output(zDeleteRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponse) + +/** + * Update workflow attributes + */ +export const patch3 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchRagPipelinesByPipelineIdWorkflowsByWorkflowId', + path: '/rag/pipelines/{pipeline_id}/workflows/{workflow_id}', + summary: 'Update workflow attributes', + tags: ['console'], + }) + .input(z.object({ params: zPatchRagPipelinesByPipelineIdWorkflowsByWorkflowIdPath })) + .output(zPatchRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponse) + +export const byWorkflowId = { + delete: delete5, + patch: patch3, + restore, +} + +/** + * Get published workflows + */ +export const get24 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRagPipelinesByPipelineIdWorkflows', + path: '/rag/pipelines/{pipeline_id}/workflows', + summary: 'Get 
published workflows', + tags: ['console'], + }) + .input(z.object({ params: zGetRagPipelinesByPipelineIdWorkflowsPath })) + .output(zGetRagPipelinesByPipelineIdWorkflowsResponse) + +export const workflows = { + get: get24, + defaultWorkflowBlockConfigs, + draft, + publish: publish2, + published, + byWorkflowId, +} + +export const byPipelineId2 = { + customized: customized2, + exports: exports_, + workflowRuns, + workflows, +} + +export const pipelines = { + datasourcePlugins, + imports, + recommendedPlugins, + transform, + byPipelineId: byPipelineId2, +} + +export const rag = { + pipeline, + pipelines, +} + +export const contract = { + rag, +} diff --git a/packages/contracts/generated/api/console/rag/types.gen.ts b/packages/contracts/generated/api/console/rag/types.gen.ts new file mode 100644 index 0000000000..e300ff443c --- /dev/null +++ b/packages/contracts/generated/api/console/rag/types.gen.ts @@ -0,0 +1,1054 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type RagPipelineDatasetImportPayload = { + yaml_content: string +} + +export type RagPipelineImportPayload = { + description?: string | null + icon?: string | null + icon_background?: string | null + icon_type?: string | null + mode: string + name?: string | null + pipeline_id?: string | null + yaml_content?: string | null + yaml_url?: string | null +} + +export type Payload = { + description?: string + icon_info?: { + [key: string]: unknown + } | null + name: string +} + +export type DatasourceNodeRunPayload = { + credential_id?: string | null + datasource_type: string + inputs: { + [key: string]: unknown + } +} + +export type DatasourceVariablesPayload = { + datasource_info: { + [key: string]: unknown + } + datasource_type: string + start_node_id: string + start_node_title: string +} + +export type NodeRunPayload = { + inputs?: { + [key: string]: unknown + } | null +} + +export type 
NodeRunRequiredPayload = { + inputs: { + [key: string]: unknown + } +} + +export type DraftWorkflowRunPayload = { + datasource_info_list: Array<{ + [key: string]: unknown + }> + datasource_type: string + inputs: { + [key: string]: unknown + } + start_node_id: string +} + +export type Parser = { + credential_id?: string | null + datasource_type: string + inputs: { + [key: string]: unknown + } +} + +export type PublishedWorkflowRunPayload = { + datasource_info_list: Array<{ + [key: string]: unknown + }> + datasource_type: string + inputs: { + [key: string]: unknown + } + is_preview?: boolean + original_document_id?: string | null + response_mode?: 'streaming' | 'blocking' + start_node_id: string +} + +export type DeleteRagPipelineCustomizedTemplatesByTemplateIdData = { + body?: never + path: { + template_id: string + } + query?: never + url: '/rag/pipeline/customized/templates/{template_id}' +} + +export type DeleteRagPipelineCustomizedTemplatesByTemplateIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteRagPipelineCustomizedTemplatesByTemplateIdResponse + = DeleteRagPipelineCustomizedTemplatesByTemplateIdResponses[keyof DeleteRagPipelineCustomizedTemplatesByTemplateIdResponses] + +export type PatchRagPipelineCustomizedTemplatesByTemplateIdData = { + body?: never + path: { + template_id: string + } + query?: never + url: '/rag/pipeline/customized/templates/{template_id}' +} + +export type PatchRagPipelineCustomizedTemplatesByTemplateIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchRagPipelineCustomizedTemplatesByTemplateIdResponse + = PatchRagPipelineCustomizedTemplatesByTemplateIdResponses[keyof PatchRagPipelineCustomizedTemplatesByTemplateIdResponses] + +export type PostRagPipelineCustomizedTemplatesByTemplateIdData = { + body?: never + path: { + template_id: string + } + query?: never + url: '/rag/pipeline/customized/templates/{template_id}' +} + +export type 
PostRagPipelineCustomizedTemplatesByTemplateIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelineCustomizedTemplatesByTemplateIdResponse + = PostRagPipelineCustomizedTemplatesByTemplateIdResponses[keyof PostRagPipelineCustomizedTemplatesByTemplateIdResponses] + +export type PostRagPipelineDatasetData = { + body: RagPipelineDatasetImportPayload + path?: never + query?: never + url: '/rag/pipeline/dataset' +} + +export type PostRagPipelineDatasetResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelineDatasetResponse + = PostRagPipelineDatasetResponses[keyof PostRagPipelineDatasetResponses] + +export type PostRagPipelineEmptyDatasetData = { + body?: never + path?: never + query?: never + url: '/rag/pipeline/empty-dataset' +} + +export type PostRagPipelineEmptyDatasetResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelineEmptyDatasetResponse + = PostRagPipelineEmptyDatasetResponses[keyof PostRagPipelineEmptyDatasetResponses] + +export type GetRagPipelineTemplatesData = { + body?: never + path?: never + query?: never + url: '/rag/pipeline/templates' +} + +export type GetRagPipelineTemplatesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelineTemplatesResponse + = GetRagPipelineTemplatesResponses[keyof GetRagPipelineTemplatesResponses] + +export type GetRagPipelineTemplatesByTemplateIdData = { + body?: never + path: { + template_id: string + } + query?: never + url: '/rag/pipeline/templates/{template_id}' +} + +export type GetRagPipelineTemplatesByTemplateIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelineTemplatesByTemplateIdResponse + = GetRagPipelineTemplatesByTemplateIdResponses[keyof GetRagPipelineTemplatesByTemplateIdResponses] + +export type GetRagPipelinesDatasourcePluginsData = { + body?: never + path?: never + query?: never + url: '/rag/pipelines/datasource-plugins' +} + +export type 
GetRagPipelinesDatasourcePluginsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesDatasourcePluginsResponse + = GetRagPipelinesDatasourcePluginsResponses[keyof GetRagPipelinesDatasourcePluginsResponses] + +export type PostRagPipelinesImportsData = { + body: RagPipelineImportPayload + path?: never + query?: never + url: '/rag/pipelines/imports' +} + +export type PostRagPipelinesImportsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesImportsResponse + = PostRagPipelinesImportsResponses[keyof PostRagPipelinesImportsResponses] + +export type PostRagPipelinesImportsByImportIdConfirmData = { + body?: never + path: { + import_id: string + } + query?: never + url: '/rag/pipelines/imports/{import_id}/confirm' +} + +export type PostRagPipelinesImportsByImportIdConfirmResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesImportsByImportIdConfirmResponse + = PostRagPipelinesImportsByImportIdConfirmResponses[keyof PostRagPipelinesImportsByImportIdConfirmResponses] + +export type GetRagPipelinesImportsByPipelineIdCheckDependenciesData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/imports/{pipeline_id}/check-dependencies' +} + +export type GetRagPipelinesImportsByPipelineIdCheckDependenciesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesImportsByPipelineIdCheckDependenciesResponse + = GetRagPipelinesImportsByPipelineIdCheckDependenciesResponses[keyof GetRagPipelinesImportsByPipelineIdCheckDependenciesResponses] + +export type GetRagPipelinesRecommendedPluginsData = { + body?: never + path?: never + query?: never + url: '/rag/pipelines/recommended-plugins' +} + +export type GetRagPipelinesRecommendedPluginsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesRecommendedPluginsResponse + = GetRagPipelinesRecommendedPluginsResponses[keyof 
GetRagPipelinesRecommendedPluginsResponses] + +export type PostRagPipelinesTransformDatasetsByDatasetIdData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/rag/pipelines/transform/datasets/{dataset_id}' +} + +export type PostRagPipelinesTransformDatasetsByDatasetIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesTransformDatasetsByDatasetIdResponse + = PostRagPipelinesTransformDatasetsByDatasetIdResponses[keyof PostRagPipelinesTransformDatasetsByDatasetIdResponses] + +export type PostRagPipelinesByPipelineIdCustomizedPublishData = { + body: Payload + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/customized/publish' +} + +export type PostRagPipelinesByPipelineIdCustomizedPublishResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesByPipelineIdCustomizedPublishResponse + = PostRagPipelinesByPipelineIdCustomizedPublishResponses[keyof PostRagPipelinesByPipelineIdCustomizedPublishResponses] + +export type GetRagPipelinesByPipelineIdExportsData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/exports' +} + +export type GetRagPipelinesByPipelineIdExportsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdExportsResponse + = GetRagPipelinesByPipelineIdExportsResponses[keyof GetRagPipelinesByPipelineIdExportsResponses] + +export type GetRagPipelinesByPipelineIdWorkflowRunsData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflow-runs' +} + +export type GetRagPipelinesByPipelineIdWorkflowRunsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowRunsResponse + = GetRagPipelinesByPipelineIdWorkflowRunsResponses[keyof GetRagPipelinesByPipelineIdWorkflowRunsResponses] + +export type 
PostRagPipelinesByPipelineIdWorkflowRunsTasksByTaskIdStopData = { + body?: never + path: { + pipeline_id: string + task_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflow-runs/tasks/{task_id}/stop' +} + +export type PostRagPipelinesByPipelineIdWorkflowRunsTasksByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesByPipelineIdWorkflowRunsTasksByTaskIdStopResponse + = PostRagPipelinesByPipelineIdWorkflowRunsTasksByTaskIdStopResponses[keyof PostRagPipelinesByPipelineIdWorkflowRunsTasksByTaskIdStopResponses] + +export type GetRagPipelinesByPipelineIdWorkflowRunsByRunIdData = { + body?: never + path: { + pipeline_id: string + run_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflow-runs/{run_id}' +} + +export type GetRagPipelinesByPipelineIdWorkflowRunsByRunIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowRunsByRunIdResponse + = GetRagPipelinesByPipelineIdWorkflowRunsByRunIdResponses[keyof GetRagPipelinesByPipelineIdWorkflowRunsByRunIdResponses] + +export type GetRagPipelinesByPipelineIdWorkflowRunsByRunIdNodeExecutionsData = { + body?: never + path: { + pipeline_id: string + run_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflow-runs/{run_id}/node-executions' +} + +export type GetRagPipelinesByPipelineIdWorkflowRunsByRunIdNodeExecutionsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowRunsByRunIdNodeExecutionsResponse + = GetRagPipelinesByPipelineIdWorkflowRunsByRunIdNodeExecutionsResponses[keyof GetRagPipelinesByPipelineIdWorkflowRunsByRunIdNodeExecutionsResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows' +} + +export type GetRagPipelinesByPipelineIdWorkflowsResponses = { + 200: { + [key: string]: unknown + 
} +} + +export type GetRagPipelinesByPipelineIdWorkflowsResponse + = GetRagPipelinesByPipelineIdWorkflowsResponses[keyof GetRagPipelinesByPipelineIdWorkflowsResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs' +} + +export type GetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsResponse + = GetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsResponses[keyof GetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeData = { + body?: never + path: { + pipeline_id: string + block_type: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs/{block_type}' +} + +export type GetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponse + = GetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponses[keyof GetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsDraftData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft' +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftResponse + = GetRagPipelinesByPipelineIdWorkflowsDraftResponses[keyof GetRagPipelinesByPipelineIdWorkflowsDraftResponses] + +export type 
PostRagPipelinesByPipelineIdWorkflowsDraftData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft' +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftResponse + = PostRagPipelinesByPipelineIdWorkflowsDraftResponses[keyof PostRagPipelinesByPipelineIdWorkflowsDraftResponses] + +export type PostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunData = { + body: DatasourceNodeRunPayload + path: { + pipeline_id: string + node_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/datasource/nodes/{node_id}/run' +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunResponse + = PostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunResponses[keyof PostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunResponses] + +export type PostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectData = { + body: DatasourceVariablesPayload + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/datasource/variables-inspect' +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectResponse + = PostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectResponses[keyof PostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsDraftEnvironmentVariablesData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: 
'/rag/pipelines/{pipeline_id}/workflows/draft/environment-variables' +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftEnvironmentVariablesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftEnvironmentVariablesResponse + = GetRagPipelinesByPipelineIdWorkflowsDraftEnvironmentVariablesResponses[keyof GetRagPipelinesByPipelineIdWorkflowsDraftEnvironmentVariablesResponses] + +export type PostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunData = { + body: NodeRunPayload + path: { + pipeline_id: string + node_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/iteration/nodes/{node_id}/run' +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunResponse + = PostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunResponses[keyof PostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunResponses] + +export type PostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunData = { + body: NodeRunPayload + path: { + pipeline_id: string + node_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/loop/nodes/{node_id}/run' +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunResponse + = PostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunResponses[keyof PostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdLastRunData = { + body?: never + path: { + pipeline_id: string + node_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/last-run' +} + 
+export type GetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdLastRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdLastRunResponse + = GetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdLastRunResponses[keyof GetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdLastRunResponses] + +export type PostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunData = { + body: NodeRunRequiredPayload + path: { + pipeline_id: string + node_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/run' +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunResponse + = PostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunResponses[keyof PostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunResponses] + +export type DeleteRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesData = { + body?: never + path: { + pipeline_id: string + node_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/variables' +} + +export type DeleteRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponse + = DeleteRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponses[keyof DeleteRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesData = { + body?: never + path: { + pipeline_id: string + node_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/variables' +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponses = { + 200: { + [key: 
string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponse + = GetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponses[keyof GetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsDraftPreProcessingParametersData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/pre-processing/parameters' +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftPreProcessingParametersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftPreProcessingParametersResponse + = GetRagPipelinesByPipelineIdWorkflowsDraftPreProcessingParametersResponses[keyof GetRagPipelinesByPipelineIdWorkflowsDraftPreProcessingParametersResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsDraftProcessingParametersData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/processing/parameters' +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftProcessingParametersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftProcessingParametersResponse + = GetRagPipelinesByPipelineIdWorkflowsDraftProcessingParametersResponses[keyof GetRagPipelinesByPipelineIdWorkflowsDraftProcessingParametersResponses] + +export type PostRagPipelinesByPipelineIdWorkflowsDraftRunData = { + body: DraftWorkflowRunPayload + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/run' +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesByPipelineIdWorkflowsDraftRunResponse + = PostRagPipelinesByPipelineIdWorkflowsDraftRunResponses[keyof 
PostRagPipelinesByPipelineIdWorkflowsDraftRunResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsDraftSystemVariablesData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/system-variables' +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftSystemVariablesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftSystemVariablesResponse + = GetRagPipelinesByPipelineIdWorkflowsDraftSystemVariablesResponses[keyof GetRagPipelinesByPipelineIdWorkflowsDraftSystemVariablesResponses] + +export type DeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/variables' +} + +export type DeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesResponse + = DeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesResponses[keyof DeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsDraftVariablesData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/variables' +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftVariablesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftVariablesResponse + = GetRagPipelinesByPipelineIdWorkflowsDraftVariablesResponses[keyof GetRagPipelinesByPipelineIdWorkflowsDraftVariablesResponses] + +export type DeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdData = { + body?: never + path: { + pipeline_id: string + variable_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id}' +} + +export type 
DeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponse + = DeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponses[keyof DeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdData = { + body?: never + path: { + pipeline_id: string + variable_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id}' +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponse + = GetRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponses[keyof GetRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponses] + +export type PatchRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdData = { + body?: never + path: { + pipeline_id: string + variable_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id}' +} + +export type PatchRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponse + = PatchRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponses[keyof PatchRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponses] + +export type PutRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResetData = { + body?: never + path: { + pipeline_id: string + variable_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id}/reset' +} + +export type PutRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResetResponses = 
{ + 200: { + [key: string]: unknown + } +} + +export type PutRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResetResponse + = PutRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResetResponses[keyof PutRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResetResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsPublishData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/publish' +} + +export type GetRagPipelinesByPipelineIdWorkflowsPublishResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsPublishResponse + = GetRagPipelinesByPipelineIdWorkflowsPublishResponses[keyof GetRagPipelinesByPipelineIdWorkflowsPublishResponses] + +export type PostRagPipelinesByPipelineIdWorkflowsPublishData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/publish' +} + +export type PostRagPipelinesByPipelineIdWorkflowsPublishResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesByPipelineIdWorkflowsPublishResponse + = PostRagPipelinesByPipelineIdWorkflowsPublishResponses[keyof PostRagPipelinesByPipelineIdWorkflowsPublishResponses] + +export type PostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewData = { + body: Parser + path: { + pipeline_id: string + node_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/preview' +} + +export type PostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewResponses + = { + 200: { + [key: string]: unknown + } + } + +export type PostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewResponse + = PostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewResponses[keyof PostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewResponses] + 
+export type PostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunData = { + body: DatasourceNodeRunPayload + path: { + pipeline_id: string + node_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/run' +} + +export type PostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunResponse + = PostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunResponses[keyof PostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsPublishedPreProcessingParametersData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/published/pre-processing/parameters' +} + +export type GetRagPipelinesByPipelineIdWorkflowsPublishedPreProcessingParametersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsPublishedPreProcessingParametersResponse + = GetRagPipelinesByPipelineIdWorkflowsPublishedPreProcessingParametersResponses[keyof GetRagPipelinesByPipelineIdWorkflowsPublishedPreProcessingParametersResponses] + +export type GetRagPipelinesByPipelineIdWorkflowsPublishedProcessingParametersData = { + body?: never + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/published/processing/parameters' +} + +export type GetRagPipelinesByPipelineIdWorkflowsPublishedProcessingParametersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRagPipelinesByPipelineIdWorkflowsPublishedProcessingParametersResponse + = GetRagPipelinesByPipelineIdWorkflowsPublishedProcessingParametersResponses[keyof GetRagPipelinesByPipelineIdWorkflowsPublishedProcessingParametersResponses] + +export type 
PostRagPipelinesByPipelineIdWorkflowsPublishedRunData = { + body: PublishedWorkflowRunPayload + path: { + pipeline_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/published/run' +} + +export type PostRagPipelinesByPipelineIdWorkflowsPublishedRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRagPipelinesByPipelineIdWorkflowsPublishedRunResponse + = PostRagPipelinesByPipelineIdWorkflowsPublishedRunResponses[keyof PostRagPipelinesByPipelineIdWorkflowsPublishedRunResponses] + +export type DeleteRagPipelinesByPipelineIdWorkflowsByWorkflowIdData = { + body?: never + path: { + pipeline_id: string + workflow_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/{workflow_id}' +} + +export type DeleteRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponse + = DeleteRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponses[keyof DeleteRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponses] + +export type PatchRagPipelinesByPipelineIdWorkflowsByWorkflowIdData = { + body?: never + path: { + pipeline_id: string + workflow_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/{workflow_id}' +} + +export type PatchRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponse + = PatchRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponses[keyof PatchRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponses] + +export type PostRagPipelinesByPipelineIdWorkflowsByWorkflowIdRestoreData = { + body?: never + path: { + pipeline_id: string + workflow_id: string + } + query?: never + url: '/rag/pipelines/{pipeline_id}/workflows/{workflow_id}/restore' +} + +export type PostRagPipelinesByPipelineIdWorkflowsByWorkflowIdRestoreResponses = { + 200: { + [key: string]: 
unknown + } +} + +export type PostRagPipelinesByPipelineIdWorkflowsByWorkflowIdRestoreResponse + = PostRagPipelinesByPipelineIdWorkflowsByWorkflowIdRestoreResponses[keyof PostRagPipelinesByPipelineIdWorkflowsByWorkflowIdRestoreResponses] diff --git a/packages/contracts/generated/api/console/rag/zod.gen.ts b/packages/contracts/generated/api/console/rag/zod.gen.ts new file mode 100644 index 0000000000..b28d7fafc7 --- /dev/null +++ b/packages/contracts/generated/api/console/rag/zod.gen.ts @@ -0,0 +1,709 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * RagPipelineDatasetImportPayload + */ +export const zRagPipelineDatasetImportPayload = z.object({ + yaml_content: z.string(), +}) + +/** + * RagPipelineImportPayload + */ +export const zRagPipelineImportPayload = z.object({ + description: z.string().nullish(), + icon: z.string().nullish(), + icon_background: z.string().nullish(), + icon_type: z.string().nullish(), + mode: z.string(), + name: z.string().nullish(), + pipeline_id: z.string().nullish(), + yaml_content: z.string().nullish(), + yaml_url: z.string().nullish(), +}) + +/** + * Payload + */ +export const zPayload = z.object({ + description: z.string().max(400).optional().default(''), + icon_info: z.record(z.string(), z.unknown()).nullish(), + name: z.string().min(1).max(40), +}) + +/** + * DatasourceNodeRunPayload + */ +export const zDatasourceNodeRunPayload = z.object({ + credential_id: z.string().nullish(), + datasource_type: z.string(), + inputs: z.record(z.string(), z.unknown()), +}) + +/** + * DatasourceVariablesPayload + */ +export const zDatasourceVariablesPayload = z.object({ + datasource_info: z.record(z.string(), z.unknown()), + datasource_type: z.string(), + start_node_id: z.string(), + start_node_title: z.string(), +}) + +/** + * NodeRunPayload + */ +export const zNodeRunPayload = z.object({ + inputs: z.record(z.string(), z.unknown()).nullish(), +}) + +/** + * NodeRunRequiredPayload + */ +export const 
zNodeRunRequiredPayload = z.object({ + inputs: z.record(z.string(), z.unknown()), +}) + +/** + * DraftWorkflowRunPayload + */ +export const zDraftWorkflowRunPayload = z.object({ + datasource_info_list: z.array(z.record(z.string(), z.unknown())), + datasource_type: z.string(), + inputs: z.record(z.string(), z.unknown()), + start_node_id: z.string(), +}) + +/** + * Parser + */ +export const zParser = z.object({ + credential_id: z.string().nullish(), + datasource_type: z.string(), + inputs: z.record(z.string(), z.unknown()), +}) + +/** + * PublishedWorkflowRunPayload + */ +export const zPublishedWorkflowRunPayload = z.object({ + datasource_info_list: z.array(z.record(z.string(), z.unknown())), + datasource_type: z.string(), + inputs: z.record(z.string(), z.unknown()), + is_preview: z.boolean().optional().default(false), + original_document_id: z.string().nullish(), + response_mode: z.enum(['streaming', 'blocking']).optional().default('streaming'), + start_node_id: z.string(), +}) + +export const zDeleteRagPipelineCustomizedTemplatesByTemplateIdPath = z.object({ + template_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteRagPipelineCustomizedTemplatesByTemplateIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchRagPipelineCustomizedTemplatesByTemplateIdPath = z.object({ + template_id: z.string(), +}) + +/** + * Success + */ +export const zPatchRagPipelineCustomizedTemplatesByTemplateIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostRagPipelineCustomizedTemplatesByTemplateIdPath = z.object({ + template_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelineCustomizedTemplatesByTemplateIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostRagPipelineDatasetBody = zRagPipelineDatasetImportPayload + +/** + * Success + */ +export const zPostRagPipelineDatasetResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const 
zPostRagPipelineEmptyDatasetResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetRagPipelineTemplatesResponse = z.record(z.string(), z.unknown()) + +export const zGetRagPipelineTemplatesByTemplateIdPath = z.object({ + template_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelineTemplatesByTemplateIdResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetRagPipelinesDatasourcePluginsResponse = z.record(z.string(), z.unknown()) + +export const zPostRagPipelinesImportsBody = zRagPipelineImportPayload + +/** + * Success + */ +export const zPostRagPipelinesImportsResponse = z.record(z.string(), z.unknown()) + +export const zPostRagPipelinesImportsByImportIdConfirmPath = z.object({ + import_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelinesImportsByImportIdConfirmResponse = z.record(z.string(), z.unknown()) + +export const zGetRagPipelinesImportsByPipelineIdCheckDependenciesPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesImportsByPipelineIdCheckDependenciesResponse = z.record( + z.string(), + z.unknown(), +) + +/** + * Success + */ +export const zGetRagPipelinesRecommendedPluginsResponse = z.record(z.string(), z.unknown()) + +export const zPostRagPipelinesTransformDatasetsByDatasetIdPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelinesTransformDatasetsByDatasetIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostRagPipelinesByPipelineIdCustomizedPublishBody = zPayload + +export const zPostRagPipelinesByPipelineIdCustomizedPublishPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelinesByPipelineIdCustomizedPublishResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetRagPipelinesByPipelineIdExportsPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ 
+export const zGetRagPipelinesByPipelineIdExportsResponse = z.record(z.string(), z.unknown()) + +export const zGetRagPipelinesByPipelineIdWorkflowRunsPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowRunsResponse = z.record(z.string(), z.unknown()) + +export const zPostRagPipelinesByPipelineIdWorkflowRunsTasksByTaskIdStopPath = z.object({ + pipeline_id: z.string(), + task_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelinesByPipelineIdWorkflowRunsTasksByTaskIdStopResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetRagPipelinesByPipelineIdWorkflowRunsByRunIdPath = z.object({ + pipeline_id: z.string(), + run_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowRunsByRunIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetRagPipelinesByPipelineIdWorkflowRunsByRunIdNodeExecutionsPath = z.object({ + pipeline_id: z.string(), + run_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowRunsByRunIdNodeExecutionsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetRagPipelinesByPipelineIdWorkflowsPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsResponse = z.record(z.string(), z.unknown()) + +export const zGetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypePath + = z.object({ + pipeline_id: z.string(), + block_type: z.string(), + }) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsDefaultWorkflowBlockConfigsByBlockTypeResponse + = 
z.record(z.string(), z.unknown()) + +export const zGetRagPipelinesByPipelineIdWorkflowsDraftPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsDraftResponse = z.record(z.string(), z.unknown()) + +export const zPostRagPipelinesByPipelineIdWorkflowsDraftPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelinesByPipelineIdWorkflowsDraftResponse = z.record(z.string(), z.unknown()) + +export const zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunBody + = zDatasourceNodeRunPayload + +export const zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunPath = z.object({ + pipeline_id: z.string(), + node_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceNodesByNodeIdRunResponse + = z.record(z.string(), z.unknown()) + +export const zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectBody + = zDatasourceVariablesPayload + +export const zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelinesByPipelineIdWorkflowsDraftDatasourceVariablesInspectResponse + = z.record(z.string(), z.unknown()) + +export const zGetRagPipelinesByPipelineIdWorkflowsDraftEnvironmentVariablesPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsDraftEnvironmentVariablesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunBody + = zNodeRunPayload + +export const zPostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunPath = z.object({ + pipeline_id: z.string(), + node_id: z.string(), +}) + +/** + * Success + */ +export const 
zPostRagPipelinesByPipelineIdWorkflowsDraftIterationNodesByNodeIdRunResponse + = z.record(z.string(), z.unknown()) + +export const zPostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunBody = zNodeRunPayload + +export const zPostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunPath = z.object({ + pipeline_id: z.string(), + node_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelinesByPipelineIdWorkflowsDraftLoopNodesByNodeIdRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdLastRunPath = z.object({ + pipeline_id: z.string(), + node_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdLastRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunBody + = zNodeRunRequiredPayload + +export const zPostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunPath = z.object({ + pipeline_id: z.string(), + node_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesPath = z.object({ + pipeline_id: z.string(), + node_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesPath = z.object({ + pipeline_id: z.string(), + node_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsDraftNodesByNodeIdVariablesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetRagPipelinesByPipelineIdWorkflowsDraftPreProcessingParametersPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success 
+ */ +export const zGetRagPipelinesByPipelineIdWorkflowsDraftPreProcessingParametersResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetRagPipelinesByPipelineIdWorkflowsDraftProcessingParametersPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsDraftProcessingParametersResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostRagPipelinesByPipelineIdWorkflowsDraftRunBody = zDraftWorkflowRunPayload + +export const zPostRagPipelinesByPipelineIdWorkflowsDraftRunPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelinesByPipelineIdWorkflowsDraftRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetRagPipelinesByPipelineIdWorkflowsDraftSystemVariablesPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsDraftSystemVariablesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetRagPipelinesByPipelineIdWorkflowsDraftVariablesPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsDraftVariablesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdPath = z.object({ + pipeline_id: z.string(), + variable_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdPath = z.object({ + pipeline_id: z.string(), + 
variable_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdPath = z.object({ + pipeline_id: z.string(), + variable_id: z.string(), +}) + +/** + * Success + */ +export const zPatchRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPutRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResetPath = z.object({ + pipeline_id: z.string(), + variable_id: z.string(), +}) + +/** + * Success + */ +export const zPutRagPipelinesByPipelineIdWorkflowsDraftVariablesByVariableIdResetResponse + = z.record(z.string(), z.unknown()) + +export const zGetRagPipelinesByPipelineIdWorkflowsPublishPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsPublishResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostRagPipelinesByPipelineIdWorkflowsPublishPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelinesByPipelineIdWorkflowsPublishResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewBody + = zParser + +export const zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewPath + = z.object({ + pipeline_id: z.string(), + node_id: z.string(), + }) + +/** + * Success + */ +export const zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdPreviewResponse + = z.record(z.string(), z.unknown()) + +export const zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunBody + = zDatasourceNodeRunPayload + +export const zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunPath + = z.object({ + pipeline_id: z.string(), 
+ node_id: z.string(), + }) + +/** + * Success + */ +export const zPostRagPipelinesByPipelineIdWorkflowsPublishedDatasourceNodesByNodeIdRunResponse + = z.record(z.string(), z.unknown()) + +export const zGetRagPipelinesByPipelineIdWorkflowsPublishedPreProcessingParametersPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsPublishedPreProcessingParametersResponse + = z.record(z.string(), z.unknown()) + +export const zGetRagPipelinesByPipelineIdWorkflowsPublishedProcessingParametersPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zGetRagPipelinesByPipelineIdWorkflowsPublishedProcessingParametersResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostRagPipelinesByPipelineIdWorkflowsPublishedRunBody = zPublishedWorkflowRunPayload + +export const zPostRagPipelinesByPipelineIdWorkflowsPublishedRunPath = z.object({ + pipeline_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelinesByPipelineIdWorkflowsPublishedRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteRagPipelinesByPipelineIdWorkflowsByWorkflowIdPath = z.object({ + pipeline_id: z.string(), + workflow_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchRagPipelinesByPipelineIdWorkflowsByWorkflowIdPath = z.object({ + pipeline_id: z.string(), + workflow_id: z.string(), +}) + +/** + * Success + */ +export const zPatchRagPipelinesByPipelineIdWorkflowsByWorkflowIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostRagPipelinesByPipelineIdWorkflowsByWorkflowIdRestorePath = z.object({ + pipeline_id: z.string(), + workflow_id: z.string(), +}) + +/** + * Success + */ +export const zPostRagPipelinesByPipelineIdWorkflowsByWorkflowIdRestoreResponse = z.record( + z.string(), + z.unknown(), +) diff --git 
a/packages/contracts/generated/api/console/refresh-token/orpc.gen.ts b/packages/contracts/generated/api/console/refresh-token/orpc.gen.ts new file mode 100644 index 0000000000..4faa4d7d23 --- /dev/null +++ b/packages/contracts/generated/api/console/refresh-token/orpc.gen.ts @@ -0,0 +1,23 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' + +import { zPostRefreshTokenResponse } from './zod.gen' + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRefreshToken', + path: '/refresh-token', + tags: ['console'], + }) + .output(zPostRefreshTokenResponse) + +export const refreshToken = { + post, +} + +export const contract = { + refreshToken, +} diff --git a/packages/contracts/generated/api/console/refresh-token/types.gen.ts b/packages/contracts/generated/api/console/refresh-token/types.gen.ts new file mode 100644 index 0000000000..15c939b947 --- /dev/null +++ b/packages/contracts/generated/api/console/refresh-token/types.gen.ts @@ -0,0 +1,20 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type PostRefreshTokenData = { + body?: never + path?: never + query?: never + url: '/refresh-token' +} + +export type PostRefreshTokenResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRefreshTokenResponse = PostRefreshTokenResponses[keyof PostRefreshTokenResponses] diff --git a/packages/contracts/generated/api/console/refresh-token/zod.gen.ts b/packages/contracts/generated/api/console/refresh-token/zod.gen.ts new file mode 100644 index 0000000000..d76067552c --- /dev/null +++ b/packages/contracts/generated/api/console/refresh-token/zod.gen.ts @@ -0,0 +1,8 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * Success + */ +export const zPostRefreshTokenResponse = z.record(z.string(), z.unknown()) diff --git 
a/packages/contracts/generated/api/console/remote-files/orpc.gen.ts b/packages/contracts/generated/api/console/remote-files/orpc.gen.ts new file mode 100644 index 0000000000..977af4a09c --- /dev/null +++ b/packages/contracts/generated/api/console/remote-files/orpc.gen.ts @@ -0,0 +1,48 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zGetRemoteFilesByUrlPath, + zGetRemoteFilesByUrlResponse, + zPostRemoteFilesUploadResponse, +} from './zod.gen' + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRemoteFilesUpload', + path: '/remote-files/upload', + tags: ['console'], + }) + .output(zPostRemoteFilesUploadResponse) + +export const upload = { + post, +} + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRemoteFilesByUrl', + path: '/remote-files/{url}', + tags: ['console'], + }) + .input(z.object({ params: zGetRemoteFilesByUrlPath })) + .output(zGetRemoteFilesByUrlResponse) + +export const byUrl = { + get, +} + +export const remoteFiles = { + upload, + byUrl, +} + +export const contract = { + remoteFiles, +} diff --git a/packages/contracts/generated/api/console/remote-files/types.gen.ts b/packages/contracts/generated/api/console/remote-files/types.gen.ts new file mode 100644 index 0000000000..ea61592a76 --- /dev/null +++ b/packages/contracts/generated/api/console/remote-files/types.gen.ts @@ -0,0 +1,39 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type PostRemoteFilesUploadData = { + body?: never + path?: never + query?: never + url: '/remote-files/upload' +} + +export type PostRemoteFilesUploadResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRemoteFilesUploadResponse + = PostRemoteFilesUploadResponses[keyof PostRemoteFilesUploadResponses] 
+ +export type GetRemoteFilesByUrlData = { + body?: never + path: { + url: string + } + query?: never + url: '/remote-files/{url}' +} + +export type GetRemoteFilesByUrlResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRemoteFilesByUrlResponse + = GetRemoteFilesByUrlResponses[keyof GetRemoteFilesByUrlResponses] diff --git a/packages/contracts/generated/api/console/remote-files/zod.gen.ts b/packages/contracts/generated/api/console/remote-files/zod.gen.ts new file mode 100644 index 0000000000..cee96cf65f --- /dev/null +++ b/packages/contracts/generated/api/console/remote-files/zod.gen.ts @@ -0,0 +1,17 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * Success + */ +export const zPostRemoteFilesUploadResponse = z.record(z.string(), z.unknown()) + +export const zGetRemoteFilesByUrlPath = z.object({ + url: z.string(), +}) + +/** + * Success + */ +export const zGetRemoteFilesByUrlResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/reset-password/orpc.gen.ts b/packages/contracts/generated/api/console/reset-password/orpc.gen.ts new file mode 100644 index 0000000000..93701280db --- /dev/null +++ b/packages/contracts/generated/api/console/reset-password/orpc.gen.ts @@ -0,0 +1,25 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { zPostResetPasswordBody, zPostResetPasswordResponse } from './zod.gen' + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postResetPassword', + path: '/reset-password', + tags: ['console'], + }) + .input(z.object({ body: zPostResetPasswordBody })) + .output(zPostResetPasswordResponse) + +export const resetPassword = { + post, +} + +export const contract = { + resetPassword, +} diff --git a/packages/contracts/generated/api/console/reset-password/types.gen.ts 
b/packages/contracts/generated/api/console/reset-password/types.gen.ts new file mode 100644 index 0000000000..6c2467aab4 --- /dev/null +++ b/packages/contracts/generated/api/console/reset-password/types.gen.ts @@ -0,0 +1,25 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type EmailPayload = { + email: string + language?: string | null +} + +export type PostResetPasswordData = { + body: EmailPayload + path?: never + query?: never + url: '/reset-password' +} + +export type PostResetPasswordResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostResetPasswordResponse = PostResetPasswordResponses[keyof PostResetPasswordResponses] diff --git a/packages/contracts/generated/api/console/reset-password/zod.gen.ts b/packages/contracts/generated/api/console/reset-password/zod.gen.ts new file mode 100644 index 0000000000..055ed9e127 --- /dev/null +++ b/packages/contracts/generated/api/console/reset-password/zod.gen.ts @@ -0,0 +1,18 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * EmailPayload + */ +export const zEmailPayload = z.object({ + email: z.string(), + language: z.string().nullish(), +}) + +export const zPostResetPasswordBody = zEmailPayload + +/** + * Success + */ +export const zPostResetPasswordResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/rule-code-generate/orpc.gen.ts b/packages/contracts/generated/api/console/rule-code-generate/orpc.gen.ts new file mode 100644 index 0000000000..1c5252525c --- /dev/null +++ b/packages/contracts/generated/api/console/rule-code-generate/orpc.gen.ts @@ -0,0 +1,29 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { zPostRuleCodeGenerateBody, zPostRuleCodeGenerateResponse } from './zod.gen' + +/** + * Generate code rules 
using LLM + */ +export const post = oc + .route({ + description: 'Generate code rules using LLM', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRuleCodeGenerate', + path: '/rule-code-generate', + tags: ['console'], + }) + .input(z.object({ body: zPostRuleCodeGenerateBody })) + .output(zPostRuleCodeGenerateResponse) + +export const ruleCodeGenerate = { + post, +} + +export const contract = { + ruleCodeGenerate, +} diff --git a/packages/contracts/generated/api/console/rule-code-generate/types.gen.ts b/packages/contracts/generated/api/console/rule-code-generate/types.gen.ts new file mode 100644 index 0000000000..dc1b045285 --- /dev/null +++ b/packages/contracts/generated/api/console/rule-code-generate/types.gen.ts @@ -0,0 +1,68 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type RuleCodeGeneratePayload = { + code_language?: string + instruction: string + model_config: ModelConfig + no_variable?: boolean +} + +export type ModelConfig = { + agent_mode_dict?: JsonValue + annotation_reply_dict?: JsonValue + chat_prompt_config_dict?: JsonValue + completion_prompt_config_dict?: JsonValue + created_at?: number | null + created_by?: string | null + dataset_configs_dict?: JsonValue + dataset_query_variable?: string | null + external_data_tools_list?: JsonValue + file_upload_dict?: JsonValue + model_dict?: JsonValue + more_like_this_dict?: JsonValue + opening_statement?: string | null + pre_prompt?: string | null + prompt_type?: string | null + retriever_resource_dict?: JsonValue + sensitive_word_avoidance_dict?: JsonValue + speech_to_text_dict?: JsonValue + suggested_questions_after_answer_dict?: JsonValue + suggested_questions_list?: JsonValue + text_to_speech_dict?: JsonValue + updated_at?: number | null + updated_by?: string | null + user_input_form_list?: JsonValue +} + +export type JsonValue = unknown + +export type 
PostRuleCodeGenerateData = { + body: RuleCodeGeneratePayload + path?: never + query?: never + url: '/rule-code-generate' +} + +export type PostRuleCodeGenerateErrors = { + 400: { + [key: string]: unknown + } + 402: { + [key: string]: unknown + } +} + +export type PostRuleCodeGenerateError = PostRuleCodeGenerateErrors[keyof PostRuleCodeGenerateErrors] + +export type PostRuleCodeGenerateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRuleCodeGenerateResponse + = PostRuleCodeGenerateResponses[keyof PostRuleCodeGenerateResponses] diff --git a/packages/contracts/generated/api/console/rule-code-generate/zod.gen.ts b/packages/contracts/generated/api/console/rule-code-generate/zod.gen.ts new file mode 100644 index 0000000000..40b840dc8a --- /dev/null +++ b/packages/contracts/generated/api/console/rule-code-generate/zod.gen.ts @@ -0,0 +1,52 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +export const zJsonValue = z.unknown() + +/** + * ModelConfig + */ +export const zModelConfig = z.object({ + agent_mode_dict: zJsonValue.optional(), + annotation_reply_dict: zJsonValue.optional(), + chat_prompt_config_dict: zJsonValue.optional(), + completion_prompt_config_dict: zJsonValue.optional(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + dataset_configs_dict: zJsonValue.optional(), + dataset_query_variable: z.string().nullish(), + external_data_tools_list: zJsonValue.optional(), + file_upload_dict: zJsonValue.optional(), + model_dict: zJsonValue.optional(), + more_like_this_dict: zJsonValue.optional(), + opening_statement: z.string().nullish(), + pre_prompt: z.string().nullish(), + prompt_type: z.string().nullish(), + retriever_resource_dict: zJsonValue.optional(), + sensitive_word_avoidance_dict: zJsonValue.optional(), + speech_to_text_dict: zJsonValue.optional(), + suggested_questions_after_answer_dict: zJsonValue.optional(), + suggested_questions_list: zJsonValue.optional(), + 
text_to_speech_dict: zJsonValue.optional(), + updated_at: z.int().nullish(), + updated_by: z.string().nullish(), + user_input_form_list: zJsonValue.optional(), +}) + +/** + * RuleCodeGeneratePayload + */ +export const zRuleCodeGeneratePayload = z.object({ + code_language: z.string().optional().default('javascript'), + instruction: z.string(), + model_config: zModelConfig, + no_variable: z.boolean().optional().default(false), +}) + +export const zPostRuleCodeGenerateBody = zRuleCodeGeneratePayload + +/** + * Code rules generated successfully + */ +export const zPostRuleCodeGenerateResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/rule-generate/orpc.gen.ts b/packages/contracts/generated/api/console/rule-generate/orpc.gen.ts new file mode 100644 index 0000000000..7bd233de2b --- /dev/null +++ b/packages/contracts/generated/api/console/rule-generate/orpc.gen.ts @@ -0,0 +1,29 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { zPostRuleGenerateBody, zPostRuleGenerateResponse } from './zod.gen' + +/** + * Generate rule configuration using LLM + */ +export const post = oc + .route({ + description: 'Generate rule configuration using LLM', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRuleGenerate', + path: '/rule-generate', + tags: ['console'], + }) + .input(z.object({ body: zPostRuleGenerateBody })) + .output(zPostRuleGenerateResponse) + +export const ruleGenerate = { + post, +} + +export const contract = { + ruleGenerate, +} diff --git a/packages/contracts/generated/api/console/rule-generate/types.gen.ts b/packages/contracts/generated/api/console/rule-generate/types.gen.ts new file mode 100644 index 0000000000..265ca5013d --- /dev/null +++ b/packages/contracts/generated/api/console/rule-generate/types.gen.ts @@ -0,0 +1,66 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + 
baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type RuleGeneratePayload = { + instruction: string + model_config: ModelConfig + no_variable?: boolean +} + +export type ModelConfig = { + agent_mode_dict?: JsonValue + annotation_reply_dict?: JsonValue + chat_prompt_config_dict?: JsonValue + completion_prompt_config_dict?: JsonValue + created_at?: number | null + created_by?: string | null + dataset_configs_dict?: JsonValue + dataset_query_variable?: string | null + external_data_tools_list?: JsonValue + file_upload_dict?: JsonValue + model_dict?: JsonValue + more_like_this_dict?: JsonValue + opening_statement?: string | null + pre_prompt?: string | null + prompt_type?: string | null + retriever_resource_dict?: JsonValue + sensitive_word_avoidance_dict?: JsonValue + speech_to_text_dict?: JsonValue + suggested_questions_after_answer_dict?: JsonValue + suggested_questions_list?: JsonValue + text_to_speech_dict?: JsonValue + updated_at?: number | null + updated_by?: string | null + user_input_form_list?: JsonValue +} + +export type JsonValue = unknown + +export type PostRuleGenerateData = { + body: RuleGeneratePayload + path?: never + query?: never + url: '/rule-generate' +} + +export type PostRuleGenerateErrors = { + 400: { + [key: string]: unknown + } + 402: { + [key: string]: unknown + } +} + +export type PostRuleGenerateError = PostRuleGenerateErrors[keyof PostRuleGenerateErrors] + +export type PostRuleGenerateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRuleGenerateResponse = PostRuleGenerateResponses[keyof PostRuleGenerateResponses] diff --git a/packages/contracts/generated/api/console/rule-generate/zod.gen.ts b/packages/contracts/generated/api/console/rule-generate/zod.gen.ts new file mode 100644 index 0000000000..7a346a58fc --- /dev/null +++ b/packages/contracts/generated/api/console/rule-generate/zod.gen.ts @@ -0,0 +1,51 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + 
+export const zJsonValue = z.unknown() + +/** + * ModelConfig + */ +export const zModelConfig = z.object({ + agent_mode_dict: zJsonValue.optional(), + annotation_reply_dict: zJsonValue.optional(), + chat_prompt_config_dict: zJsonValue.optional(), + completion_prompt_config_dict: zJsonValue.optional(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + dataset_configs_dict: zJsonValue.optional(), + dataset_query_variable: z.string().nullish(), + external_data_tools_list: zJsonValue.optional(), + file_upload_dict: zJsonValue.optional(), + model_dict: zJsonValue.optional(), + more_like_this_dict: zJsonValue.optional(), + opening_statement: z.string().nullish(), + pre_prompt: z.string().nullish(), + prompt_type: z.string().nullish(), + retriever_resource_dict: zJsonValue.optional(), + sensitive_word_avoidance_dict: zJsonValue.optional(), + speech_to_text_dict: zJsonValue.optional(), + suggested_questions_after_answer_dict: zJsonValue.optional(), + suggested_questions_list: zJsonValue.optional(), + text_to_speech_dict: zJsonValue.optional(), + updated_at: z.int().nullish(), + updated_by: z.string().nullish(), + user_input_form_list: zJsonValue.optional(), +}) + +/** + * RuleGeneratePayload + */ +export const zRuleGeneratePayload = z.object({ + instruction: z.string(), + model_config: zModelConfig, + no_variable: z.boolean().optional().default(false), +}) + +export const zPostRuleGenerateBody = zRuleGeneratePayload + +/** + * Rule configuration generated successfully + */ +export const zPostRuleGenerateResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/rule-structured-output-generate/orpc.gen.ts b/packages/contracts/generated/api/console/rule-structured-output-generate/orpc.gen.ts new file mode 100644 index 0000000000..276442f1c9 --- /dev/null +++ b/packages/contracts/generated/api/console/rule-structured-output-generate/orpc.gen.ts @@ -0,0 +1,32 @@ +// This file is auto-generated by @hey-api/openapi-ts 
+ +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zPostRuleStructuredOutputGenerateBody, + zPostRuleStructuredOutputGenerateResponse, +} from './zod.gen' + +/** + * Generate structured output rules using LLM + */ +export const post = oc + .route({ + description: 'Generate structured output rules using LLM', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRuleStructuredOutputGenerate', + path: '/rule-structured-output-generate', + tags: ['console'], + }) + .input(z.object({ body: zPostRuleStructuredOutputGenerateBody })) + .output(zPostRuleStructuredOutputGenerateResponse) + +export const ruleStructuredOutputGenerate = { + post, +} + +export const contract = { + ruleStructuredOutputGenerate, +} diff --git a/packages/contracts/generated/api/console/rule-structured-output-generate/types.gen.ts b/packages/contracts/generated/api/console/rule-structured-output-generate/types.gen.ts new file mode 100644 index 0000000000..f6124c6956 --- /dev/null +++ b/packages/contracts/generated/api/console/rule-structured-output-generate/types.gen.ts @@ -0,0 +1,67 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type RuleStructuredOutputPayload = { + instruction: string + model_config: ModelConfig +} + +export type ModelConfig = { + agent_mode_dict?: JsonValue + annotation_reply_dict?: JsonValue + chat_prompt_config_dict?: JsonValue + completion_prompt_config_dict?: JsonValue + created_at?: number | null + created_by?: string | null + dataset_configs_dict?: JsonValue + dataset_query_variable?: string | null + external_data_tools_list?: JsonValue + file_upload_dict?: JsonValue + model_dict?: JsonValue + more_like_this_dict?: JsonValue + opening_statement?: string | null + pre_prompt?: string | null + prompt_type?: string | null + retriever_resource_dict?: JsonValue + sensitive_word_avoidance_dict?: JsonValue + 
speech_to_text_dict?: JsonValue + suggested_questions_after_answer_dict?: JsonValue + suggested_questions_list?: JsonValue + text_to_speech_dict?: JsonValue + updated_at?: number | null + updated_by?: string | null + user_input_form_list?: JsonValue +} + +export type JsonValue = unknown + +export type PostRuleStructuredOutputGenerateData = { + body: RuleStructuredOutputPayload + path?: never + query?: never + url: '/rule-structured-output-generate' +} + +export type PostRuleStructuredOutputGenerateErrors = { + 400: { + [key: string]: unknown + } + 402: { + [key: string]: unknown + } +} + +export type PostRuleStructuredOutputGenerateError + = PostRuleStructuredOutputGenerateErrors[keyof PostRuleStructuredOutputGenerateErrors] + +export type PostRuleStructuredOutputGenerateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostRuleStructuredOutputGenerateResponse + = PostRuleStructuredOutputGenerateResponses[keyof PostRuleStructuredOutputGenerateResponses] diff --git a/packages/contracts/generated/api/console/rule-structured-output-generate/zod.gen.ts b/packages/contracts/generated/api/console/rule-structured-output-generate/zod.gen.ts new file mode 100644 index 0000000000..231b5a072c --- /dev/null +++ b/packages/contracts/generated/api/console/rule-structured-output-generate/zod.gen.ts @@ -0,0 +1,50 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +export const zJsonValue = z.unknown() + +/** + * ModelConfig + */ +export const zModelConfig = z.object({ + agent_mode_dict: zJsonValue.optional(), + annotation_reply_dict: zJsonValue.optional(), + chat_prompt_config_dict: zJsonValue.optional(), + completion_prompt_config_dict: zJsonValue.optional(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + dataset_configs_dict: zJsonValue.optional(), + dataset_query_variable: z.string().nullish(), + external_data_tools_list: zJsonValue.optional(), + file_upload_dict: zJsonValue.optional(), + 
model_dict: zJsonValue.optional(), + more_like_this_dict: zJsonValue.optional(), + opening_statement: z.string().nullish(), + pre_prompt: z.string().nullish(), + prompt_type: z.string().nullish(), + retriever_resource_dict: zJsonValue.optional(), + sensitive_word_avoidance_dict: zJsonValue.optional(), + speech_to_text_dict: zJsonValue.optional(), + suggested_questions_after_answer_dict: zJsonValue.optional(), + suggested_questions_list: zJsonValue.optional(), + text_to_speech_dict: zJsonValue.optional(), + updated_at: z.int().nullish(), + updated_by: z.string().nullish(), + user_input_form_list: zJsonValue.optional(), +}) + +/** + * RuleStructuredOutputPayload + */ +export const zRuleStructuredOutputPayload = z.object({ + instruction: z.string(), + model_config: zModelConfig, +}) + +export const zPostRuleStructuredOutputGenerateBody = zRuleStructuredOutputPayload + +/** + * Structured output generated successfully + */ +export const zPostRuleStructuredOutputGenerateResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/spec/orpc.gen.ts b/packages/contracts/generated/api/console/spec/orpc.gen.ts new file mode 100644 index 0000000000..bd2e750e6d --- /dev/null +++ b/packages/contracts/generated/api/console/spec/orpc.gen.ts @@ -0,0 +1,34 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' + +import { zGetSpecSchemaDefinitionsResponse } from './zod.gen' + +/** + * Get system JSON Schema definitions specification + * + * Used for frontend component type mapping + */ +export const get = oc + .route({ + description: 'Used for frontend component type mapping', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getSpecSchemaDefinitions', + path: '/spec/schema-definitions', + summary: 'Get system JSON Schema definitions specification', + tags: ['console'], + }) + .output(zGetSpecSchemaDefinitionsResponse) + +export const schemaDefinitions = { + get, +} + +export const spec = { 
+ schemaDefinitions, +} + +export const contract = { + spec, +} diff --git a/packages/contracts/generated/api/console/spec/types.gen.ts b/packages/contracts/generated/api/console/spec/types.gen.ts new file mode 100644 index 0000000000..eaad80aa9a --- /dev/null +++ b/packages/contracts/generated/api/console/spec/types.gen.ts @@ -0,0 +1,21 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type GetSpecSchemaDefinitionsData = { + body?: never + path?: never + query?: never + url: '/spec/schema-definitions' +} + +export type GetSpecSchemaDefinitionsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetSpecSchemaDefinitionsResponse + = GetSpecSchemaDefinitionsResponses[keyof GetSpecSchemaDefinitionsResponses] diff --git a/packages/contracts/generated/api/console/spec/zod.gen.ts b/packages/contracts/generated/api/console/spec/zod.gen.ts new file mode 100644 index 0000000000..fa057bc269 --- /dev/null +++ b/packages/contracts/generated/api/console/spec/zod.gen.ts @@ -0,0 +1,8 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * Success + */ +export const zGetSpecSchemaDefinitionsResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/system-features/orpc.gen.ts b/packages/contracts/generated/api/console/system-features/orpc.gen.ts new file mode 100644 index 0000000000..5c0a475585 --- /dev/null +++ b/packages/contracts/generated/api/console/system-features/orpc.gen.ts @@ -0,0 +1,37 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' + +import { zGetSystemFeaturesResponse } from './zod.gen' + +/** + * Get system-wide feature configuration + * + * Get system-wide feature configuration + * NOTE: This endpoint is unauthenticated by design, as it provides system features + * data required for dashboard 
initialization. + * + * Authentication would create circular dependency (can't login without dashboard loading). + * + * Only non-sensitive configuration data should be returned by this endpoint. + */ +export const get = oc + .route({ + description: + 'Get system-wide feature configuration\nNOTE: This endpoint is unauthenticated by design, as it provides system features\ndata required for dashboard initialization.\n\nAuthentication would create circular dependency (can\'t login without dashboard loading).\n\nOnly non-sensitive configuration data should be returned by this endpoint.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getSystemFeatures', + path: '/system-features', + summary: 'Get system-wide feature configuration', + tags: ['console'], + }) + .output(zGetSystemFeaturesResponse) + +export const systemFeatures = { + get, +} + +export const contract = { + systemFeatures, +} diff --git a/packages/contracts/generated/api/console/system-features/types.gen.ts b/packages/contracts/generated/api/console/system-features/types.gen.ts new file mode 100644 index 0000000000..0fbea39beb --- /dev/null +++ b/packages/contracts/generated/api/console/system-features/types.gen.ts @@ -0,0 +1,22 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type SystemFeatureResponse = { + [key: string]: unknown +} + +export type GetSystemFeaturesData = { + body?: never + path?: never + query?: never + url: '/system-features' +} + +export type GetSystemFeaturesResponses = { + 200: SystemFeatureResponse +} + +export type GetSystemFeaturesResponse = GetSystemFeaturesResponses[keyof GetSystemFeaturesResponses] diff --git a/packages/contracts/generated/api/console/system-features/zod.gen.ts b/packages/contracts/generated/api/console/system-features/zod.gen.ts new file mode 100644 index 0000000000..affb2a10a3 --- /dev/null +++ 
b/packages/contracts/generated/api/console/system-features/zod.gen.ts @@ -0,0 +1,10 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +export const zSystemFeatureResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetSystemFeaturesResponse = zSystemFeatureResponse diff --git a/packages/contracts/generated/api/console/tag-bindings/orpc.gen.ts b/packages/contracts/generated/api/console/tag-bindings/orpc.gen.ts new file mode 100644 index 0000000000..8b58d2c47d --- /dev/null +++ b/packages/contracts/generated/api/console/tag-bindings/orpc.gen.ts @@ -0,0 +1,97 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zDeleteTagBindingsByIdBody, + zDeleteTagBindingsByIdPath, + zDeleteTagBindingsByIdResponse, + zPostTagBindingsBody, + zPostTagBindingsCreateBody, + zPostTagBindingsCreateResponse, + zPostTagBindingsRemoveBody, + zPostTagBindingsRemoveResponse, + zPostTagBindingsResponse, +} from './zod.gen' + +/** + * Deprecated legacy alias. Use POST /tag-bindings instead. + * + * @deprecated + */ +export const post = oc + .route({ + deprecated: true, + description: 'Deprecated legacy alias. Use POST /tag-bindings instead.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postTagBindingsCreate', + path: '/tag-bindings/create', + tags: ['console'], + }) + .input(z.object({ body: zPostTagBindingsCreateBody })) + .output(zPostTagBindingsCreateResponse) + +export const create = { + post, +} + +/** + * Deprecated legacy alias. Use DELETE /tag-bindings/{id} instead. + * + * @deprecated + */ +export const post2 = oc + .route({ + deprecated: true, + description: 'Deprecated legacy alias. 
Use DELETE /tag-bindings/{id} instead.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postTagBindingsRemove', + path: '/tag-bindings/remove', + tags: ['console'], + }) + .input(z.object({ body: zPostTagBindingsRemoveBody })) + .output(zPostTagBindingsRemoveResponse) + +export const remove = { + post: post2, +} + +export const delete_ = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteTagBindingsById', + path: '/tag-bindings/{id}', + tags: ['console'], + }) + .input(z.object({ body: zDeleteTagBindingsByIdBody, params: zDeleteTagBindingsByIdPath })) + .output(zDeleteTagBindingsByIdResponse) + +export const byId = { + delete: delete_, +} + +export const post3 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postTagBindings', + path: '/tag-bindings', + tags: ['console'], + }) + .input(z.object({ body: zPostTagBindingsBody })) + .output(zPostTagBindingsResponse) + +export const tagBindings = { + post: post3, + create, + remove, + byId, +} + +export const contract = { + tagBindings, +} diff --git a/packages/contracts/generated/api/console/tag-bindings/types.gen.ts b/packages/contracts/generated/api/console/tag-bindings/types.gen.ts new file mode 100644 index 0000000000..e9426766ed --- /dev/null +++ b/packages/contracts/generated/api/console/tag-bindings/types.gen.ts @@ -0,0 +1,89 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type TagBindingPayload = { + tag_ids: Array + target_id: string + type: TagType +} + +export type TagBindingRemovePayload = { + tag_id: string + target_id: string + type: TagType +} + +export type TagBindingItemDeletePayload = { + target_id: string + type: TagType +} + +export type TagType = 'knowledge' | 'app' + +export type PostTagBindingsData = { + body: TagBindingPayload + path?: never + query?: never + url: '/tag-bindings' +} + +export type 
PostTagBindingsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostTagBindingsResponse = PostTagBindingsResponses[keyof PostTagBindingsResponses] + +export type PostTagBindingsCreateData = { + body: TagBindingPayload + path?: never + query?: never + url: '/tag-bindings/create' +} + +export type PostTagBindingsCreateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostTagBindingsCreateResponse + = PostTagBindingsCreateResponses[keyof PostTagBindingsCreateResponses] + +export type PostTagBindingsRemoveData = { + body: TagBindingRemovePayload + path?: never + query?: never + url: '/tag-bindings/remove' +} + +export type PostTagBindingsRemoveResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostTagBindingsRemoveResponse + = PostTagBindingsRemoveResponses[keyof PostTagBindingsRemoveResponses] + +export type DeleteTagBindingsByIdData = { + body: TagBindingItemDeletePayload + path: { + id: string + } + query?: never + url: '/tag-bindings/{id}' +} + +export type DeleteTagBindingsByIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteTagBindingsByIdResponse + = DeleteTagBindingsByIdResponses[keyof DeleteTagBindingsByIdResponses] diff --git a/packages/contracts/generated/api/console/tag-bindings/zod.gen.ts b/packages/contracts/generated/api/console/tag-bindings/zod.gen.ts new file mode 100644 index 0000000000..3dead1ec1b --- /dev/null +++ b/packages/contracts/generated/api/console/tag-bindings/zod.gen.ts @@ -0,0 +1,68 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * TagType + * + * Tag type + */ +export const zTagType = z.enum(['knowledge', 'app']) + +/** + * TagBindingPayload + */ +export const zTagBindingPayload = z.object({ + tag_ids: z.array(z.string()), + target_id: z.string(), + type: zTagType, +}) + +/** + * TagBindingRemovePayload + */ +export const zTagBindingRemovePayload = z.object({ + tag_id: z.string(), + target_id: z.string(), 
+ type: zTagType, +}) + +/** + * TagBindingItemDeletePayload + */ +export const zTagBindingItemDeletePayload = z.object({ + target_id: z.string(), + type: zTagType, +}) + +export const zPostTagBindingsBody = zTagBindingPayload + +/** + * Success + */ +export const zPostTagBindingsResponse = z.record(z.string(), z.unknown()) + +export const zPostTagBindingsCreateBody = zTagBindingPayload + +/** + * Success + */ +export const zPostTagBindingsCreateResponse = z.record(z.string(), z.unknown()) + +export const zPostTagBindingsRemoveBody = zTagBindingRemovePayload + +/** + * Success + */ +export const zPostTagBindingsRemoveResponse = z.record(z.string(), z.unknown()) + +export const zDeleteTagBindingsByIdBody = zTagBindingItemDeletePayload + +export const zDeleteTagBindingsByIdPath = z.object({ + id: z.string(), +}) + +/** + * Success + */ +export const zDeleteTagBindingsByIdResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/tags/orpc.gen.ts b/packages/contracts/generated/api/console/tags/orpc.gen.ts new file mode 100644 index 0000000000..937ccce634 --- /dev/null +++ b/packages/contracts/generated/api/console/tags/orpc.gen.ts @@ -0,0 +1,75 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zDeleteTagsByTagIdPath, + zDeleteTagsByTagIdResponse, + zGetTagsQuery, + zGetTagsResponse, + zPatchTagsByTagIdBody, + zPatchTagsByTagIdPath, + zPatchTagsByTagIdResponse, + zPostTagsBody, + zPostTagsResponse, +} from './zod.gen' + +export const delete_ = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteTagsByTagId', + path: '/tags/{tag_id}', + tags: ['console'], + }) + .input(z.object({ params: zDeleteTagsByTagIdPath })) + .output(zDeleteTagsByTagIdResponse) + +export const patch = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchTagsByTagId', + path: '/tags/{tag_id}', + tags: 
['console'], + }) + .input(z.object({ body: zPatchTagsByTagIdBody, params: zPatchTagsByTagIdPath })) + .output(zPatchTagsByTagIdResponse) + +export const byTagId = { + delete: delete_, + patch, +} + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getTags', + path: '/tags', + tags: ['console'], + }) + .input(z.object({ query: zGetTagsQuery.optional() })) + .output(zGetTagsResponse) + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postTags', + path: '/tags', + tags: ['console'], + }) + .input(z.object({ body: zPostTagsBody })) + .output(zPostTagsResponse) + +export const tags = { + get, + post, + byTagId, +} + +export const contract = { + tags, +} diff --git a/packages/contracts/generated/api/console/tags/types.gen.ts b/packages/contracts/generated/api/console/tags/types.gen.ts new file mode 100644 index 0000000000..f3c3b1eb3f --- /dev/null +++ b/packages/contracts/generated/api/console/tags/types.gen.ts @@ -0,0 +1,84 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type TagResponse = { + binding_count?: string | null + id: string + name: string + type?: string | null +} + +export type TagBasePayload = { + name: string + type: TagType +} + +export type TagType = 'knowledge' | 'app' + +export type GetTagsData = { + body?: never + path?: never + query?: { + type?: string + keyword?: string + } + url: '/tags' +} + +export type GetTagsResponses = { + 200: Array +} + +export type GetTagsResponse = GetTagsResponses[keyof GetTagsResponses] + +export type PostTagsData = { + body: TagBasePayload + path?: never + query?: never + url: '/tags' +} + +export type PostTagsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostTagsResponse = PostTagsResponses[keyof PostTagsResponses] + +export type DeleteTagsByTagIdData = { + body?: never + path: { 
+ tag_id: string + } + query?: never + url: '/tags/{tag_id}' +} + +export type DeleteTagsByTagIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteTagsByTagIdResponse = DeleteTagsByTagIdResponses[keyof DeleteTagsByTagIdResponses] + +export type PatchTagsByTagIdData = { + body: TagBasePayload + path: { + tag_id: string + } + query?: never + url: '/tags/{tag_id}' +} + +export type PatchTagsByTagIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchTagsByTagIdResponse = PatchTagsByTagIdResponses[keyof PatchTagsByTagIdResponses] diff --git a/packages/contracts/generated/api/console/tags/zod.gen.ts b/packages/contracts/generated/api/console/tags/zod.gen.ts new file mode 100644 index 0000000000..4bb8e1783c --- /dev/null +++ b/packages/contracts/generated/api/console/tags/zod.gen.ts @@ -0,0 +1,65 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * TagResponse + */ +export const zTagResponse = z.object({ + binding_count: z.string().nullish(), + id: z.string(), + name: z.string(), + type: z.string().nullish(), +}) + +/** + * TagType + * + * Tag type + */ +export const zTagType = z.enum(['knowledge', 'app']) + +/** + * TagBasePayload + */ +export const zTagBasePayload = z.object({ + name: z.string().min(1).max(50), + type: zTagType, +}) + +export const zGetTagsQuery = z.object({ + type: z.string().optional(), + keyword: z.string().optional(), +}) + +/** + * Success + */ +export const zGetTagsResponse = z.array(zTagResponse) + +export const zPostTagsBody = zTagBasePayload + +/** + * Success + */ +export const zPostTagsResponse = z.record(z.string(), z.unknown()) + +export const zDeleteTagsByTagIdPath = z.object({ + tag_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteTagsByTagIdResponse = z.record(z.string(), z.unknown()) + +export const zPatchTagsByTagIdBody = zTagBasePayload + +export const zPatchTagsByTagIdPath = z.object({ + tag_id: z.string(), +}) + +/** + * 
Success + */ +export const zPatchTagsByTagIdResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/test/orpc.gen.ts b/packages/contracts/generated/api/console/test/orpc.gen.ts new file mode 100644 index 0000000000..1bdf526b70 --- /dev/null +++ b/packages/contracts/generated/api/console/test/orpc.gen.ts @@ -0,0 +1,33 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { zPostTestRetrievalBody, zPostTestRetrievalResponse } from './zod.gen' + +/** + * Bedrock retrieval test (internal use only) + */ +export const post = oc + .route({ + description: 'Bedrock retrieval test (internal use only)', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postTestRetrieval', + path: '/test/retrieval', + tags: ['console'], + }) + .input(z.object({ body: zPostTestRetrievalBody })) + .output(zPostTestRetrievalResponse) + +export const retrieval = { + post, +} + +export const test = { + retrieval, +} + +export const contract = { + test, +} diff --git a/packages/contracts/generated/api/console/test/types.gen.ts b/packages/contracts/generated/api/console/test/types.gen.ts new file mode 100644 index 0000000000..3e04b732ee --- /dev/null +++ b/packages/contracts/generated/api/console/test/types.gen.ts @@ -0,0 +1,31 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type BedrockRetrievalPayload = { + knowledge_id: string + query: string + retrieval_setting: BedrockRetrievalSetting +} + +export type BedrockRetrievalSetting = { + score_threshold?: number + top_k?: number | null +} + +export type PostTestRetrievalData = { + body: BedrockRetrievalPayload + path?: never + query?: never + url: '/test/retrieval' +} + +export type PostTestRetrievalResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostTestRetrievalResponse = 
PostTestRetrievalResponses[keyof PostTestRetrievalResponses] diff --git a/packages/contracts/generated/api/console/test/zod.gen.ts b/packages/contracts/generated/api/console/test/zod.gen.ts new file mode 100644 index 0000000000..9421c6c03f --- /dev/null +++ b/packages/contracts/generated/api/console/test/zod.gen.ts @@ -0,0 +1,29 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * BedrockRetrievalSetting + * + * Retrieval settings for Amazon Bedrock knowledge base queries. + */ +export const zBedrockRetrievalSetting = z.object({ + score_threshold: z.number().optional().default(0), + top_k: z.int().nullish(), +}) + +/** + * BedrockRetrievalPayload + */ +export const zBedrockRetrievalPayload = z.object({ + knowledge_id: z.string(), + query: z.string(), + retrieval_setting: zBedrockRetrievalSetting, +}) + +export const zPostTestRetrievalBody = zBedrockRetrievalPayload + +/** + * Bedrock retrieval test completed + */ +export const zPostTestRetrievalResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/trial-apps/orpc.gen.ts b/packages/contracts/generated/api/console/trial-apps/orpc.gen.ts new file mode 100644 index 0000000000..eca85c206f --- /dev/null +++ b/packages/contracts/generated/api/console/trial-apps/orpc.gen.ts @@ -0,0 +1,298 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zGetTrialAppsByAppIdDatasetsPath, + zGetTrialAppsByAppIdDatasetsResponse, + zGetTrialAppsByAppIdMessagesByMessageIdSuggestedQuestionsPath, + zGetTrialAppsByAppIdMessagesByMessageIdSuggestedQuestionsResponse, + zGetTrialAppsByAppIdParametersPath, + zGetTrialAppsByAppIdParametersResponse, + zGetTrialAppsByAppIdPath, + zGetTrialAppsByAppIdResponse, + zGetTrialAppsByAppIdSitePath, + zGetTrialAppsByAppIdSiteResponse, + zGetTrialAppsByAppIdWorkflowsPath, + zGetTrialAppsByAppIdWorkflowsResponse, + 
zPostTrialAppsByAppIdAudioToTextPath, + zPostTrialAppsByAppIdAudioToTextResponse, + zPostTrialAppsByAppIdChatMessagesBody, + zPostTrialAppsByAppIdChatMessagesPath, + zPostTrialAppsByAppIdChatMessagesResponse, + zPostTrialAppsByAppIdCompletionMessagesBody, + zPostTrialAppsByAppIdCompletionMessagesPath, + zPostTrialAppsByAppIdCompletionMessagesResponse, + zPostTrialAppsByAppIdTextToAudioBody, + zPostTrialAppsByAppIdTextToAudioPath, + zPostTrialAppsByAppIdTextToAudioResponse, + zPostTrialAppsByAppIdWorkflowsRunBody, + zPostTrialAppsByAppIdWorkflowsRunPath, + zPostTrialAppsByAppIdWorkflowsRunResponse, + zPostTrialAppsByAppIdWorkflowsTasksByTaskIdStopPath, + zPostTrialAppsByAppIdWorkflowsTasksByTaskIdStopResponse, +} from './zod.gen' + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postTrialAppsByAppIdAudioToText', + path: '/trial-apps/{app_id}/audio-to-text', + tags: ['console'], + }) + .input(z.object({ params: zPostTrialAppsByAppIdAudioToTextPath })) + .output(zPostTrialAppsByAppIdAudioToTextResponse) + +export const audioToText = { + post, +} + +export const post2 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postTrialAppsByAppIdChatMessages', + path: '/trial-apps/{app_id}/chat-messages', + tags: ['console'], + }) + .input( + z.object({ + body: zPostTrialAppsByAppIdChatMessagesBody, + params: zPostTrialAppsByAppIdChatMessagesPath, + }), + ) + .output(zPostTrialAppsByAppIdChatMessagesResponse) + +export const chatMessages = { + post: post2, +} + +export const post3 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postTrialAppsByAppIdCompletionMessages', + path: '/trial-apps/{app_id}/completion-messages', + tags: ['console'], + }) + .input( + z.object({ + body: zPostTrialAppsByAppIdCompletionMessagesBody, + params: zPostTrialAppsByAppIdCompletionMessagesPath, + }), + ) + .output(zPostTrialAppsByAppIdCompletionMessagesResponse) + +export const 
completionMessages = { + post: post3, +} + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getTrialAppsByAppIdDatasets', + path: '/trial-apps/{app_id}/datasets', + tags: ['console'], + }) + .input(z.object({ params: zGetTrialAppsByAppIdDatasetsPath })) + .output(zGetTrialAppsByAppIdDatasetsResponse) + +export const datasets = { + get, +} + +export const get2 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getTrialAppsByAppIdMessagesByMessageIdSuggestedQuestions', + path: '/trial-apps/{app_id}/messages/{message_id}/suggested-questions', + tags: ['console'], + }) + .input(z.object({ params: zGetTrialAppsByAppIdMessagesByMessageIdSuggestedQuestionsPath })) + .output(zGetTrialAppsByAppIdMessagesByMessageIdSuggestedQuestionsResponse) + +export const suggestedQuestions = { + get: get2, +} + +export const byMessageId = { + suggestedQuestions, +} + +export const messages = { + byMessageId, +} + +/** + * Retrieve app parameters + */ +export const get3 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getTrialAppsByAppIdParameters', + path: '/trial-apps/{app_id}/parameters', + summary: 'Retrieve app parameters', + tags: ['console'], + }) + .input(z.object({ params: zGetTrialAppsByAppIdParametersPath })) + .output(zGetTrialAppsByAppIdParametersResponse) + +export const parameters = { + get: get3, +} + +/** + * Retrieve app site info + * + * Returns the site configuration for the application including theme, icons, and text. 
+ */ +export const get4 = oc + .route({ + description: + 'Returns the site configuration for the application including theme, icons, and text.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getTrialAppsByAppIdSite', + path: '/trial-apps/{app_id}/site', + summary: 'Retrieve app site info', + tags: ['console'], + }) + .input(z.object({ params: zGetTrialAppsByAppIdSitePath })) + .output(zGetTrialAppsByAppIdSiteResponse) + +export const site = { + get: get4, +} + +export const post4 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postTrialAppsByAppIdTextToAudio', + path: '/trial-apps/{app_id}/text-to-audio', + tags: ['console'], + }) + .input( + z.object({ + body: zPostTrialAppsByAppIdTextToAudioBody, + params: zPostTrialAppsByAppIdTextToAudioPath, + }), + ) + .output(zPostTrialAppsByAppIdTextToAudioResponse) + +export const textToAudio = { + post: post4, +} + +/** + * Run workflow + */ +export const post5 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postTrialAppsByAppIdWorkflowsRun', + path: '/trial-apps/{app_id}/workflows/run', + summary: 'Run workflow', + tags: ['console'], + }) + .input( + z.object({ + body: zPostTrialAppsByAppIdWorkflowsRunBody, + params: zPostTrialAppsByAppIdWorkflowsRunPath, + }), + ) + .output(zPostTrialAppsByAppIdWorkflowsRunResponse) + +export const run = { + post: post5, +} + +/** + * Stop workflow task + */ +export const post6 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postTrialAppsByAppIdWorkflowsTasksByTaskIdStop', + path: '/trial-apps/{app_id}/workflows/tasks/{task_id}/stop', + summary: 'Stop workflow task', + tags: ['console'], + }) + .input(z.object({ params: zPostTrialAppsByAppIdWorkflowsTasksByTaskIdStopPath })) + .output(zPostTrialAppsByAppIdWorkflowsTasksByTaskIdStopResponse) + +export const stop = { + post: post6, +} + +export const byTaskId = { + stop, +} + +export const tasks = { + byTaskId, +} + +/** + * Get 
workflow detail + */ +export const get5 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getTrialAppsByAppIdWorkflows', + path: '/trial-apps/{app_id}/workflows', + summary: 'Get workflow detail', + tags: ['console'], + }) + .input(z.object({ params: zGetTrialAppsByAppIdWorkflowsPath })) + .output(zGetTrialAppsByAppIdWorkflowsResponse) + +export const workflows = { + get: get5, + run, + tasks, +} + +/** + * Get app detail + */ +export const get6 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getTrialAppsByAppId', + path: '/trial-apps/{app_id}', + summary: 'Get app detail', + tags: ['console'], + }) + .input(z.object({ params: zGetTrialAppsByAppIdPath })) + .output(zGetTrialAppsByAppIdResponse) + +export const byAppId = { + get: get6, + audioToText, + chatMessages, + completionMessages, + datasets, + messages, + parameters, + site, + textToAudio, + workflows, +} + +export const trialApps = { + byAppId, +} + +export const contract = { + trialApps, +} diff --git a/packages/contracts/generated/api/console/trial-apps/types.gen.ts b/packages/contracts/generated/api/console/trial-apps/types.gen.ts new file mode 100644 index 0000000000..2965aafebf --- /dev/null +++ b/packages/contracts/generated/api/console/trial-apps/types.gen.ts @@ -0,0 +1,258 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type ChatRequest = { + conversation_id?: string | null + files?: Array | null + inputs: { + [key: string]: unknown + } + parent_message_id?: string | null + query: string + retriever_from?: string +} + +export type CompletionRequest = { + files?: Array | null + inputs: { + [key: string]: unknown + } + query?: string + response_mode?: 'blocking' | 'streaming' | null + retriever_from?: string +} + +export type TextToSpeechRequest = { + message_id?: string | null + streaming?: boolean | null + text?: string | 
null + voice?: string | null +} + +export type WorkflowRunRequest = { + files?: Array | null + inputs: { + [key: string]: unknown + } +} + +export type GetTrialAppsByAppIdData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/trial-apps/{app_id}' +} + +export type GetTrialAppsByAppIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetTrialAppsByAppIdResponse + = GetTrialAppsByAppIdResponses[keyof GetTrialAppsByAppIdResponses] + +export type PostTrialAppsByAppIdAudioToTextData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/trial-apps/{app_id}/audio-to-text' +} + +export type PostTrialAppsByAppIdAudioToTextResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostTrialAppsByAppIdAudioToTextResponse + = PostTrialAppsByAppIdAudioToTextResponses[keyof PostTrialAppsByAppIdAudioToTextResponses] + +export type PostTrialAppsByAppIdChatMessagesData = { + body: ChatRequest + path: { + app_id: string + } + query?: never + url: '/trial-apps/{app_id}/chat-messages' +} + +export type PostTrialAppsByAppIdChatMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostTrialAppsByAppIdChatMessagesResponse + = PostTrialAppsByAppIdChatMessagesResponses[keyof PostTrialAppsByAppIdChatMessagesResponses] + +export type PostTrialAppsByAppIdCompletionMessagesData = { + body: CompletionRequest + path: { + app_id: string + } + query?: never + url: '/trial-apps/{app_id}/completion-messages' +} + +export type PostTrialAppsByAppIdCompletionMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostTrialAppsByAppIdCompletionMessagesResponse + = PostTrialAppsByAppIdCompletionMessagesResponses[keyof PostTrialAppsByAppIdCompletionMessagesResponses] + +export type GetTrialAppsByAppIdDatasetsData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/trial-apps/{app_id}/datasets' +} + +export type GetTrialAppsByAppIdDatasetsResponses = { + 200: { + 
[key: string]: unknown + } +} + +export type GetTrialAppsByAppIdDatasetsResponse + = GetTrialAppsByAppIdDatasetsResponses[keyof GetTrialAppsByAppIdDatasetsResponses] + +export type GetTrialAppsByAppIdMessagesByMessageIdSuggestedQuestionsData = { + body?: never + path: { + app_id: string + message_id: string + } + query?: never + url: '/trial-apps/{app_id}/messages/{message_id}/suggested-questions' +} + +export type GetTrialAppsByAppIdMessagesByMessageIdSuggestedQuestionsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetTrialAppsByAppIdMessagesByMessageIdSuggestedQuestionsResponse + = GetTrialAppsByAppIdMessagesByMessageIdSuggestedQuestionsResponses[keyof GetTrialAppsByAppIdMessagesByMessageIdSuggestedQuestionsResponses] + +export type GetTrialAppsByAppIdParametersData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/trial-apps/{app_id}/parameters' +} + +export type GetTrialAppsByAppIdParametersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetTrialAppsByAppIdParametersResponse + = GetTrialAppsByAppIdParametersResponses[keyof GetTrialAppsByAppIdParametersResponses] + +export type GetTrialAppsByAppIdSiteData = { + body?: never + path: { + app_id: string + } + query?: never + url: '/trial-apps/{app_id}/site' +} + +export type GetTrialAppsByAppIdSiteResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetTrialAppsByAppIdSiteResponse + = GetTrialAppsByAppIdSiteResponses[keyof GetTrialAppsByAppIdSiteResponses] + +export type PostTrialAppsByAppIdTextToAudioData = { + body: TextToSpeechRequest + path: { + app_id: string + } + query?: never + url: '/trial-apps/{app_id}/text-to-audio' +} + +export type PostTrialAppsByAppIdTextToAudioResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostTrialAppsByAppIdTextToAudioResponse + = PostTrialAppsByAppIdTextToAudioResponses[keyof PostTrialAppsByAppIdTextToAudioResponses] + +export type GetTrialAppsByAppIdWorkflowsData = { + 
body?: never + path: { + app_id: string + } + query?: never + url: '/trial-apps/{app_id}/workflows' +} + +export type GetTrialAppsByAppIdWorkflowsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetTrialAppsByAppIdWorkflowsResponse + = GetTrialAppsByAppIdWorkflowsResponses[keyof GetTrialAppsByAppIdWorkflowsResponses] + +export type PostTrialAppsByAppIdWorkflowsRunData = { + body: WorkflowRunRequest + path: { + app_id: string + } + query?: never + url: '/trial-apps/{app_id}/workflows/run' +} + +export type PostTrialAppsByAppIdWorkflowsRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostTrialAppsByAppIdWorkflowsRunResponse + = PostTrialAppsByAppIdWorkflowsRunResponses[keyof PostTrialAppsByAppIdWorkflowsRunResponses] + +export type PostTrialAppsByAppIdWorkflowsTasksByTaskIdStopData = { + body?: never + path: { + app_id: string + task_id: string + } + query?: never + url: '/trial-apps/{app_id}/workflows/tasks/{task_id}/stop' +} + +export type PostTrialAppsByAppIdWorkflowsTasksByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostTrialAppsByAppIdWorkflowsTasksByTaskIdStopResponse + = PostTrialAppsByAppIdWorkflowsTasksByTaskIdStopResponses[keyof PostTrialAppsByAppIdWorkflowsTasksByTaskIdStopResponses] diff --git a/packages/contracts/generated/api/console/trial-apps/zod.gen.ts b/packages/contracts/generated/api/console/trial-apps/zod.gen.ts new file mode 100644 index 0000000000..f7a52425a2 --- /dev/null +++ b/packages/contracts/generated/api/console/trial-apps/zod.gen.ts @@ -0,0 +1,168 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * ChatRequest + */ +export const zChatRequest = z.object({ + conversation_id: z.string().nullish(), + files: z.array(z.unknown()).nullish(), + inputs: z.record(z.string(), z.unknown()), + parent_message_id: z.string().nullish(), + query: z.string(), + retriever_from: z.string().optional().default('explore_app'), +}) + 
+/** + * CompletionRequest + */ +export const zCompletionRequest = z.object({ + files: z.array(z.unknown()).nullish(), + inputs: z.record(z.string(), z.unknown()), + query: z.string().optional().default(''), + response_mode: z.enum(['blocking', 'streaming']).nullish(), + retriever_from: z.string().optional().default('explore_app'), +}) + +/** + * TextToSpeechRequest + */ +export const zTextToSpeechRequest = z.object({ + message_id: z.string().nullish(), + streaming: z.boolean().nullish(), + text: z.string().nullish(), + voice: z.string().nullish(), +}) + +/** + * WorkflowRunRequest + */ +export const zWorkflowRunRequest = z.object({ + files: z.array(z.unknown()).nullish(), + inputs: z.record(z.string(), z.unknown()), +}) + +export const zGetTrialAppsByAppIdPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zGetTrialAppsByAppIdResponse = z.record(z.string(), z.unknown()) + +export const zPostTrialAppsByAppIdAudioToTextPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zPostTrialAppsByAppIdAudioToTextResponse = z.record(z.string(), z.unknown()) + +export const zPostTrialAppsByAppIdChatMessagesBody = zChatRequest + +export const zPostTrialAppsByAppIdChatMessagesPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zPostTrialAppsByAppIdChatMessagesResponse = z.record(z.string(), z.unknown()) + +export const zPostTrialAppsByAppIdCompletionMessagesBody = zCompletionRequest + +export const zPostTrialAppsByAppIdCompletionMessagesPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zPostTrialAppsByAppIdCompletionMessagesResponse = z.record(z.string(), z.unknown()) + +export const zGetTrialAppsByAppIdDatasetsPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zGetTrialAppsByAppIdDatasetsResponse = z.record(z.string(), z.unknown()) + +export const zGetTrialAppsByAppIdMessagesByMessageIdSuggestedQuestionsPath = z.object({ + 
app_id: z.string(), + message_id: z.string(), +}) + +/** + * Success + */ +export const zGetTrialAppsByAppIdMessagesByMessageIdSuggestedQuestionsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetTrialAppsByAppIdParametersPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zGetTrialAppsByAppIdParametersResponse = z.record(z.string(), z.unknown()) + +export const zGetTrialAppsByAppIdSitePath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zGetTrialAppsByAppIdSiteResponse = z.record(z.string(), z.unknown()) + +export const zPostTrialAppsByAppIdTextToAudioBody = zTextToSpeechRequest + +export const zPostTrialAppsByAppIdTextToAudioPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zPostTrialAppsByAppIdTextToAudioResponse = z.record(z.string(), z.unknown()) + +export const zGetTrialAppsByAppIdWorkflowsPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zGetTrialAppsByAppIdWorkflowsResponse = z.record(z.string(), z.unknown()) + +export const zPostTrialAppsByAppIdWorkflowsRunBody = zWorkflowRunRequest + +export const zPostTrialAppsByAppIdWorkflowsRunPath = z.object({ + app_id: z.string(), +}) + +/** + * Success + */ +export const zPostTrialAppsByAppIdWorkflowsRunResponse = z.record(z.string(), z.unknown()) + +export const zPostTrialAppsByAppIdWorkflowsTasksByTaskIdStopPath = z.object({ + app_id: z.string(), + task_id: z.string(), +}) + +/** + * Success + */ +export const zPostTrialAppsByAppIdWorkflowsTasksByTaskIdStopResponse = z.record( + z.string(), + z.unknown(), +) diff --git a/packages/contracts/generated/api/console/website/orpc.gen.ts b/packages/contracts/generated/api/console/website/orpc.gen.ts new file mode 100644 index 0000000000..698f656967 --- /dev/null +++ b/packages/contracts/generated/api/console/website/orpc.gen.ts @@ -0,0 +1,68 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from 
'@orpc/contract' +import * as z from 'zod' + +import { + zGetWebsiteCrawlStatusByJobIdPath, + zGetWebsiteCrawlStatusByJobIdQuery, + zGetWebsiteCrawlStatusByJobIdResponse, + zPostWebsiteCrawlBody, + zPostWebsiteCrawlResponse, +} from './zod.gen' + +/** + * Get website crawl status + */ +export const get = oc + .route({ + description: 'Get website crawl status', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWebsiteCrawlStatusByJobId', + path: '/website/crawl/status/{job_id}', + tags: ['console'], + }) + .input( + z.object({ + params: zGetWebsiteCrawlStatusByJobIdPath, + query: zGetWebsiteCrawlStatusByJobIdQuery, + }), + ) + .output(zGetWebsiteCrawlStatusByJobIdResponse) + +export const byJobId = { + get, +} + +export const status = { + byJobId, +} + +/** + * Crawl website content + */ +export const post = oc + .route({ + description: 'Crawl website content', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWebsiteCrawl', + path: '/website/crawl', + tags: ['console'], + }) + .input(z.object({ body: zPostWebsiteCrawlBody })) + .output(zPostWebsiteCrawlResponse) + +export const crawl = { + post, + status, +} + +export const website = { + crawl, +} + +export const contract = { + website, +} diff --git a/packages/contracts/generated/api/console/website/types.gen.ts b/packages/contracts/generated/api/console/website/types.gen.ts new file mode 100644 index 0000000000..e47b11a819 --- /dev/null +++ b/packages/contracts/generated/api/console/website/types.gen.ts @@ -0,0 +1,68 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type WebsiteCrawlPayload = { + options: { + [key: string]: unknown + } + provider: 'firecrawl' | 'watercrawl' | 'jinareader' + url: string +} + +export type PostWebsiteCrawlData = { + body: WebsiteCrawlPayload + path?: never + query?: never + url: '/website/crawl' +} + +export type 
PostWebsiteCrawlErrors = { + 400: { + [key: string]: unknown + } +} + +export type PostWebsiteCrawlError = PostWebsiteCrawlErrors[keyof PostWebsiteCrawlErrors] + +export type PostWebsiteCrawlResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWebsiteCrawlResponse = PostWebsiteCrawlResponses[keyof PostWebsiteCrawlResponses] + +export type GetWebsiteCrawlStatusByJobIdData = { + body?: never + path: { + job_id: string + } + query: { + provider: 'firecrawl' | 'watercrawl' | 'jinareader' + } + url: '/website/crawl/status/{job_id}' +} + +export type GetWebsiteCrawlStatusByJobIdErrors = { + 400: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetWebsiteCrawlStatusByJobIdError + = GetWebsiteCrawlStatusByJobIdErrors[keyof GetWebsiteCrawlStatusByJobIdErrors] + +export type GetWebsiteCrawlStatusByJobIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWebsiteCrawlStatusByJobIdResponse + = GetWebsiteCrawlStatusByJobIdResponses[keyof GetWebsiteCrawlStatusByJobIdResponses] diff --git a/packages/contracts/generated/api/console/website/zod.gen.ts b/packages/contracts/generated/api/console/website/zod.gen.ts new file mode 100644 index 0000000000..a7590ec9ee --- /dev/null +++ b/packages/contracts/generated/api/console/website/zod.gen.ts @@ -0,0 +1,32 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * WebsiteCrawlPayload + */ +export const zWebsiteCrawlPayload = z.object({ + options: z.record(z.string(), z.unknown()), + provider: z.enum(['firecrawl', 'watercrawl', 'jinareader']), + url: z.string(), +}) + +export const zPostWebsiteCrawlBody = zWebsiteCrawlPayload + +/** + * Website crawl initiated successfully + */ +export const zPostWebsiteCrawlResponse = z.record(z.string(), z.unknown()) + +export const zGetWebsiteCrawlStatusByJobIdPath = z.object({ + job_id: z.string(), +}) + +export const zGetWebsiteCrawlStatusByJobIdQuery = z.object({ + provider: 
z.enum(['firecrawl', 'watercrawl', 'jinareader']), +}) + +/** + * Crawl status retrieved successfully + */ +export const zGetWebsiteCrawlStatusByJobIdResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/workflow/orpc.gen.ts b/packages/contracts/generated/api/console/workflow/orpc.gen.ts new file mode 100644 index 0000000000..bf139e6ac1 --- /dev/null +++ b/packages/contracts/generated/api/console/workflow/orpc.gen.ts @@ -0,0 +1,74 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zGetWorkflowByWorkflowRunIdEventsPath, + zGetWorkflowByWorkflowRunIdEventsResponse, + zGetWorkflowByWorkflowRunIdPauseDetailsPath, + zGetWorkflowByWorkflowRunIdPauseDetailsResponse, +} from './zod.gen' + +/** + * Get workflow execution events stream after resume + * + * GET /console/api/workflow//events + * + * Returns Server-Sent Events stream. + */ +export const get = oc + .route({ + description: + 'GET /console/api/workflow//events\n\nReturns Server-Sent Events stream.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkflowByWorkflowRunIdEvents', + path: '/workflow/{workflow_run_id}/events', + summary: 'Get workflow execution events stream after resume', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkflowByWorkflowRunIdEventsPath })) + .output(zGetWorkflowByWorkflowRunIdEventsResponse) + +export const events = { + get, +} + +/** + * Get workflow pause details + * + * GET /console/api/workflow//pause-details + * + * Returns information about why and where the workflow is paused. 
+ */ +export const get2 = oc + .route({ + description: + 'GET /console/api/workflow//pause-details\n\nReturns information about why and where the workflow is paused.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkflowByWorkflowRunIdPauseDetails', + path: '/workflow/{workflow_run_id}/pause-details', + summary: 'Get workflow pause details', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkflowByWorkflowRunIdPauseDetailsPath })) + .output(zGetWorkflowByWorkflowRunIdPauseDetailsResponse) + +export const pauseDetails = { + get: get2, +} + +export const byWorkflowRunId = { + events, + pauseDetails, +} + +export const workflow = { + byWorkflowRunId, +} + +export const contract = { + workflow, +} diff --git a/packages/contracts/generated/api/console/workflow/types.gen.ts b/packages/contracts/generated/api/console/workflow/types.gen.ts new file mode 100644 index 0000000000..a3fae60eae --- /dev/null +++ b/packages/contracts/generated/api/console/workflow/types.gen.ts @@ -0,0 +1,41 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type GetWorkflowByWorkflowRunIdEventsData = { + body?: never + path: { + workflow_run_id: string + } + query?: never + url: '/workflow/{workflow_run_id}/events' +} + +export type GetWorkflowByWorkflowRunIdEventsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkflowByWorkflowRunIdEventsResponse + = GetWorkflowByWorkflowRunIdEventsResponses[keyof GetWorkflowByWorkflowRunIdEventsResponses] + +export type GetWorkflowByWorkflowRunIdPauseDetailsData = { + body?: never + path: { + workflow_run_id: string + } + query?: never + url: '/workflow/{workflow_run_id}/pause-details' +} + +export type GetWorkflowByWorkflowRunIdPauseDetailsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkflowByWorkflowRunIdPauseDetailsResponse + = 
GetWorkflowByWorkflowRunIdPauseDetailsResponses[keyof GetWorkflowByWorkflowRunIdPauseDetailsResponses] diff --git a/packages/contracts/generated/api/console/workflow/zod.gen.ts b/packages/contracts/generated/api/console/workflow/zod.gen.ts new file mode 100644 index 0000000000..315085f60f --- /dev/null +++ b/packages/contracts/generated/api/console/workflow/zod.gen.ts @@ -0,0 +1,21 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +export const zGetWorkflowByWorkflowRunIdEventsPath = z.object({ + workflow_run_id: z.string(), +}) + +/** + * Success + */ +export const zGetWorkflowByWorkflowRunIdEventsResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkflowByWorkflowRunIdPauseDetailsPath = z.object({ + workflow_run_id: z.string(), +}) + +/** + * Success + */ +export const zGetWorkflowByWorkflowRunIdPauseDetailsResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/api/console/workspaces/orpc.gen.ts b/packages/contracts/generated/api/console/workspaces/orpc.gen.ts new file mode 100644 index 0000000000..4d16e3120f --- /dev/null +++ b/packages/contracts/generated/api/console/workspaces/orpc.gen.ts @@ -0,0 +1,3012 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zDeleteWorkspacesCurrentEndpointsByIdPath, + zDeleteWorkspacesCurrentEndpointsByIdResponse, + zDeleteWorkspacesCurrentMembersByMemberIdPath, + zDeleteWorkspacesCurrentMembersByMemberIdResponse, + zDeleteWorkspacesCurrentModelProvidersByProviderCredentialsBody, + zDeleteWorkspacesCurrentModelProvidersByProviderCredentialsPath, + zDeleteWorkspacesCurrentModelProvidersByProviderCredentialsResponse, + zDeleteWorkspacesCurrentModelProvidersByProviderModelsBody, + zDeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsBody, + zDeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsPath, + 
zDeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse, + zDeleteWorkspacesCurrentModelProvidersByProviderModelsPath, + zDeleteWorkspacesCurrentModelProvidersByProviderModelsResponse, + zDeleteWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientPath, + zDeleteWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponse, + zDeleteWorkspacesCurrentToolProviderMcpBody, + zDeleteWorkspacesCurrentToolProviderMcpResponse, + zDeleteWorkspacesCurrentTriggerProviderByProviderOauthClientPath, + zDeleteWorkspacesCurrentTriggerProviderByProviderOauthClientResponse, + zGetWorkspacesByTenantIdModelProvidersByProviderByIconTypeByLangPath, + zGetWorkspacesByTenantIdModelProvidersByProviderByIconTypeByLangResponse, + zGetWorkspacesCurrentAgentProviderByProviderNamePath, + zGetWorkspacesCurrentAgentProviderByProviderNameResponse, + zGetWorkspacesCurrentAgentProvidersResponse, + zGetWorkspacesCurrentDatasetOperatorsResponse, + zGetWorkspacesCurrentDefaultModelQuery, + zGetWorkspacesCurrentDefaultModelResponse, + zGetWorkspacesCurrentEndpointsListPluginQuery, + zGetWorkspacesCurrentEndpointsListPluginResponse, + zGetWorkspacesCurrentEndpointsListQuery, + zGetWorkspacesCurrentEndpointsListResponse, + zGetWorkspacesCurrentMembersResponse, + zGetWorkspacesCurrentModelProvidersByProviderCheckoutUrlPath, + zGetWorkspacesCurrentModelProvidersByProviderCheckoutUrlResponse, + zGetWorkspacesCurrentModelProvidersByProviderCredentialsPath, + zGetWorkspacesCurrentModelProvidersByProviderCredentialsQuery, + zGetWorkspacesCurrentModelProvidersByProviderCredentialsResponse, + zGetWorkspacesCurrentModelProvidersByProviderModelsCredentialsPath, + zGetWorkspacesCurrentModelProvidersByProviderModelsCredentialsQuery, + zGetWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse, + zGetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesPath, + zGetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesQuery, + 
zGetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesResponse, + zGetWorkspacesCurrentModelProvidersByProviderModelsPath, + zGetWorkspacesCurrentModelProvidersByProviderModelsResponse, + zGetWorkspacesCurrentModelProvidersQuery, + zGetWorkspacesCurrentModelProvidersResponse, + zGetWorkspacesCurrentModelsModelTypesByModelTypePath, + zGetWorkspacesCurrentModelsModelTypesByModelTypeResponse, + zGetWorkspacesCurrentPermissionResponse, + zGetWorkspacesCurrentPluginAssetQuery, + zGetWorkspacesCurrentPluginAssetResponse, + zGetWorkspacesCurrentPluginDebuggingKeyResponse, + zGetWorkspacesCurrentPluginFetchManifestQuery, + zGetWorkspacesCurrentPluginFetchManifestResponse, + zGetWorkspacesCurrentPluginIconQuery, + zGetWorkspacesCurrentPluginIconResponse, + zGetWorkspacesCurrentPluginListQuery, + zGetWorkspacesCurrentPluginListResponse, + zGetWorkspacesCurrentPluginMarketplacePkgQuery, + zGetWorkspacesCurrentPluginMarketplacePkgResponse, + zGetWorkspacesCurrentPluginParametersDynamicOptionsQuery, + zGetWorkspacesCurrentPluginParametersDynamicOptionsResponse, + zGetWorkspacesCurrentPluginPermissionFetchResponse, + zGetWorkspacesCurrentPluginPreferencesFetchResponse, + zGetWorkspacesCurrentPluginReadmeQuery, + zGetWorkspacesCurrentPluginReadmeResponse, + zGetWorkspacesCurrentPluginTasksByTaskIdPath, + zGetWorkspacesCurrentPluginTasksByTaskIdResponse, + zGetWorkspacesCurrentPluginTasksQuery, + zGetWorkspacesCurrentPluginTasksResponse, + zGetWorkspacesCurrentToolLabelsResponse, + zGetWorkspacesCurrentToolProviderApiGetResponse, + zGetWorkspacesCurrentToolProviderApiRemoteResponse, + zGetWorkspacesCurrentToolProviderApiToolsResponse, + zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialInfoPath, + zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialInfoResponse, + zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialSchemaByCredentialTypePath, + zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialSchemaByCredentialTypeResponse, + 
zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialsPath, + zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialsResponse, + zGetWorkspacesCurrentToolProviderBuiltinByProviderIconPath, + zGetWorkspacesCurrentToolProviderBuiltinByProviderIconResponse, + zGetWorkspacesCurrentToolProviderBuiltinByProviderInfoPath, + zGetWorkspacesCurrentToolProviderBuiltinByProviderInfoResponse, + zGetWorkspacesCurrentToolProviderBuiltinByProviderOauthClientSchemaPath, + zGetWorkspacesCurrentToolProviderBuiltinByProviderOauthClientSchemaResponse, + zGetWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientPath, + zGetWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponse, + zGetWorkspacesCurrentToolProviderBuiltinByProviderToolsPath, + zGetWorkspacesCurrentToolProviderBuiltinByProviderToolsResponse, + zGetWorkspacesCurrentToolProviderMcpToolsByProviderIdPath, + zGetWorkspacesCurrentToolProviderMcpToolsByProviderIdResponse, + zGetWorkspacesCurrentToolProviderMcpUpdateByProviderIdPath, + zGetWorkspacesCurrentToolProviderMcpUpdateByProviderIdResponse, + zGetWorkspacesCurrentToolProvidersResponse, + zGetWorkspacesCurrentToolProviderWorkflowGetResponse, + zGetWorkspacesCurrentToolProviderWorkflowToolsResponse, + zGetWorkspacesCurrentToolsApiResponse, + zGetWorkspacesCurrentToolsBuiltinResponse, + zGetWorkspacesCurrentToolsMcpResponse, + zGetWorkspacesCurrentToolsWorkflowResponse, + zGetWorkspacesCurrentTriggerProviderByProviderIconPath, + zGetWorkspacesCurrentTriggerProviderByProviderIconResponse, + zGetWorkspacesCurrentTriggerProviderByProviderInfoPath, + zGetWorkspacesCurrentTriggerProviderByProviderInfoResponse, + zGetWorkspacesCurrentTriggerProviderByProviderOauthClientPath, + zGetWorkspacesCurrentTriggerProviderByProviderOauthClientResponse, + zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBySubscriptionBuilderIdPath, + zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBySubscriptionBuilderIdResponse, + 
zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderLogsBySubscriptionBuilderIdPath, + zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderLogsBySubscriptionBuilderIdResponse, + zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsListPath, + zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsListResponse, + zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsOauthAuthorizePath, + zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsOauthAuthorizeResponse, + zGetWorkspacesCurrentTriggersResponse, + zGetWorkspacesResponse, + zPatchWorkspacesCurrentEndpointsByIdBody, + zPatchWorkspacesCurrentEndpointsByIdPath, + zPatchWorkspacesCurrentEndpointsByIdResponse, + zPatchWorkspacesCurrentModelProvidersByProviderModelsDisableBody, + zPatchWorkspacesCurrentModelProvidersByProviderModelsDisablePath, + zPatchWorkspacesCurrentModelProvidersByProviderModelsDisableResponse, + zPatchWorkspacesCurrentModelProvidersByProviderModelsEnableBody, + zPatchWorkspacesCurrentModelProvidersByProviderModelsEnablePath, + zPatchWorkspacesCurrentModelProvidersByProviderModelsEnableResponse, + zPostWorkspacesCurrentDefaultModelBody, + zPostWorkspacesCurrentDefaultModelResponse, + zPostWorkspacesCurrentEndpointsBody, + zPostWorkspacesCurrentEndpointsCreateBody, + zPostWorkspacesCurrentEndpointsCreateResponse, + zPostWorkspacesCurrentEndpointsDeleteBody, + zPostWorkspacesCurrentEndpointsDeleteResponse, + zPostWorkspacesCurrentEndpointsDisableBody, + zPostWorkspacesCurrentEndpointsDisableResponse, + zPostWorkspacesCurrentEndpointsEnableBody, + zPostWorkspacesCurrentEndpointsEnableResponse, + zPostWorkspacesCurrentEndpointsResponse, + zPostWorkspacesCurrentEndpointsUpdateBody, + zPostWorkspacesCurrentEndpointsUpdateResponse, + zPostWorkspacesCurrentMembersByMemberIdOwnerTransferBody, + zPostWorkspacesCurrentMembersByMemberIdOwnerTransferPath, + zPostWorkspacesCurrentMembersByMemberIdOwnerTransferResponse, + 
zPostWorkspacesCurrentMembersInviteEmailBody, + zPostWorkspacesCurrentMembersInviteEmailResponse, + zPostWorkspacesCurrentMembersOwnerTransferCheckBody, + zPostWorkspacesCurrentMembersOwnerTransferCheckResponse, + zPostWorkspacesCurrentMembersSendOwnerTransferConfirmEmailBody, + zPostWorkspacesCurrentMembersSendOwnerTransferConfirmEmailResponse, + zPostWorkspacesCurrentModelProvidersByProviderCredentialsBody, + zPostWorkspacesCurrentModelProvidersByProviderCredentialsPath, + zPostWorkspacesCurrentModelProvidersByProviderCredentialsResponse, + zPostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchBody, + zPostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchPath, + zPostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchResponse, + zPostWorkspacesCurrentModelProvidersByProviderCredentialsValidateBody, + zPostWorkspacesCurrentModelProvidersByProviderCredentialsValidatePath, + zPostWorkspacesCurrentModelProvidersByProviderCredentialsValidateResponse, + zPostWorkspacesCurrentModelProvidersByProviderModelsBody, + zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsBody, + zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsPath, + zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse, + zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchBody, + zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchPath, + zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchResponse, + zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidateBody, + zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidatePath, + zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidateResponse, + zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidateBody, + zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidatePath, + 
zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidateResponse, + zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidateBody, + zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidatePath, + zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidateResponse, + zPostWorkspacesCurrentModelProvidersByProviderModelsPath, + zPostWorkspacesCurrentModelProvidersByProviderModelsResponse, + zPostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypeBody, + zPostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypePath, + zPostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypeResponse, + zPostWorkspacesCurrentPluginInstallGithubBody, + zPostWorkspacesCurrentPluginInstallGithubResponse, + zPostWorkspacesCurrentPluginInstallMarketplaceBody, + zPostWorkspacesCurrentPluginInstallMarketplaceResponse, + zPostWorkspacesCurrentPluginInstallPkgBody, + zPostWorkspacesCurrentPluginInstallPkgResponse, + zPostWorkspacesCurrentPluginListInstallationsIdsBody, + zPostWorkspacesCurrentPluginListInstallationsIdsResponse, + zPostWorkspacesCurrentPluginListLatestVersionsBody, + zPostWorkspacesCurrentPluginListLatestVersionsResponse, + zPostWorkspacesCurrentPluginParametersDynamicOptionsWithCredentialsBody, + zPostWorkspacesCurrentPluginParametersDynamicOptionsWithCredentialsResponse, + zPostWorkspacesCurrentPluginPermissionChangeBody, + zPostWorkspacesCurrentPluginPermissionChangeResponse, + zPostWorkspacesCurrentPluginPreferencesAutoupgradeExcludeBody, + zPostWorkspacesCurrentPluginPreferencesAutoupgradeExcludeResponse, + zPostWorkspacesCurrentPluginPreferencesChangeBody, + zPostWorkspacesCurrentPluginPreferencesChangeResponse, + zPostWorkspacesCurrentPluginTasksByTaskIdDeleteByIdentifierPath, + zPostWorkspacesCurrentPluginTasksByTaskIdDeleteByIdentifierResponse, + 
zPostWorkspacesCurrentPluginTasksByTaskIdDeletePath, + zPostWorkspacesCurrentPluginTasksByTaskIdDeleteResponse, + zPostWorkspacesCurrentPluginTasksDeleteAllResponse, + zPostWorkspacesCurrentPluginUninstallBody, + zPostWorkspacesCurrentPluginUninstallResponse, + zPostWorkspacesCurrentPluginUpgradeGithubBody, + zPostWorkspacesCurrentPluginUpgradeGithubResponse, + zPostWorkspacesCurrentPluginUpgradeMarketplaceBody, + zPostWorkspacesCurrentPluginUpgradeMarketplaceResponse, + zPostWorkspacesCurrentPluginUploadBundleResponse, + zPostWorkspacesCurrentPluginUploadGithubBody, + zPostWorkspacesCurrentPluginUploadGithubResponse, + zPostWorkspacesCurrentPluginUploadPkgResponse, + zPostWorkspacesCurrentResponse, + zPostWorkspacesCurrentToolProviderApiAddBody, + zPostWorkspacesCurrentToolProviderApiAddResponse, + zPostWorkspacesCurrentToolProviderApiDeleteBody, + zPostWorkspacesCurrentToolProviderApiDeleteResponse, + zPostWorkspacesCurrentToolProviderApiSchemaBody, + zPostWorkspacesCurrentToolProviderApiSchemaResponse, + zPostWorkspacesCurrentToolProviderApiTestPreBody, + zPostWorkspacesCurrentToolProviderApiTestPreResponse, + zPostWorkspacesCurrentToolProviderApiUpdateBody, + zPostWorkspacesCurrentToolProviderApiUpdateResponse, + zPostWorkspacesCurrentToolProviderBuiltinByProviderAddBody, + zPostWorkspacesCurrentToolProviderBuiltinByProviderAddPath, + zPostWorkspacesCurrentToolProviderBuiltinByProviderAddResponse, + zPostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialBody, + zPostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialPath, + zPostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialResponse, + zPostWorkspacesCurrentToolProviderBuiltinByProviderDeleteBody, + zPostWorkspacesCurrentToolProviderBuiltinByProviderDeletePath, + zPostWorkspacesCurrentToolProviderBuiltinByProviderDeleteResponse, + zPostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientBody, + 
zPostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientPath, + zPostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponse, + zPostWorkspacesCurrentToolProviderBuiltinByProviderUpdateBody, + zPostWorkspacesCurrentToolProviderBuiltinByProviderUpdatePath, + zPostWorkspacesCurrentToolProviderBuiltinByProviderUpdateResponse, + zPostWorkspacesCurrentToolProviderMcpAuthBody, + zPostWorkspacesCurrentToolProviderMcpAuthResponse, + zPostWorkspacesCurrentToolProviderMcpBody, + zPostWorkspacesCurrentToolProviderMcpResponse, + zPostWorkspacesCurrentToolProviderWorkflowCreateBody, + zPostWorkspacesCurrentToolProviderWorkflowCreateResponse, + zPostWorkspacesCurrentToolProviderWorkflowDeleteBody, + zPostWorkspacesCurrentToolProviderWorkflowDeleteResponse, + zPostWorkspacesCurrentToolProviderWorkflowUpdateBody, + zPostWorkspacesCurrentToolProviderWorkflowUpdateResponse, + zPostWorkspacesCurrentTriggerProviderByProviderOauthClientBody, + zPostWorkspacesCurrentTriggerProviderByProviderOauthClientPath, + zPostWorkspacesCurrentTriggerProviderByProviderOauthClientResponse, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdBody, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdPath, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdResponse, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreateBody, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreatePath, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreateResponse, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdBody, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdPath, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdResponse, + 
zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdBody, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdPath, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdResponse, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdBody, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdPath, + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdResponse, + zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsDeletePath, + zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsDeleteResponse, + zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdateBody, + zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdatePath, + zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdateResponse, + zPostWorkspacesCustomConfigBody, + zPostWorkspacesCustomConfigResponse, + zPostWorkspacesCustomConfigWebappLogoUploadResponse, + zPostWorkspacesInfoBody, + zPostWorkspacesInfoResponse, + zPostWorkspacesSwitchBody, + zPostWorkspacesSwitchResponse, + zPutWorkspacesCurrentMembersByMemberIdUpdateRoleBody, + zPutWorkspacesCurrentMembersByMemberIdUpdateRolePath, + zPutWorkspacesCurrentMembersByMemberIdUpdateRoleResponse, + zPutWorkspacesCurrentModelProvidersByProviderCredentialsBody, + zPutWorkspacesCurrentModelProvidersByProviderCredentialsPath, + zPutWorkspacesCurrentModelProvidersByProviderCredentialsResponse, + zPutWorkspacesCurrentModelProvidersByProviderModelsCredentialsBody, + zPutWorkspacesCurrentModelProvidersByProviderModelsCredentialsPath, + zPutWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse, + zPutWorkspacesCurrentToolProviderMcpBody, + zPutWorkspacesCurrentToolProviderMcpResponse, +} from './zod.gen' + +/** + * Get specific agent 
provider details + */ +export const get = oc + .route({ + description: 'Get specific agent provider details', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentAgentProviderByProviderName', + path: '/workspaces/current/agent-provider/{provider_name}', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentAgentProviderByProviderNamePath })) + .output(zGetWorkspacesCurrentAgentProviderByProviderNameResponse) + +export const byProviderName = { + get, +} + +export const agentProvider = { + byProviderName, +} + +/** + * Get list of available agent providers + */ +export const get2 = oc + .route({ + description: 'Get list of available agent providers', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentAgentProviders', + path: '/workspaces/current/agent-providers', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentAgentProvidersResponse) + +export const agentProviders = { + get: get2, +} + +export const get3 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentDatasetOperators', + path: '/workspaces/current/dataset-operators', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentDatasetOperatorsResponse) + +export const datasetOperators = { + get: get3, +} + +export const get4 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentDefaultModel', + path: '/workspaces/current/default-model', + tags: ['console'], + }) + .input(z.object({ query: zGetWorkspacesCurrentDefaultModelQuery })) + .output(zGetWorkspacesCurrentDefaultModelResponse) + +export const post = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentDefaultModel', + path: '/workspaces/current/default-model', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentDefaultModelBody })) + .output(zPostWorkspacesCurrentDefaultModelResponse) + +export const 
defaultModel = { + get: get4, + post, +} + +/** + * Deprecated legacy alias for creating a plugin endpoint. Use POST /workspaces/current/endpoints instead. + * + * @deprecated + */ +export const post2 = oc + .route({ + deprecated: true, + description: + 'Deprecated legacy alias for creating a plugin endpoint. Use POST /workspaces/current/endpoints instead.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentEndpointsCreate', + path: '/workspaces/current/endpoints/create', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentEndpointsCreateBody })) + .output(zPostWorkspacesCurrentEndpointsCreateResponse) + +export const create = { + post: post2, +} + +/** + * Deprecated legacy alias for deleting a plugin endpoint. Use DELETE /workspaces/current/endpoints/{id} instead. + * + * @deprecated + */ +export const post3 = oc + .route({ + deprecated: true, + description: + 'Deprecated legacy alias for deleting a plugin endpoint. Use DELETE /workspaces/current/endpoints/{id} instead.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentEndpointsDelete', + path: '/workspaces/current/endpoints/delete', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentEndpointsDeleteBody })) + .output(zPostWorkspacesCurrentEndpointsDeleteResponse) + +export const delete_ = { + post: post3, +} + +/** + * Disable a plugin endpoint + */ +export const post4 = oc + .route({ + description: 'Disable a plugin endpoint', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentEndpointsDisable', + path: '/workspaces/current/endpoints/disable', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentEndpointsDisableBody })) + .output(zPostWorkspacesCurrentEndpointsDisableResponse) + +export const disable = { + post: post4, +} + +/** + * Enable a plugin endpoint + */ +export const post5 = oc + .route({ + description: 'Enable a plugin endpoint', + 
inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentEndpointsEnable', + path: '/workspaces/current/endpoints/enable', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentEndpointsEnableBody })) + .output(zPostWorkspacesCurrentEndpointsEnableResponse) + +export const enable = { + post: post5, +} + +/** + * List endpoints for a specific plugin + */ +export const get5 = oc + .route({ + description: 'List endpoints for a specific plugin', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentEndpointsListPlugin', + path: '/workspaces/current/endpoints/list/plugin', + tags: ['console'], + }) + .input(z.object({ query: zGetWorkspacesCurrentEndpointsListPluginQuery })) + .output(zGetWorkspacesCurrentEndpointsListPluginResponse) + +export const plugin = { + get: get5, +} + +/** + * List plugin endpoints with pagination + */ +export const get6 = oc + .route({ + description: 'List plugin endpoints with pagination', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentEndpointsList', + path: '/workspaces/current/endpoints/list', + tags: ['console'], + }) + .input(z.object({ query: zGetWorkspacesCurrentEndpointsListQuery })) + .output(zGetWorkspacesCurrentEndpointsListResponse) + +export const list = { + get: get6, + plugin, +} + +/** + * Deprecated legacy alias for updating a plugin endpoint. Use PATCH /workspaces/current/endpoints/{id} instead. + * + * @deprecated + */ +export const post6 = oc + .route({ + deprecated: true, + description: + 'Deprecated legacy alias for updating a plugin endpoint. 
Use PATCH /workspaces/current/endpoints/{id} instead.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentEndpointsUpdate', + path: '/workspaces/current/endpoints/update', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentEndpointsUpdateBody })) + .output(zPostWorkspacesCurrentEndpointsUpdateResponse) + +export const update = { + post: post6, +} + +/** + * Delete a plugin endpoint + */ +export const delete2 = oc + .route({ + description: 'Delete a plugin endpoint', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteWorkspacesCurrentEndpointsById', + path: '/workspaces/current/endpoints/{id}', + tags: ['console'], + }) + .input(z.object({ params: zDeleteWorkspacesCurrentEndpointsByIdPath })) + .output(zDeleteWorkspacesCurrentEndpointsByIdResponse) + +/** + * Update a plugin endpoint + */ +export const patch = oc + .route({ + description: 'Update a plugin endpoint', + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchWorkspacesCurrentEndpointsById', + path: '/workspaces/current/endpoints/{id}', + tags: ['console'], + }) + .input( + z.object({ + body: zPatchWorkspacesCurrentEndpointsByIdBody, + params: zPatchWorkspacesCurrentEndpointsByIdPath, + }), + ) + .output(zPatchWorkspacesCurrentEndpointsByIdResponse) + +export const byId = { + delete: delete2, + patch, +} + +/** + * Create a new plugin endpoint + */ +export const post7 = oc + .route({ + description: 'Create a new plugin endpoint', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentEndpoints', + path: '/workspaces/current/endpoints', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentEndpointsBody })) + .output(zPostWorkspacesCurrentEndpointsResponse) + +export const endpoints = { + post: post7, + create, + delete: delete_, + disable, + enable, + list, + update, + byId, +} + +export const post8 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + 
operationId: 'postWorkspacesCurrentMembersInviteEmail', + path: '/workspaces/current/members/invite-email', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentMembersInviteEmailBody })) + .output(zPostWorkspacesCurrentMembersInviteEmailResponse) + +export const inviteEmail = { + post: post8, +} + +export const post9 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentMembersOwnerTransferCheck', + path: '/workspaces/current/members/owner-transfer-check', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentMembersOwnerTransferCheckBody })) + .output(zPostWorkspacesCurrentMembersOwnerTransferCheckResponse) + +export const ownerTransferCheck = { + post: post9, +} + +export const post10 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentMembersSendOwnerTransferConfirmEmail', + path: '/workspaces/current/members/send-owner-transfer-confirm-email', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentMembersSendOwnerTransferConfirmEmailBody })) + .output(zPostWorkspacesCurrentMembersSendOwnerTransferConfirmEmailResponse) + +export const sendOwnerTransferConfirmEmail = { + post: post10, +} + +export const post11 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentMembersByMemberIdOwnerTransfer', + path: '/workspaces/current/members/{member_id}/owner-transfer', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentMembersByMemberIdOwnerTransferBody, + params: zPostWorkspacesCurrentMembersByMemberIdOwnerTransferPath, + }), + ) + .output(zPostWorkspacesCurrentMembersByMemberIdOwnerTransferResponse) + +export const ownerTransfer = { + post: post11, +} + +export const put = oc + .route({ + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putWorkspacesCurrentMembersByMemberIdUpdateRole', + path: 
'/workspaces/current/members/{member_id}/update-role', + tags: ['console'], + }) + .input( + z.object({ + body: zPutWorkspacesCurrentMembersByMemberIdUpdateRoleBody, + params: zPutWorkspacesCurrentMembersByMemberIdUpdateRolePath, + }), + ) + .output(zPutWorkspacesCurrentMembersByMemberIdUpdateRoleResponse) + +export const updateRole = { + put, +} + +export const delete3 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteWorkspacesCurrentMembersByMemberId', + path: '/workspaces/current/members/{member_id}', + tags: ['console'], + }) + .input(z.object({ params: zDeleteWorkspacesCurrentMembersByMemberIdPath })) + .output(zDeleteWorkspacesCurrentMembersByMemberIdResponse) + +export const byMemberId = { + delete: delete3, + ownerTransfer, + updateRole, +} + +export const get7 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentMembers', + path: '/workspaces/current/members', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentMembersResponse) + +export const members = { + get: get7, + inviteEmail, + ownerTransferCheck, + sendOwnerTransferConfirmEmail, + byMemberId, +} + +export const get8 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentModelProvidersByProviderCheckoutUrl', + path: '/workspaces/current/model-providers/{provider}/checkout-url', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentModelProvidersByProviderCheckoutUrlPath })) + .output(zGetWorkspacesCurrentModelProvidersByProviderCheckoutUrlResponse) + +export const checkoutUrl = { + get: get8, +} + +export const post12 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentModelProvidersByProviderCredentialsSwitch', + path: '/workspaces/current/model-providers/{provider}/credentials/switch', + tags: ['console'], + }) + .input( + z.object({ + body: 
zPostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchBody, + params: zPostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchPath, + }), + ) + .output(zPostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchResponse) + +export const switch_ = { + post: post12, +} + +export const post13 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentModelProvidersByProviderCredentialsValidate', + path: '/workspaces/current/model-providers/{provider}/credentials/validate', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentModelProvidersByProviderCredentialsValidateBody, + params: zPostWorkspacesCurrentModelProvidersByProviderCredentialsValidatePath, + }), + ) + .output(zPostWorkspacesCurrentModelProvidersByProviderCredentialsValidateResponse) + +export const validate = { + post: post13, +} + +export const delete4 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteWorkspacesCurrentModelProvidersByProviderCredentials', + path: '/workspaces/current/model-providers/{provider}/credentials', + tags: ['console'], + }) + .input( + z.object({ + body: zDeleteWorkspacesCurrentModelProvidersByProviderCredentialsBody, + params: zDeleteWorkspacesCurrentModelProvidersByProviderCredentialsPath, + }), + ) + .output(zDeleteWorkspacesCurrentModelProvidersByProviderCredentialsResponse) + +export const get9 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentModelProvidersByProviderCredentials', + path: '/workspaces/current/model-providers/{provider}/credentials', + tags: ['console'], + }) + .input( + z.object({ + params: zGetWorkspacesCurrentModelProvidersByProviderCredentialsPath, + query: zGetWorkspacesCurrentModelProvidersByProviderCredentialsQuery.optional(), + }), + ) + .output(zGetWorkspacesCurrentModelProvidersByProviderCredentialsResponse) + +export const post14 = oc + .route({ + inputStructure: 'detailed', 
+ method: 'POST', + operationId: 'postWorkspacesCurrentModelProvidersByProviderCredentials', + path: '/workspaces/current/model-providers/{provider}/credentials', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentModelProvidersByProviderCredentialsBody, + params: zPostWorkspacesCurrentModelProvidersByProviderCredentialsPath, + }), + ) + .output(zPostWorkspacesCurrentModelProvidersByProviderCredentialsResponse) + +export const put2 = oc + .route({ + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putWorkspacesCurrentModelProvidersByProviderCredentials', + path: '/workspaces/current/model-providers/{provider}/credentials', + tags: ['console'], + }) + .input( + z.object({ + body: zPutWorkspacesCurrentModelProvidersByProviderCredentialsBody, + params: zPutWorkspacesCurrentModelProvidersByProviderCredentialsPath, + }), + ) + .output(zPutWorkspacesCurrentModelProvidersByProviderCredentialsResponse) + +export const credentials = { + delete: delete4, + get: get9, + post: post14, + put: put2, + switch: switch_, + validate, +} + +export const post15 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitch', + path: '/workspaces/current/model-providers/{provider}/models/credentials/switch', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchBody, + params: zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchPath, + }), + ) + .output(zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchResponse) + +export const switch2 = { + post: post15, +} + +export const post16 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidate', + path: '/workspaces/current/model-providers/{provider}/models/credentials/validate', + tags: ['console'], + }) + .input( + 
z.object({ + body: zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidateBody, + params: zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidatePath, + }), + ) + .output(zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidateResponse) + +export const validate2 = { + post: post16, +} + +export const delete5 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteWorkspacesCurrentModelProvidersByProviderModelsCredentials', + path: '/workspaces/current/model-providers/{provider}/models/credentials', + tags: ['console'], + }) + .input( + z.object({ + body: zDeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsBody, + params: zDeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsPath, + }), + ) + .output(zDeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse) + +export const get10 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentModelProvidersByProviderModelsCredentials', + path: '/workspaces/current/model-providers/{provider}/models/credentials', + tags: ['console'], + }) + .input( + z.object({ + params: zGetWorkspacesCurrentModelProvidersByProviderModelsCredentialsPath, + query: zGetWorkspacesCurrentModelProvidersByProviderModelsCredentialsQuery, + }), + ) + .output(zGetWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse) + +export const post17 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentModelProvidersByProviderModelsCredentials', + path: '/workspaces/current/model-providers/{provider}/models/credentials', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsBody, + params: zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsPath, + }), + ) + .output(zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse) + +export const put3 = oc + 
.route({ + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putWorkspacesCurrentModelProvidersByProviderModelsCredentials', + path: '/workspaces/current/model-providers/{provider}/models/credentials', + tags: ['console'], + }) + .input( + z.object({ + body: zPutWorkspacesCurrentModelProvidersByProviderModelsCredentialsBody, + params: zPutWorkspacesCurrentModelProvidersByProviderModelsCredentialsPath, + }), + ) + .output(zPutWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse) + +export const credentials2 = { + delete: delete5, + get: get10, + post: post17, + put: put3, + switch: switch2, + validate: validate2, +} + +export const patch2 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchWorkspacesCurrentModelProvidersByProviderModelsDisable', + path: '/workspaces/current/model-providers/{provider}/models/disable', + tags: ['console'], + }) + .input( + z.object({ + body: zPatchWorkspacesCurrentModelProvidersByProviderModelsDisableBody, + params: zPatchWorkspacesCurrentModelProvidersByProviderModelsDisablePath, + }), + ) + .output(zPatchWorkspacesCurrentModelProvidersByProviderModelsDisableResponse) + +export const disable2 = { + patch: patch2, +} + +export const patch3 = oc + .route({ + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchWorkspacesCurrentModelProvidersByProviderModelsEnable', + path: '/workspaces/current/model-providers/{provider}/models/enable', + tags: ['console'], + }) + .input( + z.object({ + body: zPatchWorkspacesCurrentModelProvidersByProviderModelsEnableBody, + params: zPatchWorkspacesCurrentModelProvidersByProviderModelsEnablePath, + }), + ) + .output(zPatchWorkspacesCurrentModelProvidersByProviderModelsEnableResponse) + +export const enable2 = { + patch: patch3, +} + +export const post18 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: + 'postWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidate', + 
path: '/workspaces/current/model-providers/{provider}/models/load-balancing-configs/credentials-validate', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidateBody, + params: + zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidatePath, + }), + ) + .output( + zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidateResponse, + ) + +export const credentialsValidate = { + post: post18, +} + +export const post19 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: + 'postWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidate', + path: '/workspaces/current/model-providers/{provider}/models/load-balancing-configs/{config_id}/credentials-validate', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidateBody, + params: + zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidatePath, + }), + ) + .output( + zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidateResponse, + ) + +export const credentialsValidate2 = { + post: post19, +} + +export const byConfigId = { + credentialsValidate: credentialsValidate2, +} + +export const loadBalancingConfigs = { + credentialsValidate, + byConfigId, +} + +export const get11 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentModelProvidersByProviderModelsParameterRules', + path: '/workspaces/current/model-providers/{provider}/models/parameter-rules', + tags: ['console'], + }) + .input( + z.object({ + params: zGetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesPath, + query: zGetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesQuery, + }), + ) + 
.output(zGetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesResponse) + +export const parameterRules = { + get: get11, +} + +export const delete6 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteWorkspacesCurrentModelProvidersByProviderModels', + path: '/workspaces/current/model-providers/{provider}/models', + tags: ['console'], + }) + .input( + z.object({ + body: zDeleteWorkspacesCurrentModelProvidersByProviderModelsBody, + params: zDeleteWorkspacesCurrentModelProvidersByProviderModelsPath, + }), + ) + .output(zDeleteWorkspacesCurrentModelProvidersByProviderModelsResponse) + +export const get12 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentModelProvidersByProviderModels', + path: '/workspaces/current/model-providers/{provider}/models', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentModelProvidersByProviderModelsPath })) + .output(zGetWorkspacesCurrentModelProvidersByProviderModelsResponse) + +export const post20 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentModelProvidersByProviderModels', + path: '/workspaces/current/model-providers/{provider}/models', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentModelProvidersByProviderModelsBody, + params: zPostWorkspacesCurrentModelProvidersByProviderModelsPath, + }), + ) + .output(zPostWorkspacesCurrentModelProvidersByProviderModelsResponse) + +export const models = { + delete: delete6, + get: get12, + post: post20, + credentials: credentials2, + disable: disable2, + enable: enable2, + loadBalancingConfigs, + parameterRules, +} + +export const post21 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentModelProvidersByProviderPreferredProviderType', + path: '/workspaces/current/model-providers/{provider}/preferred-provider-type', + tags: ['console'], + }) + 
.input( + z.object({ + body: zPostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypeBody, + params: zPostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypePath, + }), + ) + .output(zPostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypeResponse) + +export const preferredProviderType = { + post: post21, +} + +export const byProvider = { + checkoutUrl, + credentials, + models, + preferredProviderType, +} + +export const get13 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentModelProviders', + path: '/workspaces/current/model-providers', + tags: ['console'], + }) + .input(z.object({ query: zGetWorkspacesCurrentModelProvidersQuery.optional() })) + .output(zGetWorkspacesCurrentModelProvidersResponse) + +export const modelProviders = { + get: get13, + byProvider, +} + +export const get14 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentModelsModelTypesByModelType', + path: '/workspaces/current/models/model-types/{model_type}', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentModelsModelTypesByModelTypePath })) + .output(zGetWorkspacesCurrentModelsModelTypesByModelTypeResponse) + +export const byModelType = { + get: get14, +} + +export const modelTypes = { + byModelType, +} + +export const models2 = { + modelTypes, +} + +/** + * Get workspace permission settings + * + * Returns permission flags that control workspace features like member invitations and owner transfer. 
+ */ +export const get15 = oc + .route({ + description: + 'Returns permission flags that control workspace features like member invitations and owner transfer.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentPermission', + path: '/workspaces/current/permission', + summary: 'Get workspace permission settings', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentPermissionResponse) + +export const permission = { + get: get15, +} + +export const get16 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentPluginAsset', + path: '/workspaces/current/plugin/asset', + tags: ['console'], + }) + .input(z.object({ query: zGetWorkspacesCurrentPluginAssetQuery })) + .output(zGetWorkspacesCurrentPluginAssetResponse) + +export const asset = { + get: get16, +} + +export const get17 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentPluginDebuggingKey', + path: '/workspaces/current/plugin/debugging-key', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentPluginDebuggingKeyResponse) + +export const debuggingKey = { + get: get17, +} + +export const get18 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentPluginFetchManifest', + path: '/workspaces/current/plugin/fetch-manifest', + tags: ['console'], + }) + .input(z.object({ query: zGetWorkspacesCurrentPluginFetchManifestQuery })) + .output(zGetWorkspacesCurrentPluginFetchManifestResponse) + +export const fetchManifest = { + get: get18, +} + +export const get19 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentPluginIcon', + path: '/workspaces/current/plugin/icon', + tags: ['console'], + }) + .input(z.object({ query: zGetWorkspacesCurrentPluginIconQuery })) + .output(zGetWorkspacesCurrentPluginIconResponse) + +export const icon = { + get: get19, +} + +export const post22 = oc + .route({ + 
inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginInstallGithub', + path: '/workspaces/current/plugin/install/github', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentPluginInstallGithubBody })) + .output(zPostWorkspacesCurrentPluginInstallGithubResponse) + +export const github = { + post: post22, +} + +export const post23 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginInstallMarketplace', + path: '/workspaces/current/plugin/install/marketplace', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentPluginInstallMarketplaceBody })) + .output(zPostWorkspacesCurrentPluginInstallMarketplaceResponse) + +export const marketplace = { + post: post23, +} + +export const post24 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginInstallPkg', + path: '/workspaces/current/plugin/install/pkg', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentPluginInstallPkgBody })) + .output(zPostWorkspacesCurrentPluginInstallPkgResponse) + +export const pkg = { + post: post24, +} + +export const install = { + github, + marketplace, + pkg, +} + +export const post25 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginListInstallationsIds', + path: '/workspaces/current/plugin/list/installations/ids', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentPluginListInstallationsIdsBody })) + .output(zPostWorkspacesCurrentPluginListInstallationsIdsResponse) + +export const ids = { + post: post25, +} + +export const installations = { + ids, +} + +export const post26 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginListLatestVersions', + path: '/workspaces/current/plugin/list/latest-versions', + tags: ['console'], + }) + .input(z.object({ body: 
zPostWorkspacesCurrentPluginListLatestVersionsBody })) + .output(zPostWorkspacesCurrentPluginListLatestVersionsResponse) + +export const latestVersions = { + post: post26, +} + +export const get20 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentPluginList', + path: '/workspaces/current/plugin/list', + tags: ['console'], + }) + .input(z.object({ query: zGetWorkspacesCurrentPluginListQuery.optional() })) + .output(zGetWorkspacesCurrentPluginListResponse) + +export const list2 = { + get: get20, + installations, + latestVersions, +} + +export const get21 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentPluginMarketplacePkg', + path: '/workspaces/current/plugin/marketplace/pkg', + tags: ['console'], + }) + .input(z.object({ query: zGetWorkspacesCurrentPluginMarketplacePkgQuery })) + .output(zGetWorkspacesCurrentPluginMarketplacePkgResponse) + +export const pkg2 = { + get: get21, +} + +export const marketplace2 = { + pkg: pkg2, +} + +export const get22 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentPluginParametersDynamicOptions', + path: '/workspaces/current/plugin/parameters/dynamic-options', + tags: ['console'], + }) + .input(z.object({ query: zGetWorkspacesCurrentPluginParametersDynamicOptionsQuery })) + .output(zGetWorkspacesCurrentPluginParametersDynamicOptionsResponse) + +export const dynamicOptions = { + get: get22, +} + +/** + * Fetch dynamic options using credentials directly (for edit mode) + */ +export const post27 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginParametersDynamicOptionsWithCredentials', + path: '/workspaces/current/plugin/parameters/dynamic-options-with-credentials', + summary: 'Fetch dynamic options using credentials directly (for edit mode)', + tags: ['console'], + }) + .input( + z.object({ body: 
zPostWorkspacesCurrentPluginParametersDynamicOptionsWithCredentialsBody }), + ) + .output(zPostWorkspacesCurrentPluginParametersDynamicOptionsWithCredentialsResponse) + +export const dynamicOptionsWithCredentials = { + post: post27, +} + +export const parameters = { + dynamicOptions, + dynamicOptionsWithCredentials, +} + +export const post28 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginPermissionChange', + path: '/workspaces/current/plugin/permission/change', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentPluginPermissionChangeBody })) + .output(zPostWorkspacesCurrentPluginPermissionChangeResponse) + +export const change = { + post: post28, +} + +export const get23 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentPluginPermissionFetch', + path: '/workspaces/current/plugin/permission/fetch', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentPluginPermissionFetchResponse) + +export const fetch_ = { + get: get23, +} + +export const permission2 = { + change, + fetch: fetch_, +} + +export const post29 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginPreferencesAutoupgradeExclude', + path: '/workspaces/current/plugin/preferences/autoupgrade/exclude', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentPluginPreferencesAutoupgradeExcludeBody })) + .output(zPostWorkspacesCurrentPluginPreferencesAutoupgradeExcludeResponse) + +export const exclude = { + post: post29, +} + +export const autoupgrade = { + exclude, +} + +export const post30 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginPreferencesChange', + path: '/workspaces/current/plugin/preferences/change', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentPluginPreferencesChangeBody })) + 
.output(zPostWorkspacesCurrentPluginPreferencesChangeResponse) + +export const change2 = { + post: post30, +} + +export const get24 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentPluginPreferencesFetch', + path: '/workspaces/current/plugin/preferences/fetch', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentPluginPreferencesFetchResponse) + +export const fetch2 = { + get: get24, +} + +export const preferences = { + autoupgrade, + change: change2, + fetch: fetch2, +} + +export const get25 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentPluginReadme', + path: '/workspaces/current/plugin/readme', + tags: ['console'], + }) + .input(z.object({ query: zGetWorkspacesCurrentPluginReadmeQuery })) + .output(zGetWorkspacesCurrentPluginReadmeResponse) + +export const readme = { + get: get25, +} + +export const post31 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginTasksDeleteAll', + path: '/workspaces/current/plugin/tasks/delete_all', + tags: ['console'], + }) + .output(zPostWorkspacesCurrentPluginTasksDeleteAllResponse) + +export const deleteAll = { + post: post31, +} + +export const post32 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginTasksByTaskIdDeleteByIdentifier', + path: '/workspaces/current/plugin/tasks/{task_id}/delete/{identifier}', + tags: ['console'], + }) + .input(z.object({ params: zPostWorkspacesCurrentPluginTasksByTaskIdDeleteByIdentifierPath })) + .output(zPostWorkspacesCurrentPluginTasksByTaskIdDeleteByIdentifierResponse) + +export const byIdentifier = { + post: post32, +} + +export const post33 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginTasksByTaskIdDelete', + path: '/workspaces/current/plugin/tasks/{task_id}/delete', + tags: ['console'], + }) + .input(z.object({ 
params: zPostWorkspacesCurrentPluginTasksByTaskIdDeletePath })) + .output(zPostWorkspacesCurrentPluginTasksByTaskIdDeleteResponse) + +export const delete7 = { + post: post33, + byIdentifier, +} + +export const get26 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentPluginTasksByTaskId', + path: '/workspaces/current/plugin/tasks/{task_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentPluginTasksByTaskIdPath })) + .output(zGetWorkspacesCurrentPluginTasksByTaskIdResponse) + +export const byTaskId = { + get: get26, + delete: delete7, +} + +export const get27 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentPluginTasks', + path: '/workspaces/current/plugin/tasks', + tags: ['console'], + }) + .input(z.object({ query: zGetWorkspacesCurrentPluginTasksQuery.optional() })) + .output(zGetWorkspacesCurrentPluginTasksResponse) + +export const tasks = { + get: get27, + deleteAll, + byTaskId, +} + +export const post34 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginUninstall', + path: '/workspaces/current/plugin/uninstall', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentPluginUninstallBody })) + .output(zPostWorkspacesCurrentPluginUninstallResponse) + +export const uninstall = { + post: post34, +} + +export const post35 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginUpgradeGithub', + path: '/workspaces/current/plugin/upgrade/github', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentPluginUpgradeGithubBody })) + .output(zPostWorkspacesCurrentPluginUpgradeGithubResponse) + +export const github2 = { + post: post35, +} + +export const post36 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginUpgradeMarketplace', + path: 
'/workspaces/current/plugin/upgrade/marketplace', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentPluginUpgradeMarketplaceBody })) + .output(zPostWorkspacesCurrentPluginUpgradeMarketplaceResponse) + +export const marketplace3 = { + post: post36, +} + +export const upgrade = { + github: github2, + marketplace: marketplace3, +} + +export const post37 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginUploadBundle', + path: '/workspaces/current/plugin/upload/bundle', + tags: ['console'], + }) + .output(zPostWorkspacesCurrentPluginUploadBundleResponse) + +export const bundle = { + post: post37, +} + +export const post38 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginUploadGithub', + path: '/workspaces/current/plugin/upload/github', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentPluginUploadGithubBody })) + .output(zPostWorkspacesCurrentPluginUploadGithubResponse) + +export const github3 = { + post: post38, +} + +export const post39 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentPluginUploadPkg', + path: '/workspaces/current/plugin/upload/pkg', + tags: ['console'], + }) + .output(zPostWorkspacesCurrentPluginUploadPkgResponse) + +export const pkg3 = { + post: post39, +} + +export const upload = { + bundle, + github: github3, + pkg: pkg3, +} + +export const plugin2 = { + asset, + debuggingKey, + fetchManifest, + icon, + install, + list: list2, + marketplace: marketplace2, + parameters, + permission: permission2, + preferences, + readme, + tasks, + uninstall, + upgrade, + upload, +} + +export const get28 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolLabels', + path: '/workspaces/current/tool-labels', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentToolLabelsResponse) + +export const 
toolLabels = { + get: get28, +} + +export const post40 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderApiAdd', + path: '/workspaces/current/tool-provider/api/add', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentToolProviderApiAddBody })) + .output(zPostWorkspacesCurrentToolProviderApiAddResponse) + +export const add = { + post: post40, +} + +export const post41 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderApiDelete', + path: '/workspaces/current/tool-provider/api/delete', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentToolProviderApiDeleteBody })) + .output(zPostWorkspacesCurrentToolProviderApiDeleteResponse) + +export const delete8 = { + post: post41, +} + +export const get29 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderApiGet', + path: '/workspaces/current/tool-provider/api/get', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentToolProviderApiGetResponse) + +export const get30 = { + get: get29, +} + +export const get31 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderApiRemote', + path: '/workspaces/current/tool-provider/api/remote', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentToolProviderApiRemoteResponse) + +export const remote = { + get: get31, +} + +export const post42 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderApiSchema', + path: '/workspaces/current/tool-provider/api/schema', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentToolProviderApiSchemaBody })) + .output(zPostWorkspacesCurrentToolProviderApiSchemaResponse) + +export const schema = { + post: post42, +} + +export const post43 = oc + .route({ + inputStructure: 'detailed', + 
method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderApiTestPre', + path: '/workspaces/current/tool-provider/api/test/pre', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentToolProviderApiTestPreBody })) + .output(zPostWorkspacesCurrentToolProviderApiTestPreResponse) + +export const pre = { + post: post43, +} + +export const test = { + pre, +} + +export const get32 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderApiTools', + path: '/workspaces/current/tool-provider/api/tools', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentToolProviderApiToolsResponse) + +export const tools = { + get: get32, +} + +export const post44 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderApiUpdate', + path: '/workspaces/current/tool-provider/api/update', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentToolProviderApiUpdateBody })) + .output(zPostWorkspacesCurrentToolProviderApiUpdateResponse) + +export const update2 = { + post: post44, +} + +export const api = { + add, + delete: delete8, + get: get30, + remote, + schema, + test, + tools, + update: update2, +} + +export const post45 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderBuiltinByProviderAdd', + path: '/workspaces/current/tool-provider/builtin/{provider}/add', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentToolProviderBuiltinByProviderAddBody, + params: zPostWorkspacesCurrentToolProviderBuiltinByProviderAddPath, + }), + ) + .output(zPostWorkspacesCurrentToolProviderBuiltinByProviderAddResponse) + +export const add2 = { + post: post45, +} + +export const get33 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderBuiltinByProviderCredentialInfo', + path: 
'/workspaces/current/tool-provider/builtin/{provider}/credential/info', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialInfoPath })) + .output(zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialInfoResponse) + +export const info = { + get: get33, +} + +export const get34 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: + 'getWorkspacesCurrentToolProviderBuiltinByProviderCredentialSchemaByCredentialType', + path: '/workspaces/current/tool-provider/builtin/{provider}/credential/schema/{credential_type}', + tags: ['console'], + }) + .input( + z.object({ + params: + zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialSchemaByCredentialTypePath, + }), + ) + .output( + zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialSchemaByCredentialTypeResponse, + ) + +export const byCredentialType = { + get: get34, +} + +export const schema2 = { + byCredentialType, +} + +export const credential = { + info, + schema: schema2, +} + +export const get35 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderBuiltinByProviderCredentials', + path: '/workspaces/current/tool-provider/builtin/{provider}/credentials', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialsPath })) + .output(zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialsResponse) + +export const credentials3 = { + get: get35, +} + +export const post46 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredential', + path: '/workspaces/current/tool-provider/builtin/{provider}/default-credential', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialBody, + params: 
zPostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialPath, + }), + ) + .output(zPostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialResponse) + +export const defaultCredential = { + post: post46, +} + +export const post47 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderBuiltinByProviderDelete', + path: '/workspaces/current/tool-provider/builtin/{provider}/delete', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentToolProviderBuiltinByProviderDeleteBody, + params: zPostWorkspacesCurrentToolProviderBuiltinByProviderDeletePath, + }), + ) + .output(zPostWorkspacesCurrentToolProviderBuiltinByProviderDeleteResponse) + +export const delete9 = { + post: post47, +} + +export const get36 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderBuiltinByProviderIcon', + path: '/workspaces/current/tool-provider/builtin/{provider}/icon', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentToolProviderBuiltinByProviderIconPath })) + .output(zGetWorkspacesCurrentToolProviderBuiltinByProviderIconResponse) + +export const icon2 = { + get: get36, +} + +export const get37 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderBuiltinByProviderInfo', + path: '/workspaces/current/tool-provider/builtin/{provider}/info', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentToolProviderBuiltinByProviderInfoPath })) + .output(zGetWorkspacesCurrentToolProviderBuiltinByProviderInfoResponse) + +export const info2 = { + get: get37, +} + +export const get38 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderBuiltinByProviderOauthClientSchema', + path: '/workspaces/current/tool-provider/builtin/{provider}/oauth/client-schema', + tags: ['console'], + }) + .input( 
+ z.object({ params: zGetWorkspacesCurrentToolProviderBuiltinByProviderOauthClientSchemaPath }), + ) + .output(zGetWorkspacesCurrentToolProviderBuiltinByProviderOauthClientSchemaResponse) + +export const clientSchema = { + get: get38, +} + +export const delete10 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClient', + path: '/workspaces/current/tool-provider/builtin/{provider}/oauth/custom-client', + tags: ['console'], + }) + .input( + z.object({ + params: zDeleteWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientPath, + }), + ) + .output(zDeleteWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponse) + +export const get39 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClient', + path: '/workspaces/current/tool-provider/builtin/{provider}/oauth/custom-client', + tags: ['console'], + }) + .input( + z.object({ params: zGetWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientPath }), + ) + .output(zGetWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponse) + +export const post48 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClient', + path: '/workspaces/current/tool-provider/builtin/{provider}/oauth/custom-client', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientBody, + params: zPostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientPath, + }), + ) + .output(zPostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponse) + +export const customClient = { + delete: delete10, + get: get39, + post: post48, +} + +export const oauth = { + clientSchema, + customClient, +} + +export const get40 = oc + .route({ + inputStructure: 'detailed', 
+ method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderBuiltinByProviderTools', + path: '/workspaces/current/tool-provider/builtin/{provider}/tools', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentToolProviderBuiltinByProviderToolsPath })) + .output(zGetWorkspacesCurrentToolProviderBuiltinByProviderToolsResponse) + +export const tools2 = { + get: get40, +} + +export const post49 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderBuiltinByProviderUpdate', + path: '/workspaces/current/tool-provider/builtin/{provider}/update', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentToolProviderBuiltinByProviderUpdateBody, + params: zPostWorkspacesCurrentToolProviderBuiltinByProviderUpdatePath, + }), + ) + .output(zPostWorkspacesCurrentToolProviderBuiltinByProviderUpdateResponse) + +export const update3 = { + post: post49, +} + +export const byProvider2 = { + add: add2, + credential, + credentials: credentials3, + defaultCredential, + delete: delete9, + icon: icon2, + info: info2, + oauth, + tools: tools2, + update: update3, +} + +export const builtin = { + byProvider: byProvider2, +} + +export const post50 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderMcpAuth', + path: '/workspaces/current/tool-provider/mcp/auth', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentToolProviderMcpAuthBody })) + .output(zPostWorkspacesCurrentToolProviderMcpAuthResponse) + +export const auth = { + post: post50, +} + +export const get41 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderMcpToolsByProviderId', + path: '/workspaces/current/tool-provider/mcp/tools/{provider_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentToolProviderMcpToolsByProviderIdPath })) + 
.output(zGetWorkspacesCurrentToolProviderMcpToolsByProviderIdResponse) + +export const byProviderId = { + get: get41, +} + +export const tools3 = { + byProviderId, +} + +export const get42 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderMcpUpdateByProviderId', + path: '/workspaces/current/tool-provider/mcp/update/{provider_id}', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentToolProviderMcpUpdateByProviderIdPath })) + .output(zGetWorkspacesCurrentToolProviderMcpUpdateByProviderIdResponse) + +export const byProviderId2 = { + get: get42, +} + +export const update4 = { + byProviderId: byProviderId2, +} + +export const delete11 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteWorkspacesCurrentToolProviderMcp', + path: '/workspaces/current/tool-provider/mcp', + tags: ['console'], + }) + .input(z.object({ body: zDeleteWorkspacesCurrentToolProviderMcpBody })) + .output(zDeleteWorkspacesCurrentToolProviderMcpResponse) + +export const post51 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderMcp', + path: '/workspaces/current/tool-provider/mcp', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentToolProviderMcpBody })) + .output(zPostWorkspacesCurrentToolProviderMcpResponse) + +export const put4 = oc + .route({ + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putWorkspacesCurrentToolProviderMcp', + path: '/workspaces/current/tool-provider/mcp', + tags: ['console'], + }) + .input(z.object({ body: zPutWorkspacesCurrentToolProviderMcpBody })) + .output(zPutWorkspacesCurrentToolProviderMcpResponse) + +export const mcp = { + delete: delete11, + post: post51, + put: put4, + auth, + tools: tools3, + update: update4, +} + +export const post52 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 
'postWorkspacesCurrentToolProviderWorkflowCreate', + path: '/workspaces/current/tool-provider/workflow/create', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentToolProviderWorkflowCreateBody })) + .output(zPostWorkspacesCurrentToolProviderWorkflowCreateResponse) + +export const create2 = { + post: post52, +} + +export const post53 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderWorkflowDelete', + path: '/workspaces/current/tool-provider/workflow/delete', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentToolProviderWorkflowDeleteBody })) + .output(zPostWorkspacesCurrentToolProviderWorkflowDeleteResponse) + +export const delete12 = { + post: post53, +} + +export const get43 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderWorkflowGet', + path: '/workspaces/current/tool-provider/workflow/get', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentToolProviderWorkflowGetResponse) + +export const get44 = { + get: get43, +} + +export const get45 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviderWorkflowTools', + path: '/workspaces/current/tool-provider/workflow/tools', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentToolProviderWorkflowToolsResponse) + +export const tools4 = { + get: get45, +} + +export const post54 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentToolProviderWorkflowUpdate', + path: '/workspaces/current/tool-provider/workflow/update', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCurrentToolProviderWorkflowUpdateBody })) + .output(zPostWorkspacesCurrentToolProviderWorkflowUpdateResponse) + +export const update5 = { + post: post54, +} + +export const workflow = { + create: create2, + delete: delete12, + get: get44, + tools: tools4, + 
update: update5, +} + +export const toolProvider = { + api, + builtin, + mcp, + workflow, +} + +export const get46 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolProviders', + path: '/workspaces/current/tool-providers', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentToolProvidersResponse) + +export const toolProviders = { + get: get46, +} + +export const get47 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolsApi', + path: '/workspaces/current/tools/api', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentToolsApiResponse) + +export const api2 = { + get: get47, +} + +export const get48 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolsBuiltin', + path: '/workspaces/current/tools/builtin', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentToolsBuiltinResponse) + +export const builtin2 = { + get: get48, +} + +export const get49 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolsMcp', + path: '/workspaces/current/tools/mcp', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentToolsMcpResponse) + +export const mcp2 = { + get: get49, +} + +export const get50 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentToolsWorkflow', + path: '/workspaces/current/tools/workflow', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentToolsWorkflowResponse) + +export const workflow2 = { + get: get50, +} + +export const tools5 = { + api: api2, + builtin: builtin2, + mcp: mcp2, + workflow: workflow2, +} + +export const get51 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentTriggerProviderByProviderIcon', + path: '/workspaces/current/trigger-provider/{provider}/icon', + tags: ['console'], + }) + .input(z.object({ params: 
zGetWorkspacesCurrentTriggerProviderByProviderIconPath })) + .output(zGetWorkspacesCurrentTriggerProviderByProviderIconResponse) + +export const icon3 = { + get: get51, +} + +/** + * Get info for a trigger provider + */ +export const get52 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentTriggerProviderByProviderInfo', + path: '/workspaces/current/trigger-provider/{provider}/info', + summary: 'Get info for a trigger provider', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentTriggerProviderByProviderInfoPath })) + .output(zGetWorkspacesCurrentTriggerProviderByProviderInfoResponse) + +export const info3 = { + get: get52, +} + +/** + * Remove custom OAuth client configuration + */ +export const delete13 = oc + .route({ + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteWorkspacesCurrentTriggerProviderByProviderOauthClient', + path: '/workspaces/current/trigger-provider/{provider}/oauth/client', + summary: 'Remove custom OAuth client configuration', + tags: ['console'], + }) + .input(z.object({ params: zDeleteWorkspacesCurrentTriggerProviderByProviderOauthClientPath })) + .output(zDeleteWorkspacesCurrentTriggerProviderByProviderOauthClientResponse) + +/** + * Get OAuth client configuration for a provider + */ +export const get53 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentTriggerProviderByProviderOauthClient', + path: '/workspaces/current/trigger-provider/{provider}/oauth/client', + summary: 'Get OAuth client configuration for a provider', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentTriggerProviderByProviderOauthClientPath })) + .output(zGetWorkspacesCurrentTriggerProviderByProviderOauthClientResponse) + +/** + * Configure custom OAuth client for a provider + */ +export const post55 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 
'postWorkspacesCurrentTriggerProviderByProviderOauthClient', + path: '/workspaces/current/trigger-provider/{provider}/oauth/client', + summary: 'Configure custom OAuth client for a provider', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentTriggerProviderByProviderOauthClientBody, + params: zPostWorkspacesCurrentTriggerProviderByProviderOauthClientPath, + }), + ) + .output(zPostWorkspacesCurrentTriggerProviderByProviderOauthClientResponse) + +export const client = { + delete: delete13, + get: get53, + post: post55, +} + +export const oauth2 = { + client, +} + +/** + * Build a subscription instance for a trigger provider + */ +export const post56 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: + 'postWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderId', + path: '/workspaces/current/trigger-provider/{provider}/subscriptions/builder/build/{subscription_builder_id}', + summary: 'Build a subscription instance for a trigger provider', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdBody, + params: + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdPath, + }), + ) + .output( + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdResponse, + ) + +export const bySubscriptionBuilderId = { + post: post56, +} + +export const build = { + bySubscriptionBuilderId, +} + +/** + * Add a new subscription instance for a trigger provider + */ +export const post57 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreate', + path: '/workspaces/current/trigger-provider/{provider}/subscriptions/builder/create', + summary: 'Add a new subscription instance for a trigger provider', + tags: ['console'], + }) + 
.input( + z.object({ + body: zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreateBody, + params: zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreatePath, + }), + ) + .output(zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreateResponse) + +export const create3 = { + post: post57, +} + +/** + * Get the request logs for a subscription instance for a trigger provider + */ +export const get54 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: + 'getWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderLogsBySubscriptionBuilderId', + path: '/workspaces/current/trigger-provider/{provider}/subscriptions/builder/logs/{subscription_builder_id}', + summary: 'Get the request logs for a subscription instance for a trigger provider', + tags: ['console'], + }) + .input( + z.object({ + params: + zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderLogsBySubscriptionBuilderIdPath, + }), + ) + .output( + zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderLogsBySubscriptionBuilderIdResponse, + ) + +export const bySubscriptionBuilderId2 = { + get: get54, +} + +export const logs = { + bySubscriptionBuilderId: bySubscriptionBuilderId2, +} + +/** + * Update a subscription instance for a trigger provider + */ +export const post58 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: + 'postWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderId', + path: '/workspaces/current/trigger-provider/{provider}/subscriptions/builder/update/{subscription_builder_id}', + summary: 'Update a subscription instance for a trigger provider', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdBody, + params: + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdPath, + }), + ) + 
.output( + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdResponse, + ) + +export const bySubscriptionBuilderId3 = { + post: post58, +} + +export const update6 = { + bySubscriptionBuilderId: bySubscriptionBuilderId3, +} + +/** + * Verify and update a subscription instance for a trigger provider + */ +export const post59 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: + 'postWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderId', + path: '/workspaces/current/trigger-provider/{provider}/subscriptions/builder/verify-and-update/{subscription_builder_id}', + summary: 'Verify and update a subscription instance for a trigger provider', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdBody, + params: + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdPath, + }), + ) + .output( + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdResponse, + ) + +export const bySubscriptionBuilderId4 = { + post: post59, +} + +export const verifyAndUpdate = { + bySubscriptionBuilderId: bySubscriptionBuilderId4, +} + +/** + * Get a subscription instance for a trigger provider + */ +export const get55 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: + 'getWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBySubscriptionBuilderId', + path: '/workspaces/current/trigger-provider/{provider}/subscriptions/builder/{subscription_builder_id}', + summary: 'Get a subscription instance for a trigger provider', + tags: ['console'], + }) + .input( + z.object({ + params: + zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBySubscriptionBuilderIdPath, + }), + ) + .output( + 
zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBySubscriptionBuilderIdResponse, + ) + +export const bySubscriptionBuilderId5 = { + get: get55, +} + +export const builder = { + build, + create: create3, + logs, + update: update6, + verifyAndUpdate, + bySubscriptionBuilderId: bySubscriptionBuilderId5, +} + +/** + * List all trigger subscriptions for the current tenant's provider + */ +export const get56 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentTriggerProviderByProviderSubscriptionsList', + path: '/workspaces/current/trigger-provider/{provider}/subscriptions/list', + summary: 'List all trigger subscriptions for the current tenant\'s provider', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsListPath })) + .output(zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsListResponse) + +export const list3 = { + get: get56, +} + +/** + * Initiate OAuth authorization flow for a trigger provider + */ +export const get57 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentTriggerProviderByProviderSubscriptionsOauthAuthorize', + path: '/workspaces/current/trigger-provider/{provider}/subscriptions/oauth/authorize', + summary: 'Initiate OAuth authorization flow for a trigger provider', + tags: ['console'], + }) + .input( + z.object({ + params: zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsOauthAuthorizePath, + }), + ) + .output(zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsOauthAuthorizeResponse) + +export const authorize = { + get: get57, +} + +export const oauth3 = { + authorize, +} + +/** + * Verify credentials for an existing subscription (edit mode only) + */ +export const post60 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: + 'postWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionId', + path: 
'/workspaces/current/trigger-provider/{provider}/subscriptions/verify/{subscription_id}', + summary: 'Verify credentials for an existing subscription (edit mode only)', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdBody, + params: + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdPath, + }), + ) + .output( + zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdResponse, + ) + +export const bySubscriptionId = { + post: post60, +} + +export const verify = { + bySubscriptionId, +} + +export const subscriptions = { + builder, + list: list3, + oauth: oauth3, + verify, +} + +export const byProvider3 = { + icon: icon3, + info: info3, + oauth: oauth2, + subscriptions, +} + +/** + * Delete a subscription instance + */ +export const post61 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsDelete', + path: '/workspaces/current/trigger-provider/{subscription_id}/subscriptions/delete', + summary: 'Delete a subscription instance', + tags: ['console'], + }) + .input( + z.object({ + params: zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsDeletePath, + }), + ) + .output(zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsDeleteResponse) + +export const delete14 = { + post: post61, +} + +/** + * Update a subscription instance + */ +export const post62 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdate', + path: '/workspaces/current/trigger-provider/{subscription_id}/subscriptions/update', + summary: 'Update a subscription instance', + tags: ['console'], + }) + .input( + z.object({ + body: zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdateBody, + params: 
zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdatePath, + }), + ) + .output(zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdateResponse) + +export const update7 = { + post: post62, +} + +export const subscriptions2 = { + delete: delete14, + update: update7, +} + +export const bySubscriptionId2 = { + subscriptions: subscriptions2, +} + +export const triggerProvider = { + byProvider: byProvider3, + bySubscriptionId: bySubscriptionId2, +} + +/** + * List all trigger providers for the current tenant + */ +export const get58 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentTriggers', + path: '/workspaces/current/triggers', + summary: 'List all trigger providers for the current tenant', + tags: ['console'], + }) + .output(zGetWorkspacesCurrentTriggersResponse) + +export const triggers = { + get: get58, +} + +export const post63 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCurrent', + path: '/workspaces/current', + tags: ['console'], + }) + .output(zPostWorkspacesCurrentResponse) + +export const current = { + post: post63, + agentProvider, + agentProviders, + datasetOperators, + defaultModel, + endpoints, + members, + modelProviders, + models: models2, + permission, + plugin: plugin2, + toolLabels, + toolProvider, + toolProviders, + tools: tools5, + triggerProvider, + triggers, +} + +export const post64 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCustomConfigWebappLogoUpload', + path: '/workspaces/custom-config/webapp-logo/upload', + tags: ['console'], + }) + .output(zPostWorkspacesCustomConfigWebappLogoUploadResponse) + +export const upload2 = { + post: post64, +} + +export const webappLogo = { + upload: upload2, +} + +export const post65 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesCustomConfig', + path: '/workspaces/custom-config', 
+ tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesCustomConfigBody })) + .output(zPostWorkspacesCustomConfigResponse) + +export const customConfig = { + post: post65, + webappLogo, +} + +export const post66 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesInfo', + path: '/workspaces/info', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesInfoBody })) + .output(zPostWorkspacesInfoResponse) + +export const info4 = { + post: post66, +} + +export const post67 = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkspacesSwitch', + path: '/workspaces/switch', + tags: ['console'], + }) + .input(z.object({ body: zPostWorkspacesSwitchBody })) + .output(zPostWorkspacesSwitchResponse) + +export const switch3 = { + post: post67, +} + +export const get59 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesByTenantIdModelProvidersByProviderByIconTypeByLang', + path: '/workspaces/{tenant_id}/model-providers/{provider}/{icon_type}/{lang}', + tags: ['console'], + }) + .input(z.object({ params: zGetWorkspacesByTenantIdModelProvidersByProviderByIconTypeByLangPath })) + .output(zGetWorkspacesByTenantIdModelProvidersByProviderByIconTypeByLangResponse) + +export const byLang = { + get: get59, +} + +export const byIconType = { + byLang, +} + +export const byProvider4 = { + byIconType, +} + +export const modelProviders2 = { + byProvider: byProvider4, +} + +export const byTenantId = { + modelProviders: modelProviders2, +} + +export const get60 = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspaces', + path: '/workspaces', + tags: ['console'], + }) + .output(zGetWorkspacesResponse) + +export const workspaces = { + get: get60, + current, + customConfig, + info: info4, + switch: switch3, + byTenantId, +} + +export const contract = { + workspaces, +} diff --git 
a/packages/contracts/generated/api/console/workspaces/types.gen.ts b/packages/contracts/generated/api/console/workspaces/types.gen.ts new file mode 100644 index 0000000000..a90db98f22 --- /dev/null +++ b/packages/contracts/generated/api/console/workspaces/types.gen.ts @@ -0,0 +1,3023 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/console/api` | (string & {}) +} + +export type TenantInfoResponse = { + created_at?: number | null + custom_config?: { + [key: string]: unknown + } | null + id: string + in_trial?: boolean | null + name?: string | null + next_credit_reset_date?: number | null + plan?: string | null + role?: string | null + status?: string | null + trial_credits?: number | null + trial_credits_used?: number | null + trial_end_reason?: string | null +} + +export type AccountWithRoleList = { + accounts: Array +} + +export type ParserPostDefault = { + model_settings: Array +} + +export type EndpointCreatePayload = { + name: string + plugin_unique_identifier: string + settings: { + [key: string]: unknown + } +} + +export type EndpointCreateResponse = { + success: boolean +} + +export type EndpointIdPayload = { + endpoint_id: string +} + +export type EndpointDeleteResponse = { + success: boolean +} + +export type EndpointDisableResponse = { + success: boolean +} + +export type EndpointEnableResponse = { + success: boolean +} + +export type EndpointListResponse = { + endpoints: Array<{ + [key: string]: unknown + }> +} + +export type PluginEndpointListResponse = { + endpoints: Array<{ + [key: string]: unknown + }> +} + +export type LegacyEndpointUpdatePayload = { + endpoint_id: string + name: string + settings: { + [key: string]: unknown + } +} + +export type EndpointUpdateResponse = { + success: boolean +} + +export type EndpointUpdatePayload = { + name: string + settings: { + [key: string]: unknown + } +} + +export type MemberInvitePayload = { + emails?: Array + language?: string | 
null + role: TenantAccountRole +} + +export type OwnerTransferCheckPayload = { + code: string + token: string +} + +export type OwnerTransferEmailPayload = { + language?: string | null +} + +export type OwnerTransferPayload = { + token: string +} + +export type MemberRoleUpdatePayload = { + role: string +} + +export type ParserCredentialDelete = { + credential_id: string +} + +export type ParserCredentialCreate = { + credentials: { + [key: string]: unknown + } + name?: string | null +} + +export type ParserCredentialUpdate = { + credential_id: string + credentials: { + [key: string]: unknown + } + name?: string | null +} + +export type ParserCredentialSwitch = { + credential_id: string +} + +export type ParserCredentialValidate = { + credentials: { + [key: string]: unknown + } +} + +export type ParserDeleteModels = { + model: string + model_type: ModelType +} + +export type ParserPostModels = { + config_from?: string | null + credential_id?: string | null + load_balancing?: LoadBalancingPayload + model: string + model_type: ModelType +} + +export type ParserDeleteCredential = { + credential_id: string + model: string + model_type: ModelType +} + +export type ParserCreateCredential = { + credentials: { + [key: string]: unknown + } + model: string + model_type: ModelType + name?: string | null +} + +export type ParserUpdateCredential = { + credential_id: string + credentials: { + [key: string]: unknown + } + model: string + model_type: ModelType + name?: string | null +} + +export type ParserSwitch = { + credential_id: string + model: string + model_type: ModelType +} + +export type ParserValidate = { + credentials: { + [key: string]: unknown + } + model: string + model_type: ModelType +} + +export type LoadBalancingCredentialPayload = { + credentials: { + [key: string]: unknown + } + model: string + model_type: ModelType +} + +export type ParserPreferredProviderType = { + preferred_provider_type: 'system' | 'custom' +} + +export type ParserGithubInstall = { + 
package: string + plugin_unique_identifier: string + repo: string + version: string +} + +export type ParserPluginIdentifiers = { + plugin_unique_identifiers: Array +} + +export type ParserLatest = { + plugin_ids: Array +} + +export type ParserDynamicOptionsWithCredentials = { + action: string + credential_id: string + credentials: { + [key: string]: unknown + } + parameter: string + plugin_id: string + provider: string +} + +export type ParserPermissionChange = { + debug_permission: DebugPermission + install_permission: InstallPermission +} + +export type ParserExcludePlugin = { + plugin_id: string +} + +export type ParserPreferencesChange = { + auto_upgrade: PluginAutoUpgradeSettingsPayload + permission: PluginPermissionSettingsPayload +} + +export type ParserUninstall = { + plugin_installation_id: string +} + +export type ParserGithubUpgrade = { + new_plugin_unique_identifier: string + original_plugin_unique_identifier: string + package: string + repo: string + version: string +} + +export type ParserMarketplaceUpgrade = { + new_plugin_unique_identifier: string + original_plugin_unique_identifier: string +} + +export type ParserGithubUpload = { + package: string + repo: string + version: string +} + +export type ApiToolProviderAddPayload = { + credentials: { + [key: string]: unknown + } + custom_disclaimer?: string + icon: { + [key: string]: unknown + } + labels?: Array | null + privacy_policy?: string | null + provider: string + schema: string + schema_type: ApiProviderSchemaType +} + +export type ApiToolProviderDeletePayload = { + provider: string +} + +export type ApiToolSchemaPayload = { + schema: string +} + +export type ApiToolTestPayload = { + credentials: { + [key: string]: unknown + } + parameters: { + [key: string]: unknown + } + provider_name?: string | null + schema: string + schema_type: ApiProviderSchemaType + tool_name: string +} + +export type ApiToolProviderUpdatePayload = { + credentials: { + [key: string]: unknown + } + custom_disclaimer?: 
string + icon: { + [key: string]: unknown + } + labels?: Array | null + original_provider: string + privacy_policy?: string | null + provider: string + schema: string + schema_type: ApiProviderSchemaType +} + +export type BuiltinToolAddPayload = { + credentials: { + [key: string]: unknown + } + name?: string | null + type: CredentialType +} + +export type BuiltinProviderDefaultCredentialPayload = { + id: string +} + +export type BuiltinToolCredentialDeletePayload = { + credential_id: string +} + +export type ToolOAuthCustomClientPayload = { + client_params?: { + [key: string]: unknown + } | null + enable_oauth_custom_client?: boolean | null +} + +export type BuiltinToolUpdatePayload = { + credential_id: string + credentials?: { + [key: string]: unknown + } | null + name?: string | null +} + +export type McpProviderDeletePayload = { + provider_id: string +} + +export type McpProviderCreatePayload = { + authentication?: { + [key: string]: unknown + } | null + configuration?: { + [key: string]: unknown + } | null + headers?: { + [key: string]: unknown + } | null + icon: string + icon_background?: string + icon_type: string + name: string + server_identifier: string + server_url: string +} + +export type McpProviderUpdatePayload = { + authentication?: { + [key: string]: unknown + } | null + configuration?: { + [key: string]: unknown + } | null + headers?: { + [key: string]: unknown + } | null + icon: string + icon_background?: string + icon_type: string + name: string + provider_id: string + server_identifier: string + server_url: string +} + +export type McpAuthPayload = { + authorization_code?: string | null + provider_id: string +} + +export type WorkflowToolCreatePayload = { + description: string + icon: { + [key: string]: unknown + } + label: string + labels?: Array | null + name: string + parameters?: Array + privacy_policy?: string | null + workflow_app_id: string +} + +export type WorkflowToolDeletePayload = { + workflow_tool_id: string +} + +export type 
WorkflowToolUpdatePayload = { + description: string + icon: { + [key: string]: unknown + } + label: string + labels?: Array | null + name: string + parameters?: Array + privacy_policy?: string | null + workflow_tool_id: string +} + +export type TriggerOAuthClientPayload = { + client_params?: { + [key: string]: unknown + } | null + enabled?: boolean | null +} + +export type TriggerSubscriptionBuilderUpdatePayload = { + credentials?: { + [key: string]: unknown + } | null + name?: string | null + parameters?: { + [key: string]: unknown + } | null + properties?: { + [key: string]: unknown + } | null +} + +export type TriggerSubscriptionBuilderCreatePayload = { + credential_type?: string +} + +export type TriggerSubscriptionBuilderVerifyPayload = { + credentials: { + [key: string]: unknown + } +} + +export type WorkspaceCustomConfigPayload = { + remove_webapp_brand?: boolean | null + replace_webapp_logo?: string | null +} + +export type WorkspaceInfoPayload = { + name: string +} + +export type SwitchWorkspacePayload = { + tenant_id: string +} + +export type AccountWithRole = { + avatar?: string | null + created_at?: number | null + email: string + id: string + last_active_at?: number | null + last_login_at?: number | null + name: string + role: string + status: string +} + +export type Inner = { + model?: string | null + model_type: ModelType + provider?: string | null +} + +export type TenantAccountRole = 'owner' | 'admin' | 'editor' | 'normal' | 'dataset_operator' + +export type ModelType = 'llm' | 'text-embedding' | 'rerank' | 'speech2text' | 'moderation' | 'tts' + +export type LoadBalancingPayload = { + configs?: Array<{ + [key: string]: unknown + }> | null + enabled?: boolean | null +} + +export type DebugPermission = 'everyone' | 'admins' | 'noone' + +export type InstallPermission = 'everyone' | 'admins' | 'noone' + +export type PluginAutoUpgradeSettingsPayload = { + exclude_plugins?: Array + include_plugins?: Array + strategy_setting?: StrategySetting + 
upgrade_mode?: UpgradeMode + upgrade_time_of_day?: number +} + +export type PluginPermissionSettingsPayload = { + debug_permission?: DebugPermission + install_permission?: InstallPermission +} + +export type ApiProviderSchemaType = 'openapi' | 'swagger' | 'openai_plugin' | 'openai_actions' + +export type CredentialType = 'api-key' | 'oauth2' | 'unauthorized' + +export type WorkflowToolParameterConfiguration = { + description: string + form: ToolParameterForm + name: string +} + +export type StrategySetting = 'disabled' | 'fix_only' | 'latest' + +export type UpgradeMode = 'all' | 'partial' | 'exclude' + +export type ToolParameterForm = 'schema' | 'form' | 'llm' + +export type GetWorkspacesData = { + body?: never + path?: never + query?: never + url: '/workspaces' +} + +export type GetWorkspacesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesResponse = GetWorkspacesResponses[keyof GetWorkspacesResponses] + +export type PostWorkspacesCurrentData = { + body?: never + path?: never + query?: never + url: '/workspaces/current' +} + +export type PostWorkspacesCurrentResponses = { + 200: TenantInfoResponse +} + +export type PostWorkspacesCurrentResponse + = PostWorkspacesCurrentResponses[keyof PostWorkspacesCurrentResponses] + +export type GetWorkspacesCurrentAgentProviderByProviderNameData = { + body?: never + path: { + provider_name: string + } + query?: never + url: '/workspaces/current/agent-provider/{provider_name}' +} + +export type GetWorkspacesCurrentAgentProviderByProviderNameResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentAgentProviderByProviderNameResponse + = GetWorkspacesCurrentAgentProviderByProviderNameResponses[keyof GetWorkspacesCurrentAgentProviderByProviderNameResponses] + +export type GetWorkspacesCurrentAgentProvidersData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/agent-providers' +} + +export type GetWorkspacesCurrentAgentProvidersResponses = 
{ + 200: Array<{ + [key: string]: unknown + }> +} + +export type GetWorkspacesCurrentAgentProvidersResponse + = GetWorkspacesCurrentAgentProvidersResponses[keyof GetWorkspacesCurrentAgentProvidersResponses] + +export type GetWorkspacesCurrentDatasetOperatorsData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/dataset-operators' +} + +export type GetWorkspacesCurrentDatasetOperatorsResponses = { + 200: AccountWithRoleList +} + +export type GetWorkspacesCurrentDatasetOperatorsResponse + = GetWorkspacesCurrentDatasetOperatorsResponses[keyof GetWorkspacesCurrentDatasetOperatorsResponses] + +export type GetWorkspacesCurrentDefaultModelData = { + body?: never + path?: never + query: { + model_type: string + } + url: '/workspaces/current/default-model' +} + +export type GetWorkspacesCurrentDefaultModelResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentDefaultModelResponse + = GetWorkspacesCurrentDefaultModelResponses[keyof GetWorkspacesCurrentDefaultModelResponses] + +export type PostWorkspacesCurrentDefaultModelData = { + body: ParserPostDefault + path?: never + query?: never + url: '/workspaces/current/default-model' +} + +export type PostWorkspacesCurrentDefaultModelResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentDefaultModelResponse + = PostWorkspacesCurrentDefaultModelResponses[keyof PostWorkspacesCurrentDefaultModelResponses] + +export type PostWorkspacesCurrentEndpointsData = { + body: EndpointCreatePayload + path?: never + query?: never + url: '/workspaces/current/endpoints' +} + +export type PostWorkspacesCurrentEndpointsErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentEndpointsError + = PostWorkspacesCurrentEndpointsErrors[keyof PostWorkspacesCurrentEndpointsErrors] + +export type PostWorkspacesCurrentEndpointsResponses = { + 200: EndpointCreateResponse +} + +export type PostWorkspacesCurrentEndpointsResponse + = 
PostWorkspacesCurrentEndpointsResponses[keyof PostWorkspacesCurrentEndpointsResponses] + +export type PostWorkspacesCurrentEndpointsCreateData = { + body: EndpointCreatePayload + path?: never + query?: never + url: '/workspaces/current/endpoints/create' +} + +export type PostWorkspacesCurrentEndpointsCreateErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentEndpointsCreateError + = PostWorkspacesCurrentEndpointsCreateErrors[keyof PostWorkspacesCurrentEndpointsCreateErrors] + +export type PostWorkspacesCurrentEndpointsCreateResponses = { + 200: EndpointCreateResponse +} + +export type PostWorkspacesCurrentEndpointsCreateResponse + = PostWorkspacesCurrentEndpointsCreateResponses[keyof PostWorkspacesCurrentEndpointsCreateResponses] + +export type PostWorkspacesCurrentEndpointsDeleteData = { + body: EndpointIdPayload + path?: never + query?: never + url: '/workspaces/current/endpoints/delete' +} + +export type PostWorkspacesCurrentEndpointsDeleteErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentEndpointsDeleteError + = PostWorkspacesCurrentEndpointsDeleteErrors[keyof PostWorkspacesCurrentEndpointsDeleteErrors] + +export type PostWorkspacesCurrentEndpointsDeleteResponses = { + 200: EndpointDeleteResponse +} + +export type PostWorkspacesCurrentEndpointsDeleteResponse + = PostWorkspacesCurrentEndpointsDeleteResponses[keyof PostWorkspacesCurrentEndpointsDeleteResponses] + +export type PostWorkspacesCurrentEndpointsDisableData = { + body: EndpointIdPayload + path?: never + query?: never + url: '/workspaces/current/endpoints/disable' +} + +export type PostWorkspacesCurrentEndpointsDisableErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentEndpointsDisableError + = PostWorkspacesCurrentEndpointsDisableErrors[keyof PostWorkspacesCurrentEndpointsDisableErrors] + +export type PostWorkspacesCurrentEndpointsDisableResponses = { + 200: EndpointDisableResponse +} + +export 
type PostWorkspacesCurrentEndpointsDisableResponse + = PostWorkspacesCurrentEndpointsDisableResponses[keyof PostWorkspacesCurrentEndpointsDisableResponses] + +export type PostWorkspacesCurrentEndpointsEnableData = { + body: EndpointIdPayload + path?: never + query?: never + url: '/workspaces/current/endpoints/enable' +} + +export type PostWorkspacesCurrentEndpointsEnableErrors = { + 403: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentEndpointsEnableError + = PostWorkspacesCurrentEndpointsEnableErrors[keyof PostWorkspacesCurrentEndpointsEnableErrors] + +export type PostWorkspacesCurrentEndpointsEnableResponses = { + 200: EndpointEnableResponse +} + +export type PostWorkspacesCurrentEndpointsEnableResponse + = PostWorkspacesCurrentEndpointsEnableResponses[keyof PostWorkspacesCurrentEndpointsEnableResponses] + +export type GetWorkspacesCurrentEndpointsListData = { + body?: never + path?: never + query: { + page: number + page_size: number + } + url: '/workspaces/current/endpoints/list' +} + +export type GetWorkspacesCurrentEndpointsListResponses = { + 200: EndpointListResponse +} + +export type GetWorkspacesCurrentEndpointsListResponse + = GetWorkspacesCurrentEndpointsListResponses[keyof GetWorkspacesCurrentEndpointsListResponses] + +export type GetWorkspacesCurrentEndpointsListPluginData = { + body?: never + path?: never + query: { + page: number + page_size: number + plugin_id: string + } + url: '/workspaces/current/endpoints/list/plugin' +} + +export type GetWorkspacesCurrentEndpointsListPluginResponses = { + 200: PluginEndpointListResponse +} + +export type GetWorkspacesCurrentEndpointsListPluginResponse + = GetWorkspacesCurrentEndpointsListPluginResponses[keyof GetWorkspacesCurrentEndpointsListPluginResponses] + +export type PostWorkspacesCurrentEndpointsUpdateData = { + body: LegacyEndpointUpdatePayload + path?: never + query?: never + url: '/workspaces/current/endpoints/update' +} + +export type PostWorkspacesCurrentEndpointsUpdateErrors 
= { + 403: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentEndpointsUpdateError + = PostWorkspacesCurrentEndpointsUpdateErrors[keyof PostWorkspacesCurrentEndpointsUpdateErrors] + +export type PostWorkspacesCurrentEndpointsUpdateResponses = { + 200: EndpointUpdateResponse +} + +export type PostWorkspacesCurrentEndpointsUpdateResponse + = PostWorkspacesCurrentEndpointsUpdateResponses[keyof PostWorkspacesCurrentEndpointsUpdateResponses] + +export type DeleteWorkspacesCurrentEndpointsByIdData = { + body?: never + path: { + id: string + } + query?: never + url: '/workspaces/current/endpoints/{id}' +} + +export type DeleteWorkspacesCurrentEndpointsByIdErrors = { + 403: { + [key: string]: unknown + } +} + +export type DeleteWorkspacesCurrentEndpointsByIdError + = DeleteWorkspacesCurrentEndpointsByIdErrors[keyof DeleteWorkspacesCurrentEndpointsByIdErrors] + +export type DeleteWorkspacesCurrentEndpointsByIdResponses = { + 200: EndpointDeleteResponse +} + +export type DeleteWorkspacesCurrentEndpointsByIdResponse + = DeleteWorkspacesCurrentEndpointsByIdResponses[keyof DeleteWorkspacesCurrentEndpointsByIdResponses] + +export type PatchWorkspacesCurrentEndpointsByIdData = { + body: EndpointUpdatePayload + path: { + id: string + } + query?: never + url: '/workspaces/current/endpoints/{id}' +} + +export type PatchWorkspacesCurrentEndpointsByIdErrors = { + 403: { + [key: string]: unknown + } +} + +export type PatchWorkspacesCurrentEndpointsByIdError + = PatchWorkspacesCurrentEndpointsByIdErrors[keyof PatchWorkspacesCurrentEndpointsByIdErrors] + +export type PatchWorkspacesCurrentEndpointsByIdResponses = { + 200: EndpointUpdateResponse +} + +export type PatchWorkspacesCurrentEndpointsByIdResponse + = PatchWorkspacesCurrentEndpointsByIdResponses[keyof PatchWorkspacesCurrentEndpointsByIdResponses] + +export type GetWorkspacesCurrentMembersData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/members' +} + +export type 
GetWorkspacesCurrentMembersResponses = { + 200: AccountWithRoleList +} + +export type GetWorkspacesCurrentMembersResponse + = GetWorkspacesCurrentMembersResponses[keyof GetWorkspacesCurrentMembersResponses] + +export type PostWorkspacesCurrentMembersInviteEmailData = { + body: MemberInvitePayload + path?: never + query?: never + url: '/workspaces/current/members/invite-email' +} + +export type PostWorkspacesCurrentMembersInviteEmailResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentMembersInviteEmailResponse + = PostWorkspacesCurrentMembersInviteEmailResponses[keyof PostWorkspacesCurrentMembersInviteEmailResponses] + +export type PostWorkspacesCurrentMembersOwnerTransferCheckData = { + body: OwnerTransferCheckPayload + path?: never + query?: never + url: '/workspaces/current/members/owner-transfer-check' +} + +export type PostWorkspacesCurrentMembersOwnerTransferCheckResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentMembersOwnerTransferCheckResponse + = PostWorkspacesCurrentMembersOwnerTransferCheckResponses[keyof PostWorkspacesCurrentMembersOwnerTransferCheckResponses] + +export type PostWorkspacesCurrentMembersSendOwnerTransferConfirmEmailData = { + body: OwnerTransferEmailPayload + path?: never + query?: never + url: '/workspaces/current/members/send-owner-transfer-confirm-email' +} + +export type PostWorkspacesCurrentMembersSendOwnerTransferConfirmEmailResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentMembersSendOwnerTransferConfirmEmailResponse + = PostWorkspacesCurrentMembersSendOwnerTransferConfirmEmailResponses[keyof PostWorkspacesCurrentMembersSendOwnerTransferConfirmEmailResponses] + +export type DeleteWorkspacesCurrentMembersByMemberIdData = { + body?: never + path: { + member_id: string + } + query?: never + url: '/workspaces/current/members/{member_id}' +} + +export type DeleteWorkspacesCurrentMembersByMemberIdResponses = { + 200: { 
+ [key: string]: unknown + } +} + +export type DeleteWorkspacesCurrentMembersByMemberIdResponse + = DeleteWorkspacesCurrentMembersByMemberIdResponses[keyof DeleteWorkspacesCurrentMembersByMemberIdResponses] + +export type PostWorkspacesCurrentMembersByMemberIdOwnerTransferData = { + body: OwnerTransferPayload + path: { + member_id: string + } + query?: never + url: '/workspaces/current/members/{member_id}/owner-transfer' +} + +export type PostWorkspacesCurrentMembersByMemberIdOwnerTransferResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentMembersByMemberIdOwnerTransferResponse + = PostWorkspacesCurrentMembersByMemberIdOwnerTransferResponses[keyof PostWorkspacesCurrentMembersByMemberIdOwnerTransferResponses] + +export type PutWorkspacesCurrentMembersByMemberIdUpdateRoleData = { + body: MemberRoleUpdatePayload + path: { + member_id: string + } + query?: never + url: '/workspaces/current/members/{member_id}/update-role' +} + +export type PutWorkspacesCurrentMembersByMemberIdUpdateRoleResponses = { + 200: { + [key: string]: unknown + } +} + +export type PutWorkspacesCurrentMembersByMemberIdUpdateRoleResponse + = PutWorkspacesCurrentMembersByMemberIdUpdateRoleResponses[keyof PutWorkspacesCurrentMembersByMemberIdUpdateRoleResponses] + +export type GetWorkspacesCurrentModelProvidersData = { + body?: never + path?: never + query?: { + model_type?: string | null + } + url: '/workspaces/current/model-providers' +} + +export type GetWorkspacesCurrentModelProvidersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentModelProvidersResponse + = GetWorkspacesCurrentModelProvidersResponses[keyof GetWorkspacesCurrentModelProvidersResponses] + +export type GetWorkspacesCurrentModelProvidersByProviderCheckoutUrlData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/checkout-url' +} + +export type 
GetWorkspacesCurrentModelProvidersByProviderCheckoutUrlResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentModelProvidersByProviderCheckoutUrlResponse + = GetWorkspacesCurrentModelProvidersByProviderCheckoutUrlResponses[keyof GetWorkspacesCurrentModelProvidersByProviderCheckoutUrlResponses] + +export type DeleteWorkspacesCurrentModelProvidersByProviderCredentialsData = { + body: ParserCredentialDelete + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/credentials' +} + +export type DeleteWorkspacesCurrentModelProvidersByProviderCredentialsResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteWorkspacesCurrentModelProvidersByProviderCredentialsResponse + = DeleteWorkspacesCurrentModelProvidersByProviderCredentialsResponses[keyof DeleteWorkspacesCurrentModelProvidersByProviderCredentialsResponses] + +export type GetWorkspacesCurrentModelProvidersByProviderCredentialsData = { + body?: never + path: { + provider: string + } + query?: { + credential_id?: string | null + } + url: '/workspaces/current/model-providers/{provider}/credentials' +} + +export type GetWorkspacesCurrentModelProvidersByProviderCredentialsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentModelProvidersByProviderCredentialsResponse + = GetWorkspacesCurrentModelProvidersByProviderCredentialsResponses[keyof GetWorkspacesCurrentModelProvidersByProviderCredentialsResponses] + +export type PostWorkspacesCurrentModelProvidersByProviderCredentialsData = { + body: ParserCredentialCreate + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/credentials' +} + +export type PostWorkspacesCurrentModelProvidersByProviderCredentialsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentModelProvidersByProviderCredentialsResponse + = 
PostWorkspacesCurrentModelProvidersByProviderCredentialsResponses[keyof PostWorkspacesCurrentModelProvidersByProviderCredentialsResponses] + +export type PutWorkspacesCurrentModelProvidersByProviderCredentialsData = { + body: ParserCredentialUpdate + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/credentials' +} + +export type PutWorkspacesCurrentModelProvidersByProviderCredentialsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PutWorkspacesCurrentModelProvidersByProviderCredentialsResponse + = PutWorkspacesCurrentModelProvidersByProviderCredentialsResponses[keyof PutWorkspacesCurrentModelProvidersByProviderCredentialsResponses] + +export type PostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchData = { + body: ParserCredentialSwitch + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/credentials/switch' +} + +export type PostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchResponse + = PostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchResponses[keyof PostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchResponses] + +export type PostWorkspacesCurrentModelProvidersByProviderCredentialsValidateData = { + body: ParserCredentialValidate + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/credentials/validate' +} + +export type PostWorkspacesCurrentModelProvidersByProviderCredentialsValidateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentModelProvidersByProviderCredentialsValidateResponse + = PostWorkspacesCurrentModelProvidersByProviderCredentialsValidateResponses[keyof PostWorkspacesCurrentModelProvidersByProviderCredentialsValidateResponses] + +export type 
DeleteWorkspacesCurrentModelProvidersByProviderModelsData = { + body: ParserDeleteModels + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/models' +} + +export type DeleteWorkspacesCurrentModelProvidersByProviderModelsResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteWorkspacesCurrentModelProvidersByProviderModelsResponse + = DeleteWorkspacesCurrentModelProvidersByProviderModelsResponses[keyof DeleteWorkspacesCurrentModelProvidersByProviderModelsResponses] + +export type GetWorkspacesCurrentModelProvidersByProviderModelsData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/models' +} + +export type GetWorkspacesCurrentModelProvidersByProviderModelsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentModelProvidersByProviderModelsResponse + = GetWorkspacesCurrentModelProvidersByProviderModelsResponses[keyof GetWorkspacesCurrentModelProvidersByProviderModelsResponses] + +export type PostWorkspacesCurrentModelProvidersByProviderModelsData = { + body: ParserPostModels + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/models' +} + +export type PostWorkspacesCurrentModelProvidersByProviderModelsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentModelProvidersByProviderModelsResponse + = PostWorkspacesCurrentModelProvidersByProviderModelsResponses[keyof PostWorkspacesCurrentModelProvidersByProviderModelsResponses] + +export type DeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsData = { + body: ParserDeleteCredential + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/models/credentials' +} + +export type DeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponses = { + 200: { + [key: string]: unknown + } +} + 
+export type DeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse + = DeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponses[keyof DeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponses] + +export type GetWorkspacesCurrentModelProvidersByProviderModelsCredentialsData = { + body?: never + path: { + provider: string + } + query: { + config_from?: string | null + credential_id?: string | null + model: string + model_type: string + } + url: '/workspaces/current/model-providers/{provider}/models/credentials' +} + +export type GetWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse + = GetWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponses[keyof GetWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponses] + +export type PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsData = { + body: ParserCreateCredential + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/models/credentials' +} + +export type PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse + = PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponses[keyof PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponses] + +export type PutWorkspacesCurrentModelProvidersByProviderModelsCredentialsData = { + body: ParserUpdateCredential + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/models/credentials' +} + +export type PutWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
PutWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse + = PutWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponses[keyof PutWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponses] + +export type PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchData = { + body: ParserSwitch + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/models/credentials/switch' +} + +export type PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchResponse + = PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchResponses[keyof PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchResponses] + +export type PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidateData = { + body: ParserValidate + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/models/credentials/validate' +} + +export type PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidateResponse + = PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidateResponses[keyof PostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidateResponses] + +export type PatchWorkspacesCurrentModelProvidersByProviderModelsDisableData = { + body: ParserDeleteModels + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/models/disable' +} + +export type PatchWorkspacesCurrentModelProvidersByProviderModelsDisableResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchWorkspacesCurrentModelProvidersByProviderModelsDisableResponse + = 
PatchWorkspacesCurrentModelProvidersByProviderModelsDisableResponses[keyof PatchWorkspacesCurrentModelProvidersByProviderModelsDisableResponses] + +export type PatchWorkspacesCurrentModelProvidersByProviderModelsEnableData = { + body: ParserDeleteModels + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/models/enable' +} + +export type PatchWorkspacesCurrentModelProvidersByProviderModelsEnableResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchWorkspacesCurrentModelProvidersByProviderModelsEnableResponse + = PatchWorkspacesCurrentModelProvidersByProviderModelsEnableResponses[keyof PatchWorkspacesCurrentModelProvidersByProviderModelsEnableResponses] + +export type PostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidateData + = { + body: LoadBalancingCredentialPayload + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/models/load-balancing-configs/credentials-validate' + } + +export type PostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidateResponses + = { + 200: { + [key: string]: unknown + } + } + +export type PostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidateResponse + = PostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidateResponses[keyof PostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidateResponses] + +export type PostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidateData + = { + body: LoadBalancingCredentialPayload + path: { + provider: string + config_id: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/models/load-balancing-configs/{config_id}/credentials-validate' + } + +export type 
PostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidateResponses + = { + 200: { + [key: string]: unknown + } + } + +export type PostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidateResponse + = PostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidateResponses[keyof PostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidateResponses] + +export type GetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesData = { + body?: never + path: { + provider: string + } + query: { + model: string + } + url: '/workspaces/current/model-providers/{provider}/models/parameter-rules' +} + +export type GetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesResponse + = GetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesResponses[keyof GetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesResponses] + +export type PostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypeData = { + body: ParserPreferredProviderType + path: { + provider: string + } + query?: never + url: '/workspaces/current/model-providers/{provider}/preferred-provider-type' +} + +export type PostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypeResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypeResponse + = PostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypeResponses[keyof PostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypeResponses] + +export type GetWorkspacesCurrentModelsModelTypesByModelTypeData = { + body?: never + path: { + model_type: string + } + query?: never + url: '/workspaces/current/models/model-types/{model_type}' +} + 
+export type GetWorkspacesCurrentModelsModelTypesByModelTypeResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentModelsModelTypesByModelTypeResponse + = GetWorkspacesCurrentModelsModelTypesByModelTypeResponses[keyof GetWorkspacesCurrentModelsModelTypesByModelTypeResponses] + +export type GetWorkspacesCurrentPermissionData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/permission' +} + +export type GetWorkspacesCurrentPermissionResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentPermissionResponse + = GetWorkspacesCurrentPermissionResponses[keyof GetWorkspacesCurrentPermissionResponses] + +export type GetWorkspacesCurrentPluginAssetData = { + body?: never + path?: never + query: { + file_name: string + plugin_unique_identifier: string + } + url: '/workspaces/current/plugin/asset' +} + +export type GetWorkspacesCurrentPluginAssetResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentPluginAssetResponse + = GetWorkspacesCurrentPluginAssetResponses[keyof GetWorkspacesCurrentPluginAssetResponses] + +export type GetWorkspacesCurrentPluginDebuggingKeyData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/plugin/debugging-key' +} + +export type GetWorkspacesCurrentPluginDebuggingKeyResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentPluginDebuggingKeyResponse + = GetWorkspacesCurrentPluginDebuggingKeyResponses[keyof GetWorkspacesCurrentPluginDebuggingKeyResponses] + +export type GetWorkspacesCurrentPluginFetchManifestData = { + body?: never + path?: never + query: { + plugin_unique_identifier: string + } + url: '/workspaces/current/plugin/fetch-manifest' +} + +export type GetWorkspacesCurrentPluginFetchManifestResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentPluginFetchManifestResponse + = 
GetWorkspacesCurrentPluginFetchManifestResponses[keyof GetWorkspacesCurrentPluginFetchManifestResponses] + +export type GetWorkspacesCurrentPluginIconData = { + body?: never + path?: never + query: { + filename: string + tenant_id: string + } + url: '/workspaces/current/plugin/icon' +} + +export type GetWorkspacesCurrentPluginIconResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentPluginIconResponse + = GetWorkspacesCurrentPluginIconResponses[keyof GetWorkspacesCurrentPluginIconResponses] + +export type PostWorkspacesCurrentPluginInstallGithubData = { + body: ParserGithubInstall + path?: never + query?: never + url: '/workspaces/current/plugin/install/github' +} + +export type PostWorkspacesCurrentPluginInstallGithubResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginInstallGithubResponse + = PostWorkspacesCurrentPluginInstallGithubResponses[keyof PostWorkspacesCurrentPluginInstallGithubResponses] + +export type PostWorkspacesCurrentPluginInstallMarketplaceData = { + body: ParserPluginIdentifiers + path?: never + query?: never + url: '/workspaces/current/plugin/install/marketplace' +} + +export type PostWorkspacesCurrentPluginInstallMarketplaceResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginInstallMarketplaceResponse + = PostWorkspacesCurrentPluginInstallMarketplaceResponses[keyof PostWorkspacesCurrentPluginInstallMarketplaceResponses] + +export type PostWorkspacesCurrentPluginInstallPkgData = { + body: ParserPluginIdentifiers + path?: never + query?: never + url: '/workspaces/current/plugin/install/pkg' +} + +export type PostWorkspacesCurrentPluginInstallPkgResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginInstallPkgResponse + = PostWorkspacesCurrentPluginInstallPkgResponses[keyof PostWorkspacesCurrentPluginInstallPkgResponses] + +export type GetWorkspacesCurrentPluginListData = { + 
body?: never + path?: never + query?: { + page?: number + page_size?: number + } + url: '/workspaces/current/plugin/list' +} + +export type GetWorkspacesCurrentPluginListResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentPluginListResponse + = GetWorkspacesCurrentPluginListResponses[keyof GetWorkspacesCurrentPluginListResponses] + +export type PostWorkspacesCurrentPluginListInstallationsIdsData = { + body: ParserLatest + path?: never + query?: never + url: '/workspaces/current/plugin/list/installations/ids' +} + +export type PostWorkspacesCurrentPluginListInstallationsIdsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginListInstallationsIdsResponse + = PostWorkspacesCurrentPluginListInstallationsIdsResponses[keyof PostWorkspacesCurrentPluginListInstallationsIdsResponses] + +export type PostWorkspacesCurrentPluginListLatestVersionsData = { + body: ParserLatest + path?: never + query?: never + url: '/workspaces/current/plugin/list/latest-versions' +} + +export type PostWorkspacesCurrentPluginListLatestVersionsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginListLatestVersionsResponse + = PostWorkspacesCurrentPluginListLatestVersionsResponses[keyof PostWorkspacesCurrentPluginListLatestVersionsResponses] + +export type GetWorkspacesCurrentPluginMarketplacePkgData = { + body?: never + path?: never + query: { + plugin_unique_identifier: string + } + url: '/workspaces/current/plugin/marketplace/pkg' +} + +export type GetWorkspacesCurrentPluginMarketplacePkgResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentPluginMarketplacePkgResponse + = GetWorkspacesCurrentPluginMarketplacePkgResponses[keyof GetWorkspacesCurrentPluginMarketplacePkgResponses] + +export type GetWorkspacesCurrentPluginParametersDynamicOptionsData = { + body?: never + path?: never + query: { + action: string + credential_id?: string | 
null + parameter: string + plugin_id: string + provider: string + provider_type: 'tool' | 'trigger' + } + url: '/workspaces/current/plugin/parameters/dynamic-options' +} + +export type GetWorkspacesCurrentPluginParametersDynamicOptionsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentPluginParametersDynamicOptionsResponse + = GetWorkspacesCurrentPluginParametersDynamicOptionsResponses[keyof GetWorkspacesCurrentPluginParametersDynamicOptionsResponses] + +export type PostWorkspacesCurrentPluginParametersDynamicOptionsWithCredentialsData = { + body: ParserDynamicOptionsWithCredentials + path?: never + query?: never + url: '/workspaces/current/plugin/parameters/dynamic-options-with-credentials' +} + +export type PostWorkspacesCurrentPluginParametersDynamicOptionsWithCredentialsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginParametersDynamicOptionsWithCredentialsResponse + = PostWorkspacesCurrentPluginParametersDynamicOptionsWithCredentialsResponses[keyof PostWorkspacesCurrentPluginParametersDynamicOptionsWithCredentialsResponses] + +export type PostWorkspacesCurrentPluginPermissionChangeData = { + body: ParserPermissionChange + path?: never + query?: never + url: '/workspaces/current/plugin/permission/change' +} + +export type PostWorkspacesCurrentPluginPermissionChangeResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginPermissionChangeResponse + = PostWorkspacesCurrentPluginPermissionChangeResponses[keyof PostWorkspacesCurrentPluginPermissionChangeResponses] + +export type GetWorkspacesCurrentPluginPermissionFetchData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/plugin/permission/fetch' +} + +export type GetWorkspacesCurrentPluginPermissionFetchResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentPluginPermissionFetchResponse + = 
GetWorkspacesCurrentPluginPermissionFetchResponses[keyof GetWorkspacesCurrentPluginPermissionFetchResponses] + +export type PostWorkspacesCurrentPluginPreferencesAutoupgradeExcludeData = { + body: ParserExcludePlugin + path?: never + query?: never + url: '/workspaces/current/plugin/preferences/autoupgrade/exclude' +} + +export type PostWorkspacesCurrentPluginPreferencesAutoupgradeExcludeResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginPreferencesAutoupgradeExcludeResponse + = PostWorkspacesCurrentPluginPreferencesAutoupgradeExcludeResponses[keyof PostWorkspacesCurrentPluginPreferencesAutoupgradeExcludeResponses] + +export type PostWorkspacesCurrentPluginPreferencesChangeData = { + body: ParserPreferencesChange + path?: never + query?: never + url: '/workspaces/current/plugin/preferences/change' +} + +export type PostWorkspacesCurrentPluginPreferencesChangeResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginPreferencesChangeResponse + = PostWorkspacesCurrentPluginPreferencesChangeResponses[keyof PostWorkspacesCurrentPluginPreferencesChangeResponses] + +export type GetWorkspacesCurrentPluginPreferencesFetchData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/plugin/preferences/fetch' +} + +export type GetWorkspacesCurrentPluginPreferencesFetchResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentPluginPreferencesFetchResponse + = GetWorkspacesCurrentPluginPreferencesFetchResponses[keyof GetWorkspacesCurrentPluginPreferencesFetchResponses] + +export type GetWorkspacesCurrentPluginReadmeData = { + body?: never + path?: never + query: { + language?: string + plugin_unique_identifier: string + } + url: '/workspaces/current/plugin/readme' +} + +export type GetWorkspacesCurrentPluginReadmeResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentPluginReadmeResponse + = 
GetWorkspacesCurrentPluginReadmeResponses[keyof GetWorkspacesCurrentPluginReadmeResponses] + +export type GetWorkspacesCurrentPluginTasksData = { + body?: never + path?: never + query?: { + page?: number + page_size?: number + } + url: '/workspaces/current/plugin/tasks' +} + +export type GetWorkspacesCurrentPluginTasksResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentPluginTasksResponse + = GetWorkspacesCurrentPluginTasksResponses[keyof GetWorkspacesCurrentPluginTasksResponses] + +export type PostWorkspacesCurrentPluginTasksDeleteAllData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/plugin/tasks/delete_all' +} + +export type PostWorkspacesCurrentPluginTasksDeleteAllResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginTasksDeleteAllResponse + = PostWorkspacesCurrentPluginTasksDeleteAllResponses[keyof PostWorkspacesCurrentPluginTasksDeleteAllResponses] + +export type GetWorkspacesCurrentPluginTasksByTaskIdData = { + body?: never + path: { + task_id: string + } + query?: never + url: '/workspaces/current/plugin/tasks/{task_id}' +} + +export type GetWorkspacesCurrentPluginTasksByTaskIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentPluginTasksByTaskIdResponse + = GetWorkspacesCurrentPluginTasksByTaskIdResponses[keyof GetWorkspacesCurrentPluginTasksByTaskIdResponses] + +export type PostWorkspacesCurrentPluginTasksByTaskIdDeleteData = { + body?: never + path: { + task_id: string + } + query?: never + url: '/workspaces/current/plugin/tasks/{task_id}/delete' +} + +export type PostWorkspacesCurrentPluginTasksByTaskIdDeleteResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginTasksByTaskIdDeleteResponse + = PostWorkspacesCurrentPluginTasksByTaskIdDeleteResponses[keyof PostWorkspacesCurrentPluginTasksByTaskIdDeleteResponses] + +export type 
PostWorkspacesCurrentPluginTasksByTaskIdDeleteByIdentifierData = { + body?: never + path: { + task_id: string + identifier: string + } + query?: never + url: '/workspaces/current/plugin/tasks/{task_id}/delete/{identifier}' +} + +export type PostWorkspacesCurrentPluginTasksByTaskIdDeleteByIdentifierResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginTasksByTaskIdDeleteByIdentifierResponse + = PostWorkspacesCurrentPluginTasksByTaskIdDeleteByIdentifierResponses[keyof PostWorkspacesCurrentPluginTasksByTaskIdDeleteByIdentifierResponses] + +export type PostWorkspacesCurrentPluginUninstallData = { + body: ParserUninstall + path?: never + query?: never + url: '/workspaces/current/plugin/uninstall' +} + +export type PostWorkspacesCurrentPluginUninstallResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginUninstallResponse + = PostWorkspacesCurrentPluginUninstallResponses[keyof PostWorkspacesCurrentPluginUninstallResponses] + +export type PostWorkspacesCurrentPluginUpgradeGithubData = { + body: ParserGithubUpgrade + path?: never + query?: never + url: '/workspaces/current/plugin/upgrade/github' +} + +export type PostWorkspacesCurrentPluginUpgradeGithubResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginUpgradeGithubResponse + = PostWorkspacesCurrentPluginUpgradeGithubResponses[keyof PostWorkspacesCurrentPluginUpgradeGithubResponses] + +export type PostWorkspacesCurrentPluginUpgradeMarketplaceData = { + body: ParserMarketplaceUpgrade + path?: never + query?: never + url: '/workspaces/current/plugin/upgrade/marketplace' +} + +export type PostWorkspacesCurrentPluginUpgradeMarketplaceResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginUpgradeMarketplaceResponse + = PostWorkspacesCurrentPluginUpgradeMarketplaceResponses[keyof PostWorkspacesCurrentPluginUpgradeMarketplaceResponses] + +export type 
PostWorkspacesCurrentPluginUploadBundleData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/plugin/upload/bundle' +} + +export type PostWorkspacesCurrentPluginUploadBundleResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginUploadBundleResponse + = PostWorkspacesCurrentPluginUploadBundleResponses[keyof PostWorkspacesCurrentPluginUploadBundleResponses] + +export type PostWorkspacesCurrentPluginUploadGithubData = { + body: ParserGithubUpload + path?: never + query?: never + url: '/workspaces/current/plugin/upload/github' +} + +export type PostWorkspacesCurrentPluginUploadGithubResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginUploadGithubResponse + = PostWorkspacesCurrentPluginUploadGithubResponses[keyof PostWorkspacesCurrentPluginUploadGithubResponses] + +export type PostWorkspacesCurrentPluginUploadPkgData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/plugin/upload/pkg' +} + +export type PostWorkspacesCurrentPluginUploadPkgResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentPluginUploadPkgResponse + = PostWorkspacesCurrentPluginUploadPkgResponses[keyof PostWorkspacesCurrentPluginUploadPkgResponses] + +export type GetWorkspacesCurrentToolLabelsData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/tool-labels' +} + +export type GetWorkspacesCurrentToolLabelsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolLabelsResponse + = GetWorkspacesCurrentToolLabelsResponses[keyof GetWorkspacesCurrentToolLabelsResponses] + +export type PostWorkspacesCurrentToolProviderApiAddData = { + body: ApiToolProviderAddPayload + path?: never + query?: never + url: '/workspaces/current/tool-provider/api/add' +} + +export type PostWorkspacesCurrentToolProviderApiAddResponses = { + 200: { + [key: string]: unknown + } +} + +export 
type PostWorkspacesCurrentToolProviderApiAddResponse + = PostWorkspacesCurrentToolProviderApiAddResponses[keyof PostWorkspacesCurrentToolProviderApiAddResponses] + +export type PostWorkspacesCurrentToolProviderApiDeleteData = { + body: ApiToolProviderDeletePayload + path?: never + query?: never + url: '/workspaces/current/tool-provider/api/delete' +} + +export type PostWorkspacesCurrentToolProviderApiDeleteResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentToolProviderApiDeleteResponse + = PostWorkspacesCurrentToolProviderApiDeleteResponses[keyof PostWorkspacesCurrentToolProviderApiDeleteResponses] + +export type GetWorkspacesCurrentToolProviderApiGetData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/tool-provider/api/get' +} + +export type GetWorkspacesCurrentToolProviderApiGetResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderApiGetResponse + = GetWorkspacesCurrentToolProviderApiGetResponses[keyof GetWorkspacesCurrentToolProviderApiGetResponses] + +export type GetWorkspacesCurrentToolProviderApiRemoteData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/tool-provider/api/remote' +} + +export type GetWorkspacesCurrentToolProviderApiRemoteResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderApiRemoteResponse + = GetWorkspacesCurrentToolProviderApiRemoteResponses[keyof GetWorkspacesCurrentToolProviderApiRemoteResponses] + +export type PostWorkspacesCurrentToolProviderApiSchemaData = { + body: ApiToolSchemaPayload + path?: never + query?: never + url: '/workspaces/current/tool-provider/api/schema' +} + +export type PostWorkspacesCurrentToolProviderApiSchemaResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentToolProviderApiSchemaResponse + = PostWorkspacesCurrentToolProviderApiSchemaResponses[keyof 
PostWorkspacesCurrentToolProviderApiSchemaResponses] + +export type PostWorkspacesCurrentToolProviderApiTestPreData = { + body: ApiToolTestPayload + path?: never + query?: never + url: '/workspaces/current/tool-provider/api/test/pre' +} + +export type PostWorkspacesCurrentToolProviderApiTestPreResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentToolProviderApiTestPreResponse + = PostWorkspacesCurrentToolProviderApiTestPreResponses[keyof PostWorkspacesCurrentToolProviderApiTestPreResponses] + +export type GetWorkspacesCurrentToolProviderApiToolsData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/tool-provider/api/tools' +} + +export type GetWorkspacesCurrentToolProviderApiToolsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderApiToolsResponse + = GetWorkspacesCurrentToolProviderApiToolsResponses[keyof GetWorkspacesCurrentToolProviderApiToolsResponses] + +export type PostWorkspacesCurrentToolProviderApiUpdateData = { + body: ApiToolProviderUpdatePayload + path?: never + query?: never + url: '/workspaces/current/tool-provider/api/update' +} + +export type PostWorkspacesCurrentToolProviderApiUpdateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentToolProviderApiUpdateResponse + = PostWorkspacesCurrentToolProviderApiUpdateResponses[keyof PostWorkspacesCurrentToolProviderApiUpdateResponses] + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderAddData = { + body: BuiltinToolAddPayload + path: { + provider: string + } + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/add' +} + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderAddResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderAddResponse + = PostWorkspacesCurrentToolProviderBuiltinByProviderAddResponses[keyof 
PostWorkspacesCurrentToolProviderBuiltinByProviderAddResponses] + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialInfoData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/credential/info' +} + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialInfoResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialInfoResponse + = GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialInfoResponses[keyof GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialInfoResponses] + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialSchemaByCredentialTypeData + = { + body?: never + path: { + provider: string + credential_type: string + } + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/credential/schema/{credential_type}' + } + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialSchemaByCredentialTypeResponses + = { + 200: { + [key: string]: unknown + } + } + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialSchemaByCredentialTypeResponse + = GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialSchemaByCredentialTypeResponses[keyof GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialSchemaByCredentialTypeResponses] + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialsData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/credentials' +} + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialsResponse + = GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialsResponses[keyof 
GetWorkspacesCurrentToolProviderBuiltinByProviderCredentialsResponses] + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialData = { + body: BuiltinProviderDefaultCredentialPayload + path: { + provider: string + } + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/default-credential' +} + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialResponse + = PostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialResponses[keyof PostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialResponses] + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderDeleteData = { + body: BuiltinToolCredentialDeletePayload + path: { + provider: string + } + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/delete' +} + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderDeleteResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderDeleteResponse + = PostWorkspacesCurrentToolProviderBuiltinByProviderDeleteResponses[keyof PostWorkspacesCurrentToolProviderBuiltinByProviderDeleteResponses] + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderIconData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/icon' +} + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderIconResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderIconResponse + = GetWorkspacesCurrentToolProviderBuiltinByProviderIconResponses[keyof GetWorkspacesCurrentToolProviderBuiltinByProviderIconResponses] + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderInfoData = { + body?: never + path: { + provider: string + 
} + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/info' +} + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderInfoResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderInfoResponse + = GetWorkspacesCurrentToolProviderBuiltinByProviderInfoResponses[keyof GetWorkspacesCurrentToolProviderBuiltinByProviderInfoResponses] + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderOauthClientSchemaData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/oauth/client-schema' +} + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderOauthClientSchemaResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderOauthClientSchemaResponse + = GetWorkspacesCurrentToolProviderBuiltinByProviderOauthClientSchemaResponses[keyof GetWorkspacesCurrentToolProviderBuiltinByProviderOauthClientSchemaResponses] + +export type DeleteWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/oauth/custom-client' +} + +export type DeleteWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponse + = DeleteWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponses[keyof DeleteWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponses] + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/oauth/custom-client' +} + +export type 
GetWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponse + = GetWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponses[keyof GetWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponses] + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientData = { + body: ToolOAuthCustomClientPayload + path: { + provider: string + } + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/oauth/custom-client' +} + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponse + = PostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponses[keyof PostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponses] + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderToolsData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/tools' +} + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderToolsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderBuiltinByProviderToolsResponse + = GetWorkspacesCurrentToolProviderBuiltinByProviderToolsResponses[keyof GetWorkspacesCurrentToolProviderBuiltinByProviderToolsResponses] + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderUpdateData = { + body: BuiltinToolUpdatePayload + path: { + provider: string + } + query?: never + url: '/workspaces/current/tool-provider/builtin/{provider}/update' +} + +export type PostWorkspacesCurrentToolProviderBuiltinByProviderUpdateResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
PostWorkspacesCurrentToolProviderBuiltinByProviderUpdateResponse + = PostWorkspacesCurrentToolProviderBuiltinByProviderUpdateResponses[keyof PostWorkspacesCurrentToolProviderBuiltinByProviderUpdateResponses] + +export type DeleteWorkspacesCurrentToolProviderMcpData = { + body: McpProviderDeletePayload + path?: never + query?: never + url: '/workspaces/current/tool-provider/mcp' +} + +export type DeleteWorkspacesCurrentToolProviderMcpResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteWorkspacesCurrentToolProviderMcpResponse + = DeleteWorkspacesCurrentToolProviderMcpResponses[keyof DeleteWorkspacesCurrentToolProviderMcpResponses] + +export type PostWorkspacesCurrentToolProviderMcpData = { + body: McpProviderCreatePayload + path?: never + query?: never + url: '/workspaces/current/tool-provider/mcp' +} + +export type PostWorkspacesCurrentToolProviderMcpResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentToolProviderMcpResponse + = PostWorkspacesCurrentToolProviderMcpResponses[keyof PostWorkspacesCurrentToolProviderMcpResponses] + +export type PutWorkspacesCurrentToolProviderMcpData = { + body: McpProviderUpdatePayload + path?: never + query?: never + url: '/workspaces/current/tool-provider/mcp' +} + +export type PutWorkspacesCurrentToolProviderMcpResponses = { + 200: { + [key: string]: unknown + } +} + +export type PutWorkspacesCurrentToolProviderMcpResponse + = PutWorkspacesCurrentToolProviderMcpResponses[keyof PutWorkspacesCurrentToolProviderMcpResponses] + +export type PostWorkspacesCurrentToolProviderMcpAuthData = { + body: McpAuthPayload + path?: never + query?: never + url: '/workspaces/current/tool-provider/mcp/auth' +} + +export type PostWorkspacesCurrentToolProviderMcpAuthResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentToolProviderMcpAuthResponse + = PostWorkspacesCurrentToolProviderMcpAuthResponses[keyof PostWorkspacesCurrentToolProviderMcpAuthResponses] 
+ +export type GetWorkspacesCurrentToolProviderMcpToolsByProviderIdData = { + body?: never + path: { + provider_id: string + } + query?: never + url: '/workspaces/current/tool-provider/mcp/tools/{provider_id}' +} + +export type GetWorkspacesCurrentToolProviderMcpToolsByProviderIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderMcpToolsByProviderIdResponse + = GetWorkspacesCurrentToolProviderMcpToolsByProviderIdResponses[keyof GetWorkspacesCurrentToolProviderMcpToolsByProviderIdResponses] + +export type GetWorkspacesCurrentToolProviderMcpUpdateByProviderIdData = { + body?: never + path: { + provider_id: string + } + query?: never + url: '/workspaces/current/tool-provider/mcp/update/{provider_id}' +} + +export type GetWorkspacesCurrentToolProviderMcpUpdateByProviderIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderMcpUpdateByProviderIdResponse + = GetWorkspacesCurrentToolProviderMcpUpdateByProviderIdResponses[keyof GetWorkspacesCurrentToolProviderMcpUpdateByProviderIdResponses] + +export type PostWorkspacesCurrentToolProviderWorkflowCreateData = { + body: WorkflowToolCreatePayload + path?: never + query?: never + url: '/workspaces/current/tool-provider/workflow/create' +} + +export type PostWorkspacesCurrentToolProviderWorkflowCreateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentToolProviderWorkflowCreateResponse + = PostWorkspacesCurrentToolProviderWorkflowCreateResponses[keyof PostWorkspacesCurrentToolProviderWorkflowCreateResponses] + +export type PostWorkspacesCurrentToolProviderWorkflowDeleteData = { + body: WorkflowToolDeletePayload + path?: never + query?: never + url: '/workspaces/current/tool-provider/workflow/delete' +} + +export type PostWorkspacesCurrentToolProviderWorkflowDeleteResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentToolProviderWorkflowDeleteResponse + = 
PostWorkspacesCurrentToolProviderWorkflowDeleteResponses[keyof PostWorkspacesCurrentToolProviderWorkflowDeleteResponses] + +export type GetWorkspacesCurrentToolProviderWorkflowGetData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/tool-provider/workflow/get' +} + +export type GetWorkspacesCurrentToolProviderWorkflowGetResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderWorkflowGetResponse + = GetWorkspacesCurrentToolProviderWorkflowGetResponses[keyof GetWorkspacesCurrentToolProviderWorkflowGetResponses] + +export type GetWorkspacesCurrentToolProviderWorkflowToolsData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/tool-provider/workflow/tools' +} + +export type GetWorkspacesCurrentToolProviderWorkflowToolsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProviderWorkflowToolsResponse + = GetWorkspacesCurrentToolProviderWorkflowToolsResponses[keyof GetWorkspacesCurrentToolProviderWorkflowToolsResponses] + +export type PostWorkspacesCurrentToolProviderWorkflowUpdateData = { + body: WorkflowToolUpdatePayload + path?: never + query?: never + url: '/workspaces/current/tool-provider/workflow/update' +} + +export type PostWorkspacesCurrentToolProviderWorkflowUpdateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentToolProviderWorkflowUpdateResponse + = PostWorkspacesCurrentToolProviderWorkflowUpdateResponses[keyof PostWorkspacesCurrentToolProviderWorkflowUpdateResponses] + +export type GetWorkspacesCurrentToolProvidersData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/tool-providers' +} + +export type GetWorkspacesCurrentToolProvidersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolProvidersResponse + = GetWorkspacesCurrentToolProvidersResponses[keyof GetWorkspacesCurrentToolProvidersResponses] + +export 
type GetWorkspacesCurrentToolsApiData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/tools/api' +} + +export type GetWorkspacesCurrentToolsApiResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolsApiResponse + = GetWorkspacesCurrentToolsApiResponses[keyof GetWorkspacesCurrentToolsApiResponses] + +export type GetWorkspacesCurrentToolsBuiltinData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/tools/builtin' +} + +export type GetWorkspacesCurrentToolsBuiltinResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolsBuiltinResponse + = GetWorkspacesCurrentToolsBuiltinResponses[keyof GetWorkspacesCurrentToolsBuiltinResponses] + +export type GetWorkspacesCurrentToolsMcpData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/tools/mcp' +} + +export type GetWorkspacesCurrentToolsMcpResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolsMcpResponse + = GetWorkspacesCurrentToolsMcpResponses[keyof GetWorkspacesCurrentToolsMcpResponses] + +export type GetWorkspacesCurrentToolsWorkflowData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/tools/workflow' +} + +export type GetWorkspacesCurrentToolsWorkflowResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentToolsWorkflowResponse + = GetWorkspacesCurrentToolsWorkflowResponses[keyof GetWorkspacesCurrentToolsWorkflowResponses] + +export type GetWorkspacesCurrentTriggerProviderByProviderIconData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/trigger-provider/{provider}/icon' +} + +export type GetWorkspacesCurrentTriggerProviderByProviderIconResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentTriggerProviderByProviderIconResponse + = 
GetWorkspacesCurrentTriggerProviderByProviderIconResponses[keyof GetWorkspacesCurrentTriggerProviderByProviderIconResponses] + +export type GetWorkspacesCurrentTriggerProviderByProviderInfoData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/trigger-provider/{provider}/info' +} + +export type GetWorkspacesCurrentTriggerProviderByProviderInfoResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentTriggerProviderByProviderInfoResponse + = GetWorkspacesCurrentTriggerProviderByProviderInfoResponses[keyof GetWorkspacesCurrentTriggerProviderByProviderInfoResponses] + +export type DeleteWorkspacesCurrentTriggerProviderByProviderOauthClientData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/trigger-provider/{provider}/oauth/client' +} + +export type DeleteWorkspacesCurrentTriggerProviderByProviderOauthClientResponses = { + 200: { + [key: string]: unknown + } +} + +export type DeleteWorkspacesCurrentTriggerProviderByProviderOauthClientResponse + = DeleteWorkspacesCurrentTriggerProviderByProviderOauthClientResponses[keyof DeleteWorkspacesCurrentTriggerProviderByProviderOauthClientResponses] + +export type GetWorkspacesCurrentTriggerProviderByProviderOauthClientData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/trigger-provider/{provider}/oauth/client' +} + +export type GetWorkspacesCurrentTriggerProviderByProviderOauthClientResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentTriggerProviderByProviderOauthClientResponse + = GetWorkspacesCurrentTriggerProviderByProviderOauthClientResponses[keyof GetWorkspacesCurrentTriggerProviderByProviderOauthClientResponses] + +export type PostWorkspacesCurrentTriggerProviderByProviderOauthClientData = { + body: TriggerOAuthClientPayload + path: { + provider: string + } + query?: never + url: 
'/workspaces/current/trigger-provider/{provider}/oauth/client' +} + +export type PostWorkspacesCurrentTriggerProviderByProviderOauthClientResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentTriggerProviderByProviderOauthClientResponse + = PostWorkspacesCurrentTriggerProviderByProviderOauthClientResponses[keyof PostWorkspacesCurrentTriggerProviderByProviderOauthClientResponses] + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdData + = { + body: TriggerSubscriptionBuilderUpdatePayload + path: { + provider: string + subscription_builder_id: string + } + query?: never + url: '/workspaces/current/trigger-provider/{provider}/subscriptions/builder/build/{subscription_builder_id}' + } + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdResponses + = { + 200: { + [key: string]: unknown + } + } + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdResponse + = PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdResponses[keyof PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdResponses] + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreateData = { + body: TriggerSubscriptionBuilderCreatePayload + path: { + provider: string + } + query?: never + url: '/workspaces/current/trigger-provider/{provider}/subscriptions/builder/create' +} + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreateResponse + = PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreateResponses[keyof PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreateResponses] + +export type 
GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderLogsBySubscriptionBuilderIdData + = { + body?: never + path: { + provider: string + subscription_builder_id: string + } + query?: never + url: '/workspaces/current/trigger-provider/{provider}/subscriptions/builder/logs/{subscription_builder_id}' + } + +export type GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderLogsBySubscriptionBuilderIdResponses + = { + 200: { + [key: string]: unknown + } + } + +export type GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderLogsBySubscriptionBuilderIdResponse + = GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderLogsBySubscriptionBuilderIdResponses[keyof GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderLogsBySubscriptionBuilderIdResponses] + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdData + = { + body: TriggerSubscriptionBuilderUpdatePayload + path: { + provider: string + subscription_builder_id: string + } + query?: never + url: '/workspaces/current/trigger-provider/{provider}/subscriptions/builder/update/{subscription_builder_id}' + } + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdResponses + = { + 200: { + [key: string]: unknown + } + } + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdResponse + = PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdResponses[keyof PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdResponses] + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdData + = { + body: TriggerSubscriptionBuilderVerifyPayload + path: { + provider: string + subscription_builder_id: string + } + query?: never + url: 
'/workspaces/current/trigger-provider/{provider}/subscriptions/builder/verify-and-update/{subscription_builder_id}' + } + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdResponses + = { + 200: { + [key: string]: unknown + } + } + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdResponse + = PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdResponses[keyof PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdResponses] + +export type GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBySubscriptionBuilderIdData + = { + body?: never + path: { + provider: string + subscription_builder_id: string + } + query?: never + url: '/workspaces/current/trigger-provider/{provider}/subscriptions/builder/{subscription_builder_id}' + } + +export type GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBySubscriptionBuilderIdResponses + = { + 200: { + [key: string]: unknown + } + } + +export type GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBySubscriptionBuilderIdResponse + = GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBySubscriptionBuilderIdResponses[keyof GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBySubscriptionBuilderIdResponses] + +export type GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsListData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/trigger-provider/{provider}/subscriptions/list' +} + +export type GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsListResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsListResponse + = GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsListResponses[keyof 
GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsListResponses] + +export type GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsOauthAuthorizeData = { + body?: never + path: { + provider: string + } + query?: never + url: '/workspaces/current/trigger-provider/{provider}/subscriptions/oauth/authorize' +} + +export type GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsOauthAuthorizeResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsOauthAuthorizeResponse + = GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsOauthAuthorizeResponses[keyof GetWorkspacesCurrentTriggerProviderByProviderSubscriptionsOauthAuthorizeResponses] + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdData + = { + body: TriggerSubscriptionBuilderVerifyPayload + path: { + provider: string + subscription_id: string + } + query?: never + url: '/workspaces/current/trigger-provider/{provider}/subscriptions/verify/{subscription_id}' + } + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdResponses + = { + 200: { + [key: string]: unknown + } + } + +export type PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdResponse + = PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdResponses[keyof PostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdResponses] + +export type PostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsDeleteData = { + body?: never + path: { + subscription_id: string + } + query?: never + url: '/workspaces/current/trigger-provider/{subscription_id}/subscriptions/delete' +} + +export type PostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsDeleteResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
PostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsDeleteResponse + = PostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsDeleteResponses[keyof PostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsDeleteResponses] + +export type PostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdateData = { + body: TriggerSubscriptionBuilderUpdatePayload + path: { + subscription_id: string + } + query?: never + url: '/workspaces/current/trigger-provider/{subscription_id}/subscriptions/update' +} + +export type PostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdateResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdateResponse + = PostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdateResponses[keyof PostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdateResponses] + +export type GetWorkspacesCurrentTriggersData = { + body?: never + path?: never + query?: never + url: '/workspaces/current/triggers' +} + +export type GetWorkspacesCurrentTriggersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentTriggersResponse + = GetWorkspacesCurrentTriggersResponses[keyof GetWorkspacesCurrentTriggersResponses] + +export type PostWorkspacesCustomConfigData = { + body: WorkspaceCustomConfigPayload + path?: never + query?: never + url: '/workspaces/custom-config' +} + +export type PostWorkspacesCustomConfigResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesCustomConfigResponse + = PostWorkspacesCustomConfigResponses[keyof PostWorkspacesCustomConfigResponses] + +export type PostWorkspacesCustomConfigWebappLogoUploadData = { + body?: never + path?: never + query?: never + url: '/workspaces/custom-config/webapp-logo/upload' +} + +export type PostWorkspacesCustomConfigWebappLogoUploadResponses = { + 200: { + [key: string]: unknown + } +} + 
+export type PostWorkspacesCustomConfigWebappLogoUploadResponse + = PostWorkspacesCustomConfigWebappLogoUploadResponses[keyof PostWorkspacesCustomConfigWebappLogoUploadResponses] + +export type PostWorkspacesInfoData = { + body: WorkspaceInfoPayload + path?: never + query?: never + url: '/workspaces/info' +} + +export type PostWorkspacesInfoResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesInfoResponse + = PostWorkspacesInfoResponses[keyof PostWorkspacesInfoResponses] + +export type PostWorkspacesSwitchData = { + body: SwitchWorkspacePayload + path?: never + query?: never + url: '/workspaces/switch' +} + +export type PostWorkspacesSwitchResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkspacesSwitchResponse + = PostWorkspacesSwitchResponses[keyof PostWorkspacesSwitchResponses] + +export type GetWorkspacesByTenantIdModelProvidersByProviderByIconTypeByLangData = { + body?: never + path: { + tenant_id: string + provider: string + icon_type: string + lang: string + } + query?: never + url: '/workspaces/{tenant_id}/model-providers/{provider}/{icon_type}/{lang}' +} + +export type GetWorkspacesByTenantIdModelProvidersByProviderByIconTypeByLangResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesByTenantIdModelProvidersByProviderByIconTypeByLangResponse + = GetWorkspacesByTenantIdModelProvidersByProviderByIconTypeByLangResponses[keyof GetWorkspacesByTenantIdModelProvidersByProviderByIconTypeByLangResponses] diff --git a/packages/contracts/generated/api/console/workspaces/zod.gen.ts b/packages/contracts/generated/api/console/workspaces/zod.gen.ts new file mode 100644 index 0000000000..a381824da7 --- /dev/null +++ b/packages/contracts/generated/api/console/workspaces/zod.gen.ts @@ -0,0 +1,2150 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * TenantInfoResponse + */ +export const zTenantInfoResponse = z.object({ + created_at: 
z.int().nullish(), + custom_config: z.record(z.string(), z.unknown()).nullish(), + id: z.string(), + in_trial: z.boolean().nullish(), + name: z.string().nullish(), + next_credit_reset_date: z.int().nullish(), + plan: z.string().nullish(), + role: z.string().nullish(), + status: z.string().nullish(), + trial_credits: z.int().nullish(), + trial_credits_used: z.int().nullish(), + trial_end_reason: z.string().nullish(), +}) + +/** + * EndpointCreatePayload + */ +export const zEndpointCreatePayload = z.object({ + name: z.string().min(1), + plugin_unique_identifier: z.string(), + settings: z.record(z.string(), z.unknown()), +}) + +/** + * EndpointCreateResponse + */ +export const zEndpointCreateResponse = z.object({ + success: z.boolean(), +}) + +/** + * EndpointIdPayload + */ +export const zEndpointIdPayload = z.object({ + endpoint_id: z.string(), +}) + +/** + * EndpointDeleteResponse + */ +export const zEndpointDeleteResponse = z.object({ + success: z.boolean(), +}) + +/** + * EndpointDisableResponse + */ +export const zEndpointDisableResponse = z.object({ + success: z.boolean(), +}) + +/** + * EndpointEnableResponse + */ +export const zEndpointEnableResponse = z.object({ + success: z.boolean(), +}) + +/** + * EndpointListResponse + */ +export const zEndpointListResponse = z.object({ + endpoints: z.array(z.record(z.string(), z.unknown())), +}) + +/** + * PluginEndpointListResponse + */ +export const zPluginEndpointListResponse = z.object({ + endpoints: z.array(z.record(z.string(), z.unknown())), +}) + +/** + * LegacyEndpointUpdatePayload + */ +export const zLegacyEndpointUpdatePayload = z.object({ + endpoint_id: z.string(), + name: z.string().min(1), + settings: z.record(z.string(), z.unknown()), +}) + +/** + * EndpointUpdateResponse + */ +export const zEndpointUpdateResponse = z.object({ + success: z.boolean(), +}) + +/** + * EndpointUpdatePayload + */ +export const zEndpointUpdatePayload = z.object({ + name: z.string().min(1), + settings: z.record(z.string(), 
z.unknown()), +}) + +/** + * OwnerTransferCheckPayload + */ +export const zOwnerTransferCheckPayload = z.object({ + code: z.string(), + token: z.string(), +}) + +/** + * OwnerTransferEmailPayload + */ +export const zOwnerTransferEmailPayload = z.object({ + language: z.string().nullish(), +}) + +/** + * OwnerTransferPayload + */ +export const zOwnerTransferPayload = z.object({ + token: z.string(), +}) + +/** + * MemberRoleUpdatePayload + */ +export const zMemberRoleUpdatePayload = z.object({ + role: z.string(), +}) + +/** + * ParserCredentialDelete + */ +export const zParserCredentialDelete = z.object({ + credential_id: z.string(), +}) + +/** + * ParserCredentialCreate + */ +export const zParserCredentialCreate = z.object({ + credentials: z.record(z.string(), z.unknown()), + name: z.string().max(30).nullish(), +}) + +/** + * ParserCredentialUpdate + */ +export const zParserCredentialUpdate = z.object({ + credential_id: z.string(), + credentials: z.record(z.string(), z.unknown()), + name: z.string().max(30).nullish(), +}) + +/** + * ParserCredentialSwitch + */ +export const zParserCredentialSwitch = z.object({ + credential_id: z.string(), +}) + +/** + * ParserCredentialValidate + */ +export const zParserCredentialValidate = z.object({ + credentials: z.record(z.string(), z.unknown()), +}) + +/** + * ParserPreferredProviderType + */ +export const zParserPreferredProviderType = z.object({ + preferred_provider_type: z.enum(['system', 'custom']), +}) + +/** + * ParserGithubInstall + */ +export const zParserGithubInstall = z.object({ + package: z.string(), + plugin_unique_identifier: z.string(), + repo: z.string(), + version: z.string(), +}) + +/** + * ParserPluginIdentifiers + */ +export const zParserPluginIdentifiers = z.object({ + plugin_unique_identifiers: z.array(z.string()), +}) + +/** + * ParserLatest + */ +export const zParserLatest = z.object({ + plugin_ids: z.array(z.string()), +}) + +/** + * ParserDynamicOptionsWithCredentials + */ +export const 
zParserDynamicOptionsWithCredentials = z.object({ + action: z.string(), + credential_id: z.string(), + credentials: z.record(z.string(), z.unknown()), + parameter: z.string(), + plugin_id: z.string(), + provider: z.string(), +}) + +/** + * ParserExcludePlugin + */ +export const zParserExcludePlugin = z.object({ + plugin_id: z.string(), +}) + +/** + * ParserUninstall + */ +export const zParserUninstall = z.object({ + plugin_installation_id: z.string(), +}) + +/** + * ParserGithubUpgrade + */ +export const zParserGithubUpgrade = z.object({ + new_plugin_unique_identifier: z.string(), + original_plugin_unique_identifier: z.string(), + package: z.string(), + repo: z.string(), + version: z.string(), +}) + +/** + * ParserMarketplaceUpgrade + */ +export const zParserMarketplaceUpgrade = z.object({ + new_plugin_unique_identifier: z.string(), + original_plugin_unique_identifier: z.string(), +}) + +/** + * ParserGithubUpload + */ +export const zParserGithubUpload = z.object({ + package: z.string(), + repo: z.string(), + version: z.string(), +}) + +/** + * ApiToolProviderDeletePayload + */ +export const zApiToolProviderDeletePayload = z.object({ + provider: z.string(), +}) + +/** + * ApiToolSchemaPayload + */ +export const zApiToolSchemaPayload = z.object({ + schema: z.string(), +}) + +/** + * BuiltinProviderDefaultCredentialPayload + */ +export const zBuiltinProviderDefaultCredentialPayload = z.object({ + id: z.string(), +}) + +/** + * BuiltinToolCredentialDeletePayload + */ +export const zBuiltinToolCredentialDeletePayload = z.object({ + credential_id: z.string(), +}) + +/** + * ToolOAuthCustomClientPayload + */ +export const zToolOAuthCustomClientPayload = z.object({ + client_params: z.record(z.string(), z.unknown()).nullish(), + enable_oauth_custom_client: z.boolean().nullish().default(true), +}) + +/** + * BuiltinToolUpdatePayload + */ +export const zBuiltinToolUpdatePayload = z.object({ + credential_id: z.string(), + credentials: z.record(z.string(), 
z.unknown()).nullish(), + name: z.string().max(30).nullish(), +}) + +/** + * MCPProviderDeletePayload + */ +export const zMcpProviderDeletePayload = z.object({ + provider_id: z.string(), +}) + +/** + * MCPProviderCreatePayload + */ +export const zMcpProviderCreatePayload = z.object({ + authentication: z.record(z.string(), z.unknown()).nullish(), + configuration: z.record(z.string(), z.unknown()).nullish(), + headers: z.record(z.string(), z.unknown()).nullish(), + icon: z.string(), + icon_background: z.string().optional().default(''), + icon_type: z.string(), + name: z.string(), + server_identifier: z.string(), + server_url: z.string(), +}) + +/** + * MCPProviderUpdatePayload + */ +export const zMcpProviderUpdatePayload = z.object({ + authentication: z.record(z.string(), z.unknown()).nullish(), + configuration: z.record(z.string(), z.unknown()).nullish(), + headers: z.record(z.string(), z.unknown()).nullish(), + icon: z.string(), + icon_background: z.string().optional().default(''), + icon_type: z.string(), + name: z.string(), + provider_id: z.string(), + server_identifier: z.string(), + server_url: z.string(), +}) + +/** + * MCPAuthPayload + */ +export const zMcpAuthPayload = z.object({ + authorization_code: z.string().nullish(), + provider_id: z.string(), +}) + +/** + * WorkflowToolDeletePayload + */ +export const zWorkflowToolDeletePayload = z.object({ + workflow_tool_id: z.string(), +}) + +/** + * TriggerOAuthClientPayload + */ +export const zTriggerOAuthClientPayload = z.object({ + client_params: z.record(z.string(), z.unknown()).nullish(), + enabled: z.boolean().nullish(), +}) + +/** + * TriggerSubscriptionBuilderUpdatePayload + */ +export const zTriggerSubscriptionBuilderUpdatePayload = z.object({ + credentials: z.record(z.string(), z.unknown()).nullish(), + name: z.string().nullish(), + parameters: z.record(z.string(), z.unknown()).nullish(), + properties: z.record(z.string(), z.unknown()).nullish(), +}) + +/** + * TriggerSubscriptionBuilderCreatePayload + 
*/ +export const zTriggerSubscriptionBuilderCreatePayload = z.object({ + credential_type: z.string().optional().default('unauthorized'), +}) + +/** + * TriggerSubscriptionBuilderVerifyPayload + */ +export const zTriggerSubscriptionBuilderVerifyPayload = z.object({ + credentials: z.record(z.string(), z.unknown()), +}) + +/** + * WorkspaceCustomConfigPayload + */ +export const zWorkspaceCustomConfigPayload = z.object({ + remove_webapp_brand: z.boolean().nullish(), + replace_webapp_logo: z.string().nullish(), +}) + +/** + * WorkspaceInfoPayload + */ +export const zWorkspaceInfoPayload = z.object({ + name: z.string(), +}) + +/** + * SwitchWorkspacePayload + */ +export const zSwitchWorkspacePayload = z.object({ + tenant_id: z.string(), +}) + +/** + * AccountWithRole + */ +export const zAccountWithRole = z.object({ + avatar: z.string().nullish(), + created_at: z.int().nullish(), + email: z.string(), + id: z.string(), + last_active_at: z.int().nullish(), + last_login_at: z.int().nullish(), + name: z.string(), + role: z.string(), + status: z.string(), +}) + +/** + * AccountWithRoleList + */ +export const zAccountWithRoleList = z.object({ + accounts: z.array(zAccountWithRole), +}) + +/** + * TenantAccountRole + */ +export const zTenantAccountRole = z.enum(['owner', 'admin', 'editor', 'normal', 'dataset_operator']) + +/** + * MemberInvitePayload + */ +export const zMemberInvitePayload = z.object({ + emails: z.array(z.string()).optional(), + language: z.string().nullish(), + role: zTenantAccountRole, +}) + +/** + * ModelType + * + * Enum class for model type. 
+ */ +export const zModelType = z.enum([ + 'llm', + 'text-embedding', + 'rerank', + 'speech2text', + 'moderation', + 'tts', +]) + +/** + * ParserDeleteModels + */ +export const zParserDeleteModels = z.object({ + model: z.string(), + model_type: zModelType, +}) + +/** + * ParserDeleteCredential + */ +export const zParserDeleteCredential = z.object({ + credential_id: z.string(), + model: z.string(), + model_type: zModelType, +}) + +/** + * ParserCreateCredential + */ +export const zParserCreateCredential = z.object({ + credentials: z.record(z.string(), z.unknown()), + model: z.string(), + model_type: zModelType, + name: z.string().max(30).nullish(), +}) + +/** + * ParserUpdateCredential + */ +export const zParserUpdateCredential = z.object({ + credential_id: z.string(), + credentials: z.record(z.string(), z.unknown()), + model: z.string(), + model_type: zModelType, + name: z.string().max(30).nullish(), +}) + +/** + * ParserSwitch + */ +export const zParserSwitch = z.object({ + credential_id: z.string(), + model: z.string(), + model_type: zModelType, +}) + +/** + * ParserValidate + */ +export const zParserValidate = z.object({ + credentials: z.record(z.string(), z.unknown()), + model: z.string(), + model_type: zModelType, +}) + +/** + * LoadBalancingCredentialPayload + */ +export const zLoadBalancingCredentialPayload = z.object({ + credentials: z.record(z.string(), z.unknown()), + model: z.string(), + model_type: zModelType, +}) + +/** + * Inner + */ +export const zInner = z.object({ + model: z.string().nullish(), + model_type: zModelType, + provider: z.string().nullish(), +}) + +/** + * ParserPostDefault + */ +export const zParserPostDefault = z.object({ + model_settings: z.array(zInner), +}) + +/** + * LoadBalancingPayload + */ +export const zLoadBalancingPayload = z.object({ + configs: z.array(z.record(z.string(), z.unknown())).nullish(), + enabled: z.boolean().nullish(), +}) + +/** + * ParserPostModels + */ +export const zParserPostModels = z.object({ + 
config_from: z.string().nullish(), + credential_id: z.string().nullish(), + load_balancing: zLoadBalancingPayload.optional(), + model: z.string(), + model_type: zModelType, +}) + +/** + * DebugPermission + */ +export const zDebugPermission = z.enum(['everyone', 'admins', 'noone']) + +/** + * InstallPermission + */ +export const zInstallPermission = z.enum(['everyone', 'admins', 'noone']) + +/** + * ParserPermissionChange + */ +export const zParserPermissionChange = z.object({ + debug_permission: zDebugPermission, + install_permission: zInstallPermission, +}) + +/** + * PluginPermissionSettingsPayload + */ +export const zPluginPermissionSettingsPayload = z.object({ + debug_permission: zDebugPermission.optional(), + install_permission: zInstallPermission.optional(), +}) + +/** + * ApiProviderSchemaType + * + * Enum class for api provider schema type. + */ +export const zApiProviderSchemaType = z.enum([ + 'openapi', + 'swagger', + 'openai_plugin', + 'openai_actions', +]) + +/** + * ApiToolProviderAddPayload + */ +export const zApiToolProviderAddPayload = z.object({ + credentials: z.record(z.string(), z.unknown()), + custom_disclaimer: z.string().optional().default(''), + icon: z.record(z.string(), z.unknown()), + labels: z.array(z.string()).nullish(), + privacy_policy: z.string().nullish(), + provider: z.string(), + schema: z.string(), + schema_type: zApiProviderSchemaType, +}) + +/** + * ApiToolTestPayload + */ +export const zApiToolTestPayload = z.object({ + credentials: z.record(z.string(), z.unknown()), + parameters: z.record(z.string(), z.unknown()), + provider_name: z.string().nullish(), + schema: z.string(), + schema_type: zApiProviderSchemaType, + tool_name: z.string(), +}) + +/** + * ApiToolProviderUpdatePayload + */ +export const zApiToolProviderUpdatePayload = z.object({ + credentials: z.record(z.string(), z.unknown()), + custom_disclaimer: z.string().optional().default(''), + icon: z.record(z.string(), z.unknown()), + labels: z.array(z.string()).nullish(), 
+ original_provider: z.string(), + privacy_policy: z.string().nullish(), + provider: z.string(), + schema: z.string(), + schema_type: zApiProviderSchemaType, +}) + +/** + * CredentialType + */ +export const zCredentialType = z.enum(['api-key', 'oauth2', 'unauthorized']) + +/** + * BuiltinToolAddPayload + */ +export const zBuiltinToolAddPayload = z.object({ + credentials: z.record(z.string(), z.unknown()), + name: z.string().max(30).nullish(), + type: zCredentialType, +}) + +/** + * StrategySetting + */ +export const zStrategySetting = z.enum(['disabled', 'fix_only', 'latest']) + +/** + * UpgradeMode + */ +export const zUpgradeMode = z.enum(['all', 'partial', 'exclude']) + +/** + * PluginAutoUpgradeSettingsPayload + */ +export const zPluginAutoUpgradeSettingsPayload = z.object({ + exclude_plugins: z.array(z.string()).optional(), + include_plugins: z.array(z.string()).optional(), + strategy_setting: zStrategySetting.optional(), + upgrade_mode: zUpgradeMode.optional(), + upgrade_time_of_day: z.int().optional().default(0), +}) + +/** + * ParserPreferencesChange + */ +export const zParserPreferencesChange = z.object({ + auto_upgrade: zPluginAutoUpgradeSettingsPayload, + permission: zPluginPermissionSettingsPayload, +}) + +/** + * ToolParameterForm + */ +export const zToolParameterForm = z.enum(['schema', 'form', 'llm']) + +/** + * WorkflowToolParameterConfiguration + * + * Workflow tool configuration + */ +export const zWorkflowToolParameterConfiguration = z.object({ + description: z.string(), + form: zToolParameterForm, + name: z.string(), +}) + +/** + * WorkflowToolCreatePayload + */ +export const zWorkflowToolCreatePayload = z.object({ + description: z.string(), + icon: z.record(z.string(), z.unknown()), + label: z.string(), + labels: z.array(z.string()).nullish(), + name: z.string(), + parameters: z.array(zWorkflowToolParameterConfiguration).optional(), + privacy_policy: z.string().nullish().default(''), + workflow_app_id: z.string(), +}) + +/** + * 
WorkflowToolUpdatePayload + */ +export const zWorkflowToolUpdatePayload = z.object({ + description: z.string(), + icon: z.record(z.string(), z.unknown()), + label: z.string(), + labels: z.array(z.string()).nullish(), + name: z.string(), + parameters: z.array(zWorkflowToolParameterConfiguration).optional(), + privacy_policy: z.string().nullish().default(''), + workflow_tool_id: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zPostWorkspacesCurrentResponse = zTenantInfoResponse + +export const zGetWorkspacesCurrentAgentProviderByProviderNamePath = z.object({ + provider_name: z.string(), +}) + +/** + * Agent provider details + */ +export const zGetWorkspacesCurrentAgentProviderByProviderNameResponse = z.record( + z.string(), + z.unknown(), +) + +/** + * Success + */ +export const zGetWorkspacesCurrentAgentProvidersResponse = z.array( + z.record(z.string(), z.unknown()), +) + +/** + * Success + */ +export const zGetWorkspacesCurrentDatasetOperatorsResponse = zAccountWithRoleList + +export const zGetWorkspacesCurrentDefaultModelQuery = z.object({ + model_type: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentDefaultModelResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentDefaultModelBody = zParserPostDefault + +/** + * Success + */ +export const zPostWorkspacesCurrentDefaultModelResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentEndpointsBody = zEndpointCreatePayload + +/** + * Endpoint created successfully + */ +export const zPostWorkspacesCurrentEndpointsResponse = zEndpointCreateResponse + +export const zPostWorkspacesCurrentEndpointsCreateBody = zEndpointCreatePayload + +/** + * Endpoint created successfully + */ +export const zPostWorkspacesCurrentEndpointsCreateResponse = zEndpointCreateResponse + +export const zPostWorkspacesCurrentEndpointsDeleteBody = zEndpointIdPayload + 
+/** + * Endpoint deleted successfully + */ +export const zPostWorkspacesCurrentEndpointsDeleteResponse = zEndpointDeleteResponse + +export const zPostWorkspacesCurrentEndpointsDisableBody = zEndpointIdPayload + +/** + * Endpoint disabled successfully + */ +export const zPostWorkspacesCurrentEndpointsDisableResponse = zEndpointDisableResponse + +export const zPostWorkspacesCurrentEndpointsEnableBody = zEndpointIdPayload + +/** + * Endpoint enabled successfully + */ +export const zPostWorkspacesCurrentEndpointsEnableResponse = zEndpointEnableResponse + +export const zGetWorkspacesCurrentEndpointsListQuery = z.object({ + page: z.int().gte(1), + page_size: z.int(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentEndpointsListResponse = zEndpointListResponse + +export const zGetWorkspacesCurrentEndpointsListPluginQuery = z.object({ + page: z.int().gte(1), + page_size: z.int(), + plugin_id: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentEndpointsListPluginResponse = zPluginEndpointListResponse + +export const zPostWorkspacesCurrentEndpointsUpdateBody = zLegacyEndpointUpdatePayload + +/** + * Endpoint updated successfully + */ +export const zPostWorkspacesCurrentEndpointsUpdateResponse = zEndpointUpdateResponse + +export const zDeleteWorkspacesCurrentEndpointsByIdPath = z.object({ + id: z.string(), +}) + +/** + * Endpoint deleted successfully + */ +export const zDeleteWorkspacesCurrentEndpointsByIdResponse = zEndpointDeleteResponse + +export const zPatchWorkspacesCurrentEndpointsByIdBody = zEndpointUpdatePayload + +export const zPatchWorkspacesCurrentEndpointsByIdPath = z.object({ + id: z.string(), +}) + +/** + * Endpoint updated successfully + */ +export const zPatchWorkspacesCurrentEndpointsByIdResponse = zEndpointUpdateResponse + +/** + * Success + */ +export const zGetWorkspacesCurrentMembersResponse = zAccountWithRoleList + +export const zPostWorkspacesCurrentMembersInviteEmailBody = zMemberInvitePayload + +/** + * Success 
+ */ +export const zPostWorkspacesCurrentMembersInviteEmailResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentMembersOwnerTransferCheckBody = zOwnerTransferCheckPayload + +/** + * Success + */ +export const zPostWorkspacesCurrentMembersOwnerTransferCheckResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentMembersSendOwnerTransferConfirmEmailBody + = zOwnerTransferEmailPayload + +/** + * Success + */ +export const zPostWorkspacesCurrentMembersSendOwnerTransferConfirmEmailResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteWorkspacesCurrentMembersByMemberIdPath = z.object({ + member_id: z.string(), +}) + +/** + * Success + */ +export const zDeleteWorkspacesCurrentMembersByMemberIdResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentMembersByMemberIdOwnerTransferBody = zOwnerTransferPayload + +export const zPostWorkspacesCurrentMembersByMemberIdOwnerTransferPath = z.object({ + member_id: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentMembersByMemberIdOwnerTransferResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPutWorkspacesCurrentMembersByMemberIdUpdateRoleBody = zMemberRoleUpdatePayload + +export const zPutWorkspacesCurrentMembersByMemberIdUpdateRolePath = z.object({ + member_id: z.string(), +}) + +/** + * Success + */ +export const zPutWorkspacesCurrentMembersByMemberIdUpdateRoleResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentModelProvidersQuery = z.object({ + model_type: z.string().nullish(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentModelProvidersResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentModelProvidersByProviderCheckoutUrlPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentModelProvidersByProviderCheckoutUrlResponse = z.record( + 
z.string(), + z.unknown(), +) + +export const zDeleteWorkspacesCurrentModelProvidersByProviderCredentialsBody + = zParserCredentialDelete + +export const zDeleteWorkspacesCurrentModelProvidersByProviderCredentialsPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zDeleteWorkspacesCurrentModelProvidersByProviderCredentialsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentModelProvidersByProviderCredentialsPath = z.object({ + provider: z.string(), +}) + +export const zGetWorkspacesCurrentModelProvidersByProviderCredentialsQuery = z.object({ + credential_id: z.string().nullish(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentModelProvidersByProviderCredentialsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentModelProvidersByProviderCredentialsBody = zParserCredentialCreate + +export const zPostWorkspacesCurrentModelProvidersByProviderCredentialsPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentModelProvidersByProviderCredentialsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPutWorkspacesCurrentModelProvidersByProviderCredentialsBody = zParserCredentialUpdate + +export const zPutWorkspacesCurrentModelProvidersByProviderCredentialsPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPutWorkspacesCurrentModelProvidersByProviderCredentialsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchBody + = zParserCredentialSwitch + +export const zPostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentModelProvidersByProviderCredentialsSwitchResponse = z.record( + z.string(), + z.unknown(), +) + +export const 
zPostWorkspacesCurrentModelProvidersByProviderCredentialsValidateBody + = zParserCredentialValidate + +export const zPostWorkspacesCurrentModelProvidersByProviderCredentialsValidatePath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentModelProvidersByProviderCredentialsValidateResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteWorkspacesCurrentModelProvidersByProviderModelsBody = zParserDeleteModels + +export const zDeleteWorkspacesCurrentModelProvidersByProviderModelsPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zDeleteWorkspacesCurrentModelProvidersByProviderModelsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentModelProvidersByProviderModelsPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentModelProvidersByProviderModelsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentModelProvidersByProviderModelsBody = zParserPostModels + +export const zPostWorkspacesCurrentModelProvidersByProviderModelsPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentModelProvidersByProviderModelsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsBody + = zParserDeleteCredential + +export const zDeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zDeleteWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentModelProvidersByProviderModelsCredentialsPath = z.object({ + provider: z.string(), +}) + +export const zGetWorkspacesCurrentModelProvidersByProviderModelsCredentialsQuery = z.object({ + config_from: 
z.string().nullish(), + credential_id: z.string().nullish(), + model: z.string(), + model_type: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsBody + = zParserCreateCredential + +export const zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPutWorkspacesCurrentModelProvidersByProviderModelsCredentialsBody + = zParserUpdateCredential + +export const zPutWorkspacesCurrentModelProvidersByProviderModelsCredentialsPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPutWorkspacesCurrentModelProvidersByProviderModelsCredentialsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchBody + = zParserSwitch + +export const zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsSwitchResponse + = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidateBody + = zParserValidate + +export const zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidatePath = z.object( + { + provider: z.string(), + }, +) + +/** + * Success + */ +export const zPostWorkspacesCurrentModelProvidersByProviderModelsCredentialsValidateResponse + = z.record(z.string(), z.unknown()) + +export const zPatchWorkspacesCurrentModelProvidersByProviderModelsDisableBody = zParserDeleteModels + +export const 
zPatchWorkspacesCurrentModelProvidersByProviderModelsDisablePath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPatchWorkspacesCurrentModelProvidersByProviderModelsDisableResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchWorkspacesCurrentModelProvidersByProviderModelsEnableBody = zParserDeleteModels + +export const zPatchWorkspacesCurrentModelProvidersByProviderModelsEnablePath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPatchWorkspacesCurrentModelProvidersByProviderModelsEnableResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidateBody + = zLoadBalancingCredentialPayload + +export const zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidatePath + = z.object({ + provider: z.string(), + }) + +/** + * Success + */ +export const zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsCredentialsValidateResponse + = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidateBody + = zLoadBalancingCredentialPayload + +export const zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidatePath + = z.object({ + provider: z.string(), + config_id: z.string(), + }) + +/** + * Success + */ +export const zPostWorkspacesCurrentModelProvidersByProviderModelsLoadBalancingConfigsByConfigIdCredentialsValidateResponse + = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesPath = z.object({ + provider: z.string(), +}) + +export const zGetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesQuery = z.object({ + model: z.string(), +}) + +/** + * Success + */ +export const 
zGetWorkspacesCurrentModelProvidersByProviderModelsParameterRulesResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypeBody + = zParserPreferredProviderType + +export const zPostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypePath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentModelProvidersByProviderPreferredProviderTypeResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentModelsModelTypesByModelTypePath = z.object({ + model_type: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentModelsModelTypesByModelTypeResponse = z.record( + z.string(), + z.unknown(), +) + +/** + * Success + */ +export const zGetWorkspacesCurrentPermissionResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentPluginAssetQuery = z.object({ + file_name: z.string(), + plugin_unique_identifier: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentPluginAssetResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetWorkspacesCurrentPluginDebuggingKeyResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentPluginFetchManifestQuery = z.object({ + plugin_unique_identifier: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentPluginFetchManifestResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentPluginIconQuery = z.object({ + filename: z.string(), + tenant_id: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentPluginIconResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentPluginInstallGithubBody = zParserGithubInstall + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginInstallGithubResponse = z.record(z.string(), z.unknown()) + +export const 
zPostWorkspacesCurrentPluginInstallMarketplaceBody = zParserPluginIdentifiers + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginInstallMarketplaceResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentPluginInstallPkgBody = zParserPluginIdentifiers + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginInstallPkgResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentPluginListQuery = z.object({ + page: z.int().gte(1).optional().default(1), + page_size: z.int().gte(1).lte(256).optional().default(256), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentPluginListResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentPluginListInstallationsIdsBody = zParserLatest + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginListInstallationsIdsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentPluginListLatestVersionsBody = zParserLatest + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginListLatestVersionsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentPluginMarketplacePkgQuery = z.object({ + plugin_unique_identifier: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentPluginMarketplacePkgResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentPluginParametersDynamicOptionsQuery = z.object({ + action: z.string(), + credential_id: z.string().nullish(), + parameter: z.string(), + plugin_id: z.string(), + provider: z.string(), + provider_type: z.enum(['tool', 'trigger']), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentPluginParametersDynamicOptionsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentPluginParametersDynamicOptionsWithCredentialsBody + = zParserDynamicOptionsWithCredentials + +/** + * Success + */ +export const 
zPostWorkspacesCurrentPluginParametersDynamicOptionsWithCredentialsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentPluginPermissionChangeBody = zParserPermissionChange + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginPermissionChangeResponse = z.record( + z.string(), + z.unknown(), +) + +/** + * Success + */ +export const zGetWorkspacesCurrentPluginPermissionFetchResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentPluginPreferencesAutoupgradeExcludeBody = zParserExcludePlugin + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginPreferencesAutoupgradeExcludeResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentPluginPreferencesChangeBody = zParserPreferencesChange + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginPreferencesChangeResponse = z.record( + z.string(), + z.unknown(), +) + +/** + * Success + */ +export const zGetWorkspacesCurrentPluginPreferencesFetchResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentPluginReadmeQuery = z.object({ + language: z.string().optional().default('en-US'), + plugin_unique_identifier: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentPluginReadmeResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentPluginTasksQuery = z.object({ + page: z.int().gte(1).optional().default(1), + page_size: z.int().gte(1).lte(256).optional().default(256), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentPluginTasksResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginTasksDeleteAllResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentPluginTasksByTaskIdPath = z.object({ + task_id: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentPluginTasksByTaskIdResponse = z.record(z.string(), z.unknown()) + 
+export const zPostWorkspacesCurrentPluginTasksByTaskIdDeletePath = z.object({ + task_id: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginTasksByTaskIdDeleteResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentPluginTasksByTaskIdDeleteByIdentifierPath = z.object({ + task_id: z.string(), + identifier: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginTasksByTaskIdDeleteByIdentifierResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentPluginUninstallBody = zParserUninstall + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginUninstallResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentPluginUpgradeGithubBody = zParserGithubUpgrade + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginUpgradeGithubResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentPluginUpgradeMarketplaceBody = zParserMarketplaceUpgrade + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginUpgradeMarketplaceResponse = z.record( + z.string(), + z.unknown(), +) + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginUploadBundleResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentPluginUploadGithubBody = zParserGithubUpload + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginUploadGithubResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zPostWorkspacesCurrentPluginUploadPkgResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolLabelsResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentToolProviderApiAddBody = zApiToolProviderAddPayload + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderApiAddResponse = z.record(z.string(), z.unknown()) + +export const 
zPostWorkspacesCurrentToolProviderApiDeleteBody = zApiToolProviderDeletePayload + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderApiDeleteResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderApiGetResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderApiRemoteResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentToolProviderApiSchemaBody = zApiToolSchemaPayload + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderApiSchemaResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentToolProviderApiTestPreBody = zApiToolTestPayload + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderApiTestPreResponse = z.record( + z.string(), + z.unknown(), +) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderApiToolsResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentToolProviderApiUpdateBody = zApiToolProviderUpdatePayload + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderApiUpdateResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderAddBody = zBuiltinToolAddPayload + +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderAddPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderAddResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialInfoPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialInfoResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialSchemaByCredentialTypePath + = z.object({ + 
provider: z.string(), + credential_type: z.string(), + }) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialSchemaByCredentialTypeResponse + = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialsPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderCredentialsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialBody + = zBuiltinProviderDefaultCredentialPayload + +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderDefaultCredentialResponse + = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderDeleteBody + = zBuiltinToolCredentialDeletePayload + +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderDeletePath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderDeleteResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderIconPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderIconResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderInfoPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderInfoResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderOauthClientSchemaPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const 
zGetWorkspacesCurrentToolProviderBuiltinByProviderOauthClientSchemaResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zDeleteWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponse + = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientBody + = zToolOAuthCustomClientPayload + +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderOauthCustomClientResponse + = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderToolsPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderBuiltinByProviderToolsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderUpdateBody + = zBuiltinToolUpdatePayload + +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderUpdatePath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderBuiltinByProviderUpdateResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteWorkspacesCurrentToolProviderMcpBody = zMcpProviderDeletePayload + +/** + * Success + */ +export const zDeleteWorkspacesCurrentToolProviderMcpResponse = z.record(z.string(), z.unknown()) + +export const 
zPostWorkspacesCurrentToolProviderMcpBody = zMcpProviderCreatePayload + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderMcpResponse = z.record(z.string(), z.unknown()) + +export const zPutWorkspacesCurrentToolProviderMcpBody = zMcpProviderUpdatePayload + +/** + * Success + */ +export const zPutWorkspacesCurrentToolProviderMcpResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentToolProviderMcpAuthBody = zMcpAuthPayload + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderMcpAuthResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentToolProviderMcpToolsByProviderIdPath = z.object({ + provider_id: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderMcpToolsByProviderIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentToolProviderMcpUpdateByProviderIdPath = z.object({ + provider_id: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderMcpUpdateByProviderIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentToolProviderWorkflowCreateBody = zWorkflowToolCreatePayload + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderWorkflowCreateResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentToolProviderWorkflowDeleteBody = zWorkflowToolDeletePayload + +/** + * Success + */ +export const zPostWorkspacesCurrentToolProviderWorkflowDeleteResponse = z.record( + z.string(), + z.unknown(), +) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderWorkflowGetResponse = z.record( + z.string(), + z.unknown(), +) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProviderWorkflowToolsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentToolProviderWorkflowUpdateBody = zWorkflowToolUpdatePayload + +/** + * Success + */ +export 
const zPostWorkspacesCurrentToolProviderWorkflowUpdateResponse = z.record( + z.string(), + z.unknown(), +) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolProvidersResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolsApiResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolsBuiltinResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolsMcpResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetWorkspacesCurrentToolsWorkflowResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentTriggerProviderByProviderIconPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentTriggerProviderByProviderIconResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentTriggerProviderByProviderInfoPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentTriggerProviderByProviderInfoResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteWorkspacesCurrentTriggerProviderByProviderOauthClientPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zDeleteWorkspacesCurrentTriggerProviderByProviderOauthClientResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentTriggerProviderByProviderOauthClientPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentTriggerProviderByProviderOauthClientResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentTriggerProviderByProviderOauthClientBody + = zTriggerOAuthClientPayload + +export const zPostWorkspacesCurrentTriggerProviderByProviderOauthClientPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const 
zPostWorkspacesCurrentTriggerProviderByProviderOauthClientResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdBody + = zTriggerSubscriptionBuilderUpdatePayload + +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdPath + = z.object({ + provider: z.string(), + subscription_builder_id: z.string(), + }) + +/** + * Success + */ +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBuildBySubscriptionBuilderIdResponse + = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreateBody + = zTriggerSubscriptionBuilderCreatePayload + +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreatePath + = z.object({ + provider: z.string(), + }) + +/** + * Success + */ +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderCreateResponse + = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderLogsBySubscriptionBuilderIdPath + = z.object({ + provider: z.string(), + subscription_builder_id: z.string(), + }) + +/** + * Success + */ +export const zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderLogsBySubscriptionBuilderIdResponse + = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdBody + = zTriggerSubscriptionBuilderUpdatePayload + +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdPath + = z.object({ + provider: z.string(), + subscription_builder_id: z.string(), + }) + +/** + * Success + */ +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderUpdateBySubscriptionBuilderIdResponse + = z.record(z.string(), 
z.unknown()) + +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdBody + = zTriggerSubscriptionBuilderVerifyPayload + +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdPath + = z.object({ + provider: z.string(), + subscription_builder_id: z.string(), + }) + +/** + * Success + */ +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderVerifyAndUpdateBySubscriptionBuilderIdResponse + = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBySubscriptionBuilderIdPath + = z.object({ + provider: z.string(), + subscription_builder_id: z.string(), + }) + +/** + * Success + */ +export const zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsBuilderBySubscriptionBuilderIdResponse + = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsListPath = z.object({ + provider: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsListResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsOauthAuthorizePath + = z.object({ + provider: z.string(), + }) + +/** + * Success + */ +export const zGetWorkspacesCurrentTriggerProviderByProviderSubscriptionsOauthAuthorizeResponse + = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdBody + = zTriggerSubscriptionBuilderVerifyPayload + +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdPath + = z.object({ + provider: z.string(), + subscription_id: z.string(), + }) + +/** + * Success + */ +export const zPostWorkspacesCurrentTriggerProviderByProviderSubscriptionsVerifyBySubscriptionIdResponse + = 
z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsDeletePath + = z.object({ + subscription_id: z.string(), + }) + +/** + * Success + */ +export const zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsDeleteResponse + = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdateBody + = zTriggerSubscriptionBuilderUpdatePayload + +export const zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdatePath + = z.object({ + subscription_id: z.string(), + }) + +/** + * Success + */ +export const zPostWorkspacesCurrentTriggerProviderBySubscriptionIdSubscriptionsUpdateResponse + = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetWorkspacesCurrentTriggersResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesCustomConfigBody = zWorkspaceCustomConfigPayload + +/** + * Success + */ +export const zPostWorkspacesCustomConfigResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zPostWorkspacesCustomConfigWebappLogoUploadResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesInfoBody = zWorkspaceInfoPayload + +/** + * Success + */ +export const zPostWorkspacesInfoResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkspacesSwitchBody = zSwitchWorkspacePayload + +/** + * Success + */ +export const zPostWorkspacesSwitchResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesByTenantIdModelProvidersByProviderByIconTypeByLangPath = z.object({ + tenant_id: z.string(), + provider: z.string(), + icon_type: z.string(), + lang: z.string(), +}) + +/** + * Success + */ +export const zGetWorkspacesByTenantIdModelProvidersByProviderByIconTypeByLangResponse = z.record( + z.string(), + z.unknown(), +) diff --git a/packages/contracts/generated/api/service/orpc.gen.ts 
b/packages/contracts/generated/api/service/orpc.gen.ts new file mode 100644 index 0000000000..a5a45a6452 --- /dev/null +++ b/packages/contracts/generated/api/service/orpc.gen.ts @@ -0,0 +1,2405 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zDeleteAppsAnnotationsByAnnotationIdPath, + zDeleteAppsAnnotationsByAnnotationIdResponse, + zDeleteConversationsByCIdPath, + zDeleteConversationsByCIdResponse, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdPath, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdResponse, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdPath, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath, + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse, + zDeleteDatasetsByDatasetIdMetadataByMetadataIdPath, + zDeleteDatasetsByDatasetIdMetadataByMetadataIdResponse, + zDeleteDatasetsByDatasetIdPath, + zDeleteDatasetsByDatasetIdResponse, + zDeleteDatasetsTagsBody, + zDeleteDatasetsTagsResponse, + zGetAppFeedbacksQuery, + zGetAppFeedbacksResponse, + zGetAppsAnnotationReplyByActionStatusByJobIdPath, + zGetAppsAnnotationReplyByActionStatusByJobIdResponse, + zGetAppsAnnotationsResponse, + zGetConversationsByCIdVariablesPath, + zGetConversationsByCIdVariablesQuery, + zGetConversationsByCIdVariablesResponse, + zGetConversationsQuery, + zGetConversationsResponse, + zGetDatasetsByDatasetIdDocumentsByBatchIndexingStatusPath, + zGetDatasetsByDatasetIdDocumentsByBatchIndexingStatusResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdDownloadPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdResponse, + 
zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksQuery, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsPath, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsQuery, + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse, + zGetDatasetsByDatasetIdDocumentsPath, + zGetDatasetsByDatasetIdDocumentsResponse, + zGetDatasetsByDatasetIdMetadataBuiltInPath, + zGetDatasetsByDatasetIdMetadataBuiltInResponse, + zGetDatasetsByDatasetIdMetadataPath, + zGetDatasetsByDatasetIdMetadataResponse, + zGetDatasetsByDatasetIdPath, + zGetDatasetsByDatasetIdPipelineDatasourcePluginsPath, + zGetDatasetsByDatasetIdPipelineDatasourcePluginsQuery, + zGetDatasetsByDatasetIdPipelineDatasourcePluginsResponse, + zGetDatasetsByDatasetIdResponse, + zGetDatasetsByDatasetIdTagsPath, + zGetDatasetsByDatasetIdTagsResponse, + zGetDatasetsResponse, + zGetDatasetsTagsResponse, + zGetEndUsersByEndUserIdPath, + zGetEndUsersByEndUserIdResponse, + zGetFilesByFileIdPreviewPath, + zGetFilesByFileIdPreviewQuery, + zGetFilesByFileIdPreviewResponse, + zGetFormHumanInputByFormTokenPath, + zGetFormHumanInputByFormTokenResponse, + zGetInfoResponse, + zGetMessagesByMessageIdSuggestedPath, + zGetMessagesByMessageIdSuggestedResponse, + zGetMessagesQuery, + zGetMessagesResponse, + zGetMetaResponse, + zGetParametersResponse, + zGetRootResponse, + zGetSiteResponse, + zGetWorkflowByTaskIdEventsPath, + zGetWorkflowByTaskIdEventsQuery, + zGetWorkflowByTaskIdEventsResponse, + zGetWorkflowsLogsQuery, + zGetWorkflowsLogsResponse, + zGetWorkflowsRunByWorkflowRunIdPath, + zGetWorkflowsRunByWorkflowRunIdResponse, + zGetWorkspacesCurrentModelsModelTypesByModelTypePath, + 
zGetWorkspacesCurrentModelsModelTypesByModelTypeResponse, + zPatchDatasetsByDatasetIdBody, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdPath, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdResponse, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdBody, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdPath, + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse, + zPatchDatasetsByDatasetIdDocumentsStatusByActionPath, + zPatchDatasetsByDatasetIdDocumentsStatusByActionResponse, + zPatchDatasetsByDatasetIdMetadataByMetadataIdBody, + zPatchDatasetsByDatasetIdMetadataByMetadataIdPath, + zPatchDatasetsByDatasetIdMetadataByMetadataIdResponse, + zPatchDatasetsByDatasetIdPath, + zPatchDatasetsByDatasetIdResponse, + zPatchDatasetsTagsBody, + zPatchDatasetsTagsResponse, + zPostAppsAnnotationReplyByActionBody, + zPostAppsAnnotationReplyByActionPath, + zPostAppsAnnotationReplyByActionResponse, + zPostAppsAnnotationsBody, + zPostAppsAnnotationsResponse, + zPostAudioToTextResponse, + zPostChatMessagesBody, + zPostChatMessagesByTaskIdStopPath, + zPostChatMessagesByTaskIdStopResponse, + zPostChatMessagesResponse, + zPostCompletionMessagesBody, + zPostCompletionMessagesByTaskIdStopPath, + zPostCompletionMessagesByTaskIdStopResponse, + zPostCompletionMessagesResponse, + zPostConversationsByCIdNameBody, + zPostConversationsByCIdNamePath, + zPostConversationsByCIdNameResponse, + zPostDatasetsBody, + zPostDatasetsByDatasetIdDocumentCreateByFile2Path, + zPostDatasetsByDatasetIdDocumentCreateByFile2Response, + zPostDatasetsByDatasetIdDocumentCreateByFilePath, + zPostDatasetsByDatasetIdDocumentCreateByFileResponse, + zPostDatasetsByDatasetIdDocumentCreateByText2Body, + zPostDatasetsByDatasetIdDocumentCreateByText2Path, + zPostDatasetsByDatasetIdDocumentCreateByText2Response, + zPostDatasetsByDatasetIdDocumentCreateByTextBody, + 
zPostDatasetsByDatasetIdDocumentCreateByTextPath, + zPostDatasetsByDatasetIdDocumentCreateByTextResponse, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBody, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdBody, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksBody, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsPath, + zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse, + zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Path, + zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Response, + zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFilePath, + zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFileResponse, + zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Body, + zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Path, + zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Response, + zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextBody, + zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextPath, + zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextResponse, + zPostDatasetsByDatasetIdDocumentsDownloadZipBody, + zPostDatasetsByDatasetIdDocumentsDownloadZipPath, + zPostDatasetsByDatasetIdDocumentsDownloadZipResponse, + zPostDatasetsByDatasetIdDocumentsMetadataBody, + zPostDatasetsByDatasetIdDocumentsMetadataPath, + zPostDatasetsByDatasetIdDocumentsMetadataResponse, + zPostDatasetsByDatasetIdHitTestingBody, + zPostDatasetsByDatasetIdHitTestingPath, + zPostDatasetsByDatasetIdHitTestingResponse, + zPostDatasetsByDatasetIdMetadataBody, + zPostDatasetsByDatasetIdMetadataBuiltInByActionPath, + 
zPostDatasetsByDatasetIdMetadataBuiltInByActionResponse, + zPostDatasetsByDatasetIdMetadataPath, + zPostDatasetsByDatasetIdMetadataResponse, + zPostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunPath, + zPostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunResponse, + zPostDatasetsByDatasetIdPipelineRunPath, + zPostDatasetsByDatasetIdPipelineRunResponse, + zPostDatasetsByDatasetIdRetrieveBody, + zPostDatasetsByDatasetIdRetrievePath, + zPostDatasetsByDatasetIdRetrieveResponse, + zPostDatasetsPipelineFileUploadResponse, + zPostDatasetsResponse, + zPostDatasetsTagsBindingBody, + zPostDatasetsTagsBindingResponse, + zPostDatasetsTagsBody, + zPostDatasetsTagsResponse, + zPostDatasetsTagsUnbindingBody, + zPostDatasetsTagsUnbindingResponse, + zPostFilesUploadResponse, + zPostFormHumanInputByFormTokenBody, + zPostFormHumanInputByFormTokenPath, + zPostFormHumanInputByFormTokenResponse, + zPostMessagesByMessageIdFeedbacksBody, + zPostMessagesByMessageIdFeedbacksPath, + zPostMessagesByMessageIdFeedbacksResponse, + zPostTextToAudioBody, + zPostTextToAudioResponse, + zPostWorkflowsByWorkflowIdRunBody, + zPostWorkflowsByWorkflowIdRunPath, + zPostWorkflowsByWorkflowIdRunResponse, + zPostWorkflowsRunBody, + zPostWorkflowsRunResponse, + zPostWorkflowsTasksByTaskIdStopPath, + zPostWorkflowsTasksByTaskIdStopResponse, + zPutAppsAnnotationsByAnnotationIdBody, + zPutAppsAnnotationsByAnnotationIdPath, + zPutAppsAnnotationsByAnnotationIdResponse, + zPutConversationsByCIdVariablesByVariableIdBody, + zPutConversationsByCIdVariablesByVariableIdPath, + zPutConversationsByCIdVariablesByVariableIdResponse, +} from './zod.gen' + +export const get = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRoot', + path: '/', + tags: ['service_api'], + }) + .output(zGetRootResponse) + +export const root = { + get, +} + +/** + * Get all feedbacks for the application + * + * Get all feedbacks for the application + * Returns paginated list of all feedback submitted for 
messages in this app. + */ +export const get2 = oc + .route({ + description: + 'Get all feedbacks for the application\nReturns paginated list of all feedback submitted for messages in this app.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppFeedbacks', + path: '/app/feedbacks', + summary: 'Get all feedbacks for the application', + tags: ['service_api'], + }) + .input(z.object({ query: zGetAppFeedbacksQuery.optional() })) + .output(zGetAppFeedbacksResponse) + +export const feedbacks = { + get: get2, +} + +export const app = { + feedbacks, +} + +/** + * Get the status of an annotation reply action job + * + * Get the status of an annotation reply action job + */ +export const get3 = oc + .route({ + description: 'Get the status of an annotation reply action job', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsAnnotationReplyByActionStatusByJobId', + path: '/apps/annotation-reply/{action}/status/{job_id}', + summary: 'Get the status of an annotation reply action job', + tags: ['service_api'], + }) + .input(z.object({ params: zGetAppsAnnotationReplyByActionStatusByJobIdPath })) + .output(zGetAppsAnnotationReplyByActionStatusByJobIdResponse) + +export const byJobId = { + get: get3, +} + +export const status = { + byJobId, +} + +/** + * Enable or disable annotation reply feature + * + * Enable or disable annotation reply feature + */ +export const post = oc + .route({ + description: 'Enable or disable annotation reply feature', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsAnnotationReplyByAction', + path: '/apps/annotation-reply/{action}', + summary: 'Enable or disable annotation reply feature', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostAppsAnnotationReplyByActionBody, + params: zPostAppsAnnotationReplyByActionPath, + }), + ) + .output(zPostAppsAnnotationReplyByActionResponse) + +export const byAction = { + post, + status, +} + +export const annotationReply = { + byAction, +} 
+ +/** + * Delete an annotation + * + * Delete an annotation + */ +export const delete_ = oc + .route({ + description: 'Delete an annotation', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteAppsAnnotationsByAnnotationId', + path: '/apps/annotations/{annotation_id}', + successStatus: 204, + summary: 'Delete an annotation', + tags: ['service_api'], + }) + .input(z.object({ params: zDeleteAppsAnnotationsByAnnotationIdPath })) + .output(zDeleteAppsAnnotationsByAnnotationIdResponse) + +/** + * Update an existing annotation + * + * Update an existing annotation + */ +export const put = oc + .route({ + description: 'Update an existing annotation', + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putAppsAnnotationsByAnnotationId', + path: '/apps/annotations/{annotation_id}', + summary: 'Update an existing annotation', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPutAppsAnnotationsByAnnotationIdBody, + params: zPutAppsAnnotationsByAnnotationIdPath, + }), + ) + .output(zPutAppsAnnotationsByAnnotationIdResponse) + +export const byAnnotationId = { + delete: delete_, + put, +} + +/** + * List annotations for the application + * + * List annotations for the application + */ +export const get4 = oc + .route({ + description: 'List annotations for the application', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getAppsAnnotations', + path: '/apps/annotations', + summary: 'List annotations for the application', + tags: ['service_api'], + }) + .output(zGetAppsAnnotationsResponse) + +/** + * Create a new annotation + * + * Create a new annotation + */ +export const post2 = oc + .route({ + description: 'Create a new annotation', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAppsAnnotations', + path: '/apps/annotations', + successStatus: 201, + summary: 'Create a new annotation', + tags: ['service_api'], + }) + .input(z.object({ body: zPostAppsAnnotationsBody })) + 
.output(zPostAppsAnnotationsResponse) + +export const annotations = { + get: get4, + post: post2, + byAnnotationId, +} + +export const apps = { + annotationReply, + annotations, +} + +/** + * Convert audio to text using speech-to-text + * + * Convert audio to text using speech-to-text + * Accepts an audio file upload and returns the transcribed text. + */ +export const post3 = oc + .route({ + description: + 'Convert audio to text using speech-to-text\nAccepts an audio file upload and returns the transcribed text.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAudioToText', + path: '/audio-to-text', + summary: 'Convert audio to text using speech-to-text', + tags: ['service_api'], + }) + .output(zPostAudioToTextResponse) + +export const audioToText = { + post: post3, +} + +/** + * Stop a running chat message generation + * + * Stop a running chat message generation + */ +export const post4 = oc + .route({ + description: 'Stop a running chat message generation', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postChatMessagesByTaskIdStop', + path: '/chat-messages/{task_id}/stop', + summary: 'Stop a running chat message generation', + tags: ['service_api'], + }) + .input(z.object({ params: zPostChatMessagesByTaskIdStopPath })) + .output(zPostChatMessagesByTaskIdStopResponse) + +export const stop = { + post: post4, +} + +export const byTaskId = { + stop, +} + +/** + * Send a message in a chat conversation + * + * Send a message in a chat conversation + * This endpoint handles chat messages for chat, agent chat, and advanced chat applications. + * Supports conversation management and both blocking and streaming response modes. 
+ */ +export const post5 = oc + .route({ + description: + 'Send a message in a chat conversation\nThis endpoint handles chat messages for chat, agent chat, and advanced chat applications.\nSupports conversation management and both blocking and streaming response modes.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postChatMessages', + path: '/chat-messages', + summary: 'Send a message in a chat conversation', + tags: ['service_api'], + }) + .input(z.object({ body: zPostChatMessagesBody })) + .output(zPostChatMessagesResponse) + +export const chatMessages = { + post: post5, + byTaskId, +} + +/** + * Stop a running completion task + * + * Stop a running completion task + */ +export const post6 = oc + .route({ + description: 'Stop a running completion task', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postCompletionMessagesByTaskIdStop', + path: '/completion-messages/{task_id}/stop', + summary: 'Stop a running completion task', + tags: ['service_api'], + }) + .input(z.object({ params: zPostCompletionMessagesByTaskIdStopPath })) + .output(zPostCompletionMessagesByTaskIdStopResponse) + +export const stop2 = { + post: post6, +} + +export const byTaskId2 = { + stop: stop2, +} + +/** + * Create a completion for the given prompt + * + * Create a completion for the given prompt + * This endpoint generates a completion based on the provided inputs and query. + * Supports both blocking and streaming response modes. 
+ */ +export const post7 = oc + .route({ + description: + 'Create a completion for the given prompt\nThis endpoint generates a completion based on the provided inputs and query.\nSupports both blocking and streaming response modes.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postCompletionMessages', + path: '/completion-messages', + summary: 'Create a completion for the given prompt', + tags: ['service_api'], + }) + .input(z.object({ body: zPostCompletionMessagesBody })) + .output(zPostCompletionMessagesResponse) + +export const completionMessages = { + post: post7, + byTaskId: byTaskId2, +} + +/** + * Rename a conversation or auto-generate a name + * + * Rename a conversation or auto-generate a name + */ +export const post8 = oc + .route({ + description: 'Rename a conversation or auto-generate a name', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postConversationsByCIdName', + path: '/conversations/{c_id}/name', + summary: 'Rename a conversation or auto-generate a name', + tags: ['service_api'], + }) + .input( + z.object({ body: zPostConversationsByCIdNameBody, params: zPostConversationsByCIdNamePath }), + ) + .output(zPostConversationsByCIdNameResponse) + +export const name = { + post: post8, +} + +/** + * Update a conversation variable's value + * + * Update a conversation variable's value + * Allows updating the value of a specific conversation variable. + * The value must match the variable's expected type. 
+ */ +export const put2 = oc + .route({ + description: + 'Update a conversation variable\'s value\nAllows updating the value of a specific conversation variable.\nThe value must match the variable\'s expected type.', + inputStructure: 'detailed', + method: 'PUT', + operationId: 'putConversationsByCIdVariablesByVariableId', + path: '/conversations/{c_id}/variables/{variable_id}', + summary: 'Update a conversation variable\'s value', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPutConversationsByCIdVariablesByVariableIdBody, + params: zPutConversationsByCIdVariablesByVariableIdPath, + }), + ) + .output(zPutConversationsByCIdVariablesByVariableIdResponse) + +export const byVariableId = { + put: put2, +} + +/** + * List all variables for a conversation + * + * List all variables for a conversation + * Conversational variables are only available for chat applications. + */ +export const get5 = oc + .route({ + description: + 'List all variables for a conversation\nConversational variables are only available for chat applications.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getConversationsByCIdVariables', + path: '/conversations/{c_id}/variables', + summary: 'List all variables for a conversation', + tags: ['service_api'], + }) + .input( + z.object({ + params: zGetConversationsByCIdVariablesPath, + query: zGetConversationsByCIdVariablesQuery.optional(), + }), + ) + .output(zGetConversationsByCIdVariablesResponse) + +export const variables = { + get: get5, + byVariableId, +} + +/** + * Delete a specific conversation + * + * Delete a specific conversation + */ +export const delete2 = oc + .route({ + description: 'Delete a specific conversation', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteConversationsByCId', + path: '/conversations/{c_id}', + successStatus: 204, + summary: 'Delete a specific conversation', + tags: ['service_api'], + }) + .input(z.object({ params: zDeleteConversationsByCIdPath })) + 
.output(zDeleteConversationsByCIdResponse) + +export const byCId = { + delete: delete2, + name, + variables, +} + +/** + * List all conversations for the current user + * + * List all conversations for the current user + * Supports pagination using last_id and limit parameters. + */ +export const get6 = oc + .route({ + description: + 'List all conversations for the current user\nSupports pagination using last_id and limit parameters.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getConversations', + path: '/conversations', + summary: 'List all conversations for the current user', + tags: ['service_api'], + }) + .input(z.object({ query: zGetConversationsQuery.optional() })) + .output(zGetConversationsResponse) + +export const conversations = { + get: get6, + byCId, +} + +/** + * Upload a file for use in conversations + * + * Upload a file to a knowledgebase pipeline + * Accepts a single file upload via multipart/form-data. + */ +export const post9 = oc + .route({ + description: + 'Upload a file to a knowledgebase pipeline\nAccepts a single file upload via multipart/form-data.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsPipelineFileUpload', + path: '/datasets/pipeline/file-upload', + successStatus: 201, + summary: 'Upload a file for use in conversations', + tags: ['service_api'], + }) + .output(zPostDatasetsPipelineFileUploadResponse) + +export const fileUpload = { + post: post9, +} + +export const pipeline = { + fileUpload, +} + +/** + * Bind tags to a dataset + */ +export const post10 = oc + .route({ + description: 'Bind tags to a dataset', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsTagsBinding', + path: '/datasets/tags/binding', + successStatus: 204, + tags: ['service_api'], + }) + .input(z.object({ body: zPostDatasetsTagsBindingBody })) + .output(zPostDatasetsTagsBindingResponse) + +export const binding = { + post: post10, +} + +/** + * Unbind a tag from a dataset + */ +export 
const post11 = oc + .route({ + description: 'Unbind a tag from a dataset', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsTagsUnbinding', + path: '/datasets/tags/unbinding', + successStatus: 204, + tags: ['service_api'], + }) + .input(z.object({ body: zPostDatasetsTagsUnbindingBody })) + .output(zPostDatasetsTagsUnbindingResponse) + +export const unbinding = { + post: post11, +} + +/** + * Delete a knowledge type tag + * + * Delete a knowledge type tag + */ +export const delete3 = oc + .route({ + description: 'Delete a knowledge type tag', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsTags', + path: '/datasets/tags', + successStatus: 204, + summary: 'Delete a knowledge type tag', + tags: ['service_api'], + }) + .input(z.object({ body: zDeleteDatasetsTagsBody })) + .output(zDeleteDatasetsTagsResponse) + +/** + * Get all knowledge type tags + * + * Get all knowledge type tags + */ +export const get7 = oc + .route({ + description: 'Get all knowledge type tags', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsTags', + path: '/datasets/tags', + summary: 'Get all knowledge type tags', + tags: ['service_api'], + }) + .output(zGetDatasetsTagsResponse) + +/** + * Update a knowledge type tag + */ +export const patch = oc + .route({ + description: 'Update a knowledge type tag', + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsTags', + path: '/datasets/tags', + tags: ['service_api'], + }) + .input(z.object({ body: zPatchDatasetsTagsBody })) + .output(zPatchDatasetsTagsResponse) + +/** + * Add a knowledge type tag + * + * Add a knowledge type tag + */ +export const post12 = oc + .route({ + description: 'Add a knowledge type tag', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsTags', + path: '/datasets/tags', + summary: 'Add a knowledge type tag', + tags: ['service_api'], + }) + .input(z.object({ body: zPostDatasetsTagsBody })) + 
.output(zPostDatasetsTagsResponse) + +export const tags = { + delete: delete3, + get: get7, + patch, + post: post12, + binding, + unbinding, +} + +/** + * Create a new document by uploading a file + */ +export const post13 = oc + .route({ + description: 'Create a new document by uploading a file', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentCreateByFile', + path: '/datasets/{dataset_id}/document/create-by-file', + tags: ['service_api'], + }) + .input(z.object({ params: zPostDatasetsByDatasetIdDocumentCreateByFilePath })) + .output(zPostDatasetsByDatasetIdDocumentCreateByFileResponse) + +/** + * Create a new document by uploading a file + */ +export const post14 = oc + .route({ + description: 'Create a new document by uploading a file', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentCreateByFile', + path: '/datasets/{dataset_id}/document/create_by_file', + tags: ['service_api'], + }) + .input(z.object({ params: zPostDatasetsByDatasetIdDocumentCreateByFile2Path })) + .output(zPostDatasetsByDatasetIdDocumentCreateByFile2Response) + +export const createByFile = { + post: post14, +} + +/** + * Create a new document by providing text content + */ +export const post15 = oc + .route({ + description: 'Create a new document by providing text content', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentCreateByText', + path: '/datasets/{dataset_id}/document/create-by-text', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentCreateByTextBody, + params: zPostDatasetsByDatasetIdDocumentCreateByTextPath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentCreateByTextResponse) + +/** + * Deprecated legacy alias for creating a new document by providing text content. Use /datasets/{dataset_id}/document/create-by-text instead. 
+ * + * @deprecated + */ +export const post16 = oc + .route({ + deprecated: true, + description: + 'Deprecated legacy alias for creating a new document by providing text content. Use /datasets/{dataset_id}/document/create-by-text instead.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentCreateByText', + path: '/datasets/{dataset_id}/document/create_by_text', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentCreateByText2Body, + params: zPostDatasetsByDatasetIdDocumentCreateByText2Path, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentCreateByText2Response) + +export const createByText = { + post: post16, +} + +export const document_ = { + createByFile, + createByText, +} + +/** + * Download selected uploaded documents as a single ZIP archive + */ +export const post17 = oc + .route({ + description: 'Download selected uploaded documents as a single ZIP archive', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsDownloadZip', + path: '/datasets/{dataset_id}/documents/download-zip', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsDownloadZipBody, + params: zPostDatasetsByDatasetIdDocumentsDownloadZipPath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsDownloadZipResponse) + +export const downloadZip = { + post: post17, +} + +/** + * Update metadata for multiple documents + * + * Update metadata for multiple documents + */ +export const post18 = oc + .route({ + description: 'Update metadata for multiple documents', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsMetadata', + path: '/datasets/{dataset_id}/documents/metadata', + summary: 'Update metadata for multiple documents', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsMetadataBody, + params: 
zPostDatasetsByDatasetIdDocumentsMetadataPath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsMetadataResponse) + +export const metadata = { + post: post18, +} + +/** + * Batch update document status + * + * Batch update document status + * Args: + * tenant_id: tenant id + * dataset_id: dataset id + * action: action to perform (Literal["enable", "disable", "archive", "un_archive"]) + * + * Returns: + * dict: A dictionary with a key 'result' and a value 'success' + * int: HTTP status code 200 indicating that the operation was successful. + * + * Raises: + * NotFound: If the dataset with the given ID does not exist. + * Forbidden: If the user does not have permission. + * InvalidActionError: If the action is invalid or cannot be performed. + */ +export const patch2 = oc + .route({ + description: + 'Batch update document status\nArgs:\n tenant_id: tenant id\n dataset_id: dataset id\n action: action to perform (Literal["enable", "disable", "archive", "un_archive"])\n\nReturns:\n dict: A dictionary with a key \'result\' and a value \'success\'\n int: HTTP status code 200 indicating that the operation was successful.\n\nRaises:\n NotFound: If the dataset with the given ID does not exist.\n Forbidden: If the user does not have permission.\n InvalidActionError: If the action is invalid or cannot be performed.', + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsByDatasetIdDocumentsStatusByAction', + path: '/datasets/{dataset_id}/documents/status/{action}', + summary: 'Batch update document status', + tags: ['service_api'], + }) + .input(z.object({ params: zPatchDatasetsByDatasetIdDocumentsStatusByActionPath })) + .output(zPatchDatasetsByDatasetIdDocumentsStatusByActionResponse) + +export const byAction2 = { + patch: patch2, +} + +export const status2 = { + byAction: byAction2, +} + +/** + * Get indexing status for documents in a batch + */ +export const get8 = oc + .route({ + description: 'Get indexing status for documents in a batch', + 
inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByBatchIndexingStatus', + path: '/datasets/{dataset_id}/documents/{batch}/indexing-status', + tags: ['service_api'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsByBatchIndexingStatusPath })) + .output(zGetDatasetsByDatasetIdDocumentsByBatchIndexingStatusResponse) + +export const indexingStatus = { + get: get8, +} + +export const byBatch = { + indexingStatus, +} + +/** + * Get a signed download URL for a document's original uploaded file + */ +export const get9 = oc + .route({ + description: 'Get a signed download URL for a document\'s original uploaded file', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdDownload', + path: '/datasets/{dataset_id}/documents/{document_id}/download', + tags: ['service_api'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsByDocumentIdDownloadPath })) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponse) + +export const download = { + get: get9, +} + +/** + * Delete a specific child chunk + */ +export const delete4 = oc + .route({ + description: 'Delete a specific child chunk', + inputStructure: 'detailed', + method: 'DELETE', + operationId: + 'deleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkId', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id}', + successStatus: 204, + tags: ['service_api'], + }) + .input( + z.object({ + params: + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdPath, + }), + ) + .output( + zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse, + ) + +/** + * Update a specific child chunk + */ +export const patch3 = oc + .route({ + description: 'Update a specific child chunk', + inputStructure: 'detailed', + method: 'PATCH', + 
operationId: + 'patchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkId', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id}', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdBody, + params: + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdPath, + }), + ) + .output( + zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse, + ) + +export const byChildChunkId = { + delete: delete4, + patch: patch3, +} + +/** + * List child chunks for a segment + */ +export const get10 = oc + .route({ + description: 'List child chunks for a segment', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunks', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks', + tags: ['service_api'], + }) + .input( + z.object({ + params: zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath, + query: + zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksQuery.optional(), + }), + ) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse) + +/** + * Create a new child chunk for a segment + */ +export const post19 = oc + .route({ + description: 'Create a new child chunk for a segment', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunks', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksBody, + params: 
zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse) + +export const childChunks = { + get: get10, + post: post19, + byChildChunkId, +} + +/** + * Delete a specific segment + */ +export const delete5 = oc + .route({ + description: 'Delete a specific segment', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentId', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}', + successStatus: 204, + tags: ['service_api'], + }) + .input( + z.object({ params: zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath }), + ) + .output(zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse) + +/** + * Get a specific segment by ID + */ +export const get11 = oc + .route({ + description: 'Get a specific segment by ID', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentId', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}', + tags: ['service_api'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath })) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse) + +/** + * Update a specific segment + */ +export const post20 = oc + .route({ + description: 'Update a specific segment', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentId', + path: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdBody, + params: zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath, + }), + ) + 
.output(zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse) + +export const bySegmentId = { + delete: delete5, + get: get11, + post: post20, + childChunks, +} + +/** + * List segments in a document + */ +export const get12 = oc + .route({ + description: 'List segments in a document', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentIdSegments', + path: '/datasets/{dataset_id}/documents/{document_id}/segments', + tags: ['service_api'], + }) + .input( + z.object({ + params: zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsPath, + query: zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsQuery.optional(), + }), + ) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse) + +/** + * Create segments in a document + */ +export const post21 = oc + .route({ + description: 'Create segments in a document', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsByDocumentIdSegments', + path: '/datasets/{dataset_id}/documents/{document_id}/segments', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBody, + params: zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsPath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse) + +export const segments = { + get: get12, + post: post21, + bySegmentId, +} + +/** + * Deprecated legacy alias for updating an existing document by uploading a file. Use PATCH /datasets/{dataset_id}/documents/{document_id} instead. + * + * @deprecated + */ +export const post22 = oc + .route({ + deprecated: true, + description: + 'Deprecated legacy alias for updating an existing document by uploading a file. 
Use PATCH /datasets/{dataset_id}/documents/{document_id} instead.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile', + path: '/datasets/{dataset_id}/documents/{document_id}/update-by-file', + tags: ['service_api'], + }) + .input(z.object({ params: zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFilePath })) + .output(zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFileResponse) + +/** + * Deprecated legacy alias for updating an existing document by uploading a file. Use PATCH /datasets/{dataset_id}/documents/{document_id} instead. + * + * @deprecated + */ +export const post23 = oc + .route({ + deprecated: true, + description: + 'Deprecated legacy alias for updating an existing document by uploading a file. Use PATCH /datasets/{dataset_id}/documents/{document_id} instead.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile', + path: '/datasets/{dataset_id}/documents/{document_id}/update_by_file', + tags: ['service_api'], + }) + .input(z.object({ params: zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Path })) + .output(zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Response) + +export const updateByFile = { + post: post23, +} + +/** + * Update an existing document by providing text content + */ +export const post24 = oc + .route({ + description: 'Update an existing document by providing text content', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText', + path: '/datasets/{dataset_id}/documents/{document_id}/update-by-text', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextBody, + params: zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextPath, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextResponse) + +/** + * Deprecated 
legacy alias for updating an existing document by providing text content. Use /datasets/{dataset_id}/documents/{document_id}/update-by-text instead. + * + * @deprecated + */ +export const post25 = oc + .route({ + deprecated: true, + description: + 'Deprecated legacy alias for updating an existing document by providing text content. Use /datasets/{dataset_id}/documents/{document_id}/update-by-text instead.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText', + path: '/datasets/{dataset_id}/documents/{document_id}/update_by_text', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Body, + params: zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Path, + }), + ) + .output(zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Response) + +export const updateByText = { + post: post25, +} + +/** + * Delete document + * + * Delete a document + */ +export const delete6 = oc + .route({ + description: 'Delete a document', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsByDatasetIdDocumentsByDocumentId', + path: '/datasets/{dataset_id}/documents/{document_id}', + successStatus: 204, + summary: 'Delete document', + tags: ['service_api'], + }) + .input(z.object({ params: zDeleteDatasetsByDatasetIdDocumentsByDocumentIdPath })) + .output(zDeleteDatasetsByDatasetIdDocumentsByDocumentIdResponse) + +/** + * Get a specific document by ID + */ +export const get13 = oc + .route({ + description: 'Get a specific document by ID', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocumentsByDocumentId', + path: '/datasets/{dataset_id}/documents/{document_id}', + tags: ['service_api'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsByDocumentIdPath })) + .output(zGetDatasetsByDatasetIdDocumentsByDocumentIdResponse) + +/** + * Update an existing document by 
uploading a file + */ +export const patch4 = oc + .route({ + description: 'Update an existing document by uploading a file', + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsByDatasetIdDocumentsByDocumentId', + path: '/datasets/{dataset_id}/documents/{document_id}', + tags: ['service_api'], + }) + .input(z.object({ params: zPatchDatasetsByDatasetIdDocumentsByDocumentIdPath })) + .output(zPatchDatasetsByDatasetIdDocumentsByDocumentIdResponse) + +export const byDocumentId = { + delete: delete6, + get: get13, + patch: patch4, + download, + segments, + updateByFile, + updateByText, +} + +/** + * List all documents in a dataset + */ +export const get14 = oc + .route({ + description: 'List all documents in a dataset', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdDocuments', + path: '/datasets/{dataset_id}/documents', + tags: ['service_api'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdDocumentsPath })) + .output(zGetDatasetsByDatasetIdDocumentsResponse) + +export const documents = { + get: get14, + downloadZip, + metadata, + status: status2, + byBatch, + byDocumentId, +} + +/** + * Perform hit testing on a dataset + * + * Perform hit testing on a dataset + * Tests retrieval performance for the specified dataset. 
+ */ +export const post26 = oc + .route({ + description: + 'Perform hit testing on a dataset\nTests retrieval performance for the specified dataset.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdHitTesting', + path: '/datasets/{dataset_id}/hit-testing', + summary: 'Perform hit testing on a dataset', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdHitTestingBody, + params: zPostDatasetsByDatasetIdHitTestingPath, + }), + ) + .output(zPostDatasetsByDatasetIdHitTestingResponse) + +export const hitTesting = { + post: post26, +} + +/** + * Enable or disable built-in metadata field + * + * Enable or disable built-in metadata field + */ +export const post27 = oc + .route({ + description: 'Enable or disable built-in metadata field', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdMetadataBuiltInByAction', + path: '/datasets/{dataset_id}/metadata/built-in/{action}', + summary: 'Enable or disable built-in metadata field', + tags: ['service_api'], + }) + .input(z.object({ params: zPostDatasetsByDatasetIdMetadataBuiltInByActionPath })) + .output(zPostDatasetsByDatasetIdMetadataBuiltInByActionResponse) + +export const byAction3 = { + post: post27, +} + +/** + * Get all built-in metadata fields + * + * Get all built-in metadata fields + */ +export const get15 = oc + .route({ + description: 'Get all built-in metadata fields', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdMetadataBuiltIn', + path: '/datasets/{dataset_id}/metadata/built-in', + summary: 'Get all built-in metadata fields', + tags: ['service_api'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdMetadataBuiltInPath })) + .output(zGetDatasetsByDatasetIdMetadataBuiltInResponse) + +export const builtIn = { + get: get15, + byAction: byAction3, +} + +/** + * Delete metadata + * + * Delete metadata + */ +export const delete7 = oc + .route({ + description: 
'Delete metadata', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsByDatasetIdMetadataByMetadataId', + path: '/datasets/{dataset_id}/metadata/{metadata_id}', + successStatus: 204, + summary: 'Delete metadata', + tags: ['service_api'], + }) + .input(z.object({ params: zDeleteDatasetsByDatasetIdMetadataByMetadataIdPath })) + .output(zDeleteDatasetsByDatasetIdMetadataByMetadataIdResponse) + +/** + * Update metadata name + * + * Update metadata name + */ +export const patch5 = oc + .route({ + description: 'Update metadata name', + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsByDatasetIdMetadataByMetadataId', + path: '/datasets/{dataset_id}/metadata/{metadata_id}', + summary: 'Update metadata name', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPatchDatasetsByDatasetIdMetadataByMetadataIdBody, + params: zPatchDatasetsByDatasetIdMetadataByMetadataIdPath, + }), + ) + .output(zPatchDatasetsByDatasetIdMetadataByMetadataIdResponse) + +export const byMetadataId = { + delete: delete7, + patch: patch5, +} + +/** + * Get all metadata for a dataset + * + * Get all metadata for a dataset + */ +export const get16 = oc + .route({ + description: 'Get all metadata for a dataset', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdMetadata', + path: '/datasets/{dataset_id}/metadata', + summary: 'Get all metadata for a dataset', + tags: ['service_api'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdMetadataPath })) + .output(zGetDatasetsByDatasetIdMetadataResponse) + +/** + * Create metadata for a dataset + * + * Create metadata for a dataset + */ +export const post28 = oc + .route({ + description: 'Create metadata for a dataset', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdMetadata', + path: '/datasets/{dataset_id}/metadata', + successStatus: 201, + summary: 'Create metadata for a dataset', + tags: ['service_api'], + }) + 
.input( + z.object({ + body: zPostDatasetsByDatasetIdMetadataBody, + params: zPostDatasetsByDatasetIdMetadataPath, + }), + ) + .output(zPostDatasetsByDatasetIdMetadataResponse) + +export const metadata2 = { + get: get16, + post: post28, + builtIn, + byMetadataId, +} + +/** + * Resource for getting datasource plugins + * + * List all datasource plugins for a rag pipeline + */ +export const get17 = oc + .route({ + description: 'List all datasource plugins for a rag pipeline', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdPipelineDatasourcePlugins', + path: '/datasets/{dataset_id}/pipeline/datasource-plugins', + summary: 'Resource for getting datasource plugins', + tags: ['service_api'], + }) + .input( + z.object({ + params: zGetDatasetsByDatasetIdPipelineDatasourcePluginsPath, + query: zGetDatasetsByDatasetIdPipelineDatasourcePluginsQuery.optional(), + }), + ) + .output(zGetDatasetsByDatasetIdPipelineDatasourcePluginsResponse) + +export const datasourcePlugins = { + get: get17, +} + +/** + * Resource for getting datasource plugins + * + * Run a datasource node for a rag pipeline + */ +export const post29 = oc + .route({ + description: 'Run a datasource node for a rag pipeline', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRun', + path: '/datasets/{dataset_id}/pipeline/datasource/nodes/{node_id}/run', + summary: 'Resource for getting datasource plugins', + tags: ['service_api'], + }) + .input(z.object({ params: zPostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunPath })) + .output(zPostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunResponse) + +export const run = { + post: post29, +} + +export const byNodeId = { + run, +} + +export const nodes = { + byNodeId, +} + +export const datasource = { + nodes, +} + +/** + * Resource for running a rag pipeline + * + * Run a datasource node for a rag pipeline + */ +export const post30 = oc + .route({ + 
description: 'Run a datasource node for a rag pipeline', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdPipelineRun', + path: '/datasets/{dataset_id}/pipeline/run', + summary: 'Resource for running a rag pipeline', + tags: ['service_api'], + }) + .input(z.object({ params: zPostDatasetsByDatasetIdPipelineRunPath })) + .output(zPostDatasetsByDatasetIdPipelineRunResponse) + +export const run2 = { + post: post30, +} + +export const pipeline2 = { + datasourcePlugins, + datasource, + run: run2, +} + +/** + * Perform hit testing on a dataset + * + * Perform hit testing on a dataset + * Tests retrieval performance for the specified dataset. + */ +export const post31 = oc + .route({ + description: + 'Perform hit testing on a dataset\nTests retrieval performance for the specified dataset.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasetsByDatasetIdRetrieve', + path: '/datasets/{dataset_id}/retrieve', + summary: 'Perform hit testing on a dataset', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostDatasetsByDatasetIdRetrieveBody, + params: zPostDatasetsByDatasetIdRetrievePath, + }), + ) + .output(zPostDatasetsByDatasetIdRetrieveResponse) + +export const retrieve = { + post: post31, +} + +/** + * Get all knowledge type tags + * + * Get tags bound to a specific dataset + */ +export const get18 = oc + .route({ + description: 'Get tags bound to a specific dataset', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetIdTags', + path: '/datasets/{dataset_id}/tags', + summary: 'Get all knowledge type tags', + tags: ['service_api'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdTagsPath })) + .output(zGetDatasetsByDatasetIdTagsResponse) + +export const tags2 = { + get: get18, +} + +/** + * Deletes a dataset given its ID + * + * Delete a dataset + * Args: + * _: ignore + * dataset_id (UUID): The ID of the dataset to be deleted. 
+ * + * Returns: + * dict: A dictionary with a key 'result' and a value 'success' + * if the dataset was successfully deleted. Omitted in HTTP response. + * int: HTTP status code 204 indicating that the operation was successful. + * + * Raises: + * NotFound: If the dataset with the given ID does not exist. + */ +export const delete8 = oc + .route({ + description: + 'Delete a dataset\nArgs:\n _: ignore\n dataset_id (UUID): The ID of the dataset to be deleted.\n\nReturns:\n dict: A dictionary with a key \'result\' and a value \'success\'\n if the dataset was successfully deleted. Omitted in HTTP response.\n int: HTTP status code 204 indicating that the operation was successful.\n\nRaises:\n NotFound: If the dataset with the given ID does not exist.', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteDatasetsByDatasetId', + path: '/datasets/{dataset_id}', + successStatus: 204, + summary: 'Deletes a dataset given its ID', + tags: ['service_api'], + }) + .input(z.object({ params: zDeleteDatasetsByDatasetIdPath })) + .output(zDeleteDatasetsByDatasetIdResponse) + +/** + * Get a specific dataset by ID + */ +export const get19 = oc + .route({ + description: 'Get a specific dataset by ID', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasetsByDatasetId', + path: '/datasets/{dataset_id}', + tags: ['service_api'], + }) + .input(z.object({ params: zGetDatasetsByDatasetIdPath })) + .output(zGetDatasetsByDatasetIdResponse) + +/** + * Update an existing dataset + */ +export const patch6 = oc + .route({ + description: 'Update an existing dataset', + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchDatasetsByDatasetId', + path: '/datasets/{dataset_id}', + tags: ['service_api'], + }) + .input(z.object({ body: zPatchDatasetsByDatasetIdBody, params: zPatchDatasetsByDatasetIdPath })) + .output(zPatchDatasetsByDatasetIdResponse) + +export const byDatasetId = { + delete: delete8, + get: get19, + patch: patch6, + document: 
document_, + documents, + hitTesting, + metadata: metadata2, + pipeline: pipeline2, + retrieve, + tags: tags2, +} + +/** + * Resource for getting datasets + * + * List all datasets + */ +export const get20 = oc + .route({ + description: 'List all datasets', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getDatasets', + path: '/datasets', + summary: 'Resource for getting datasets', + tags: ['service_api'], + }) + .output(zGetDatasetsResponse) + +/** + * Resource for creating datasets + * + * Create a new dataset + */ +export const post32 = oc + .route({ + description: 'Create a new dataset', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postDatasets', + path: '/datasets', + summary: 'Resource for creating datasets', + tags: ['service_api'], + }) + .input(z.object({ body: zPostDatasetsBody })) + .output(zPostDatasetsResponse) + +export const datasets = { + get: get20, + post: post32, + pipeline, + tags, + byDatasetId, +} + +/** + * Get end user detail + * + * Get an end user by ID + * This endpoint is scoped to the current app token's tenant/app to prevent + * cross-tenant/app access when an end-user ID is known. + */ +export const get21 = oc + .route({ + description: + 'Get an end user by ID\nThis endpoint is scoped to the current app token\'s tenant/app to prevent\ncross-tenant/app access when an end-user ID is known.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getEndUsersByEndUserId', + path: '/end-users/{end_user_id}', + summary: 'Get end user detail', + tags: ['service_api'], + }) + .input(z.object({ params: zGetEndUsersByEndUserIdPath })) + .output(zGetEndUsersByEndUserIdResponse) + +export const byEndUserId = { + get: get21, +} + +export const endUsers = { + byEndUserId, +} + +/** + * Upload a file for use in conversations + * + * Upload a file for use in conversations + * Accepts a single file upload via multipart/form-data. 
+ */ +export const post33 = oc + .route({ + description: + 'Upload a file for use in conversations\nAccepts a single file upload via multipart/form-data.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postFilesUpload', + path: '/files/upload', + successStatus: 201, + summary: 'Upload a file for use in conversations', + tags: ['service_api'], + }) + .output(zPostFilesUploadResponse) + +export const upload = { + post: post33, +} + +/** + * Preview/Download a file that was uploaded via Service API + * + * Preview or download a file uploaded via Service API + * Provides secure file preview/download functionality. + * Files can only be accessed if they belong to messages within the requesting app's context. + */ +export const get22 = oc + .route({ + description: + 'Preview or download a file uploaded via Service API\nProvides secure file preview/download functionality.\nFiles can only be accessed if they belong to messages within the requesting app\'s context.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getFilesByFileIdPreview', + path: '/files/{file_id}/preview', + summary: 'Preview/Download a file that was uploaded via Service API', + tags: ['service_api'], + }) + .input( + z.object({ + params: zGetFilesByFileIdPreviewPath, + query: zGetFilesByFileIdPreviewQuery.optional(), + }), + ) + .output(zGetFilesByFileIdPreviewResponse) + +export const preview = { + get: get22, +} + +export const byFileId = { + preview, +} + +export const files = { + upload, + byFileId, +} + +/** + * Get a paused human input form by token + */ +export const get23 = oc + .route({ + description: 'Get a paused human input form by token', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getFormHumanInputByFormToken', + path: '/form/human_input/{form_token}', + tags: ['service_api'], + }) + .input(z.object({ params: zGetFormHumanInputByFormTokenPath })) + .output(zGetFormHumanInputByFormTokenResponse) + +/** + * Submit a paused human input form 
by token + */ +export const post34 = oc + .route({ + description: 'Submit a paused human input form by token', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postFormHumanInputByFormToken', + path: '/form/human_input/{form_token}', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostFormHumanInputByFormTokenBody, + params: zPostFormHumanInputByFormTokenPath, + }), + ) + .output(zPostFormHumanInputByFormTokenResponse) + +export const byFormToken = { + get: get23, + post: post34, +} + +export const humanInput = { + byFormToken, +} + +export const form = { + humanInput, +} + +/** + * Get app information + * + * Get basic application information + * Returns basic information about the application including name, description, tags, and mode. + */ +export const get24 = oc + .route({ + description: + 'Get basic application information\nReturns basic information about the application including name, description, tags, and mode.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getInfo', + path: '/info', + summary: 'Get app information', + tags: ['service_api'], + }) + .output(zGetInfoResponse) + +export const info = { + get: get24, +} + +/** + * Submit feedback for a message + * + * Submit feedback for a message + * Allows users to rate messages as like/dislike and provide optional feedback content. 
+ */ +export const post35 = oc + .route({ + description: + 'Submit feedback for a message\nAllows users to rate messages as like/dislike and provide optional feedback content.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postMessagesByMessageIdFeedbacks', + path: '/messages/{message_id}/feedbacks', + summary: 'Submit feedback for a message', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostMessagesByMessageIdFeedbacksBody, + params: zPostMessagesByMessageIdFeedbacksPath, + }), + ) + .output(zPostMessagesByMessageIdFeedbacksResponse) + +export const feedbacks2 = { + post: post35, +} + +/** + * Get suggested follow-up questions for a message + * + * Get suggested follow-up questions for a message + * Returns AI-generated follow-up questions based on the message content. + */ +export const get25 = oc + .route({ + description: + 'Get suggested follow-up questions for a message\nReturns AI-generated follow-up questions based on the message content.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getMessagesByMessageIdSuggested', + path: '/messages/{message_id}/suggested', + summary: 'Get suggested follow-up questions for a message', + tags: ['service_api'], + }) + .input(z.object({ params: zGetMessagesByMessageIdSuggestedPath })) + .output(zGetMessagesByMessageIdSuggestedResponse) + +export const suggested = { + get: get25, +} + +export const byMessageId = { + feedbacks: feedbacks2, + suggested, +} + +/** + * List messages in a conversation + * + * List messages in a conversation + * Retrieves messages with pagination support using first_id. 
+ */ +export const get26 = oc + .route({ + description: + 'List messages in a conversation\nRetrieves messages with pagination support using first_id.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getMessages', + path: '/messages', + summary: 'List messages in a conversation', + tags: ['service_api'], + }) + .input(z.object({ query: zGetMessagesQuery })) + .output(zGetMessagesResponse) + +export const messages = { + get: get26, + byMessageId, +} + +/** + * Get app metadata + * + * Get application metadata + * Returns metadata about the application including configuration and settings. + */ +export const get27 = oc + .route({ + description: + 'Get application metadata\nReturns metadata about the application including configuration and settings.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getMeta', + path: '/meta', + summary: 'Get app metadata', + tags: ['service_api'], + }) + .output(zGetMetaResponse) + +export const meta = { + get: get27, +} + +/** + * Retrieve app parameters + * + * Retrieve application input parameters and configuration + * Returns the input form parameters and configuration for the application. + */ +export const get28 = oc + .route({ + description: + 'Retrieve application input parameters and configuration\nReturns the input form parameters and configuration for the application.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getParameters', + path: '/parameters', + summary: 'Retrieve app parameters', + tags: ['service_api'], + }) + .output(zGetParametersResponse) + +export const parameters = { + get: get28, +} + +/** + * Retrieve app site info + * + * Get application site configuration + * Returns the site configuration for the application including theme, icons, and text. 
+ */ +export const get29 = oc + .route({ + description: + 'Get application site configuration\nReturns the site configuration for the application including theme, icons, and text.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getSite', + path: '/site', + summary: 'Retrieve app site info', + tags: ['service_api'], + }) + .output(zGetSiteResponse) + +export const site = { + get: get29, +} + +/** + * Convert text to audio using text-to-speech + * + * Convert text to audio using text-to-speech + * Converts the provided text to audio using the specified voice. + */ +export const post36 = oc + .route({ + description: + 'Convert text to audio using text-to-speech\nConverts the provided text to audio using the specified voice.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postTextToAudio', + path: '/text-to-audio', + summary: 'Convert text to audio using text-to-speech', + tags: ['service_api'], + }) + .input(z.object({ body: zPostTextToAudioBody })) + .output(zPostTextToAudioResponse) + +export const textToAudio = { + post: post36, +} + +/** + * Get workflow execution events stream after resume + */ +export const get30 = oc + .route({ + description: 'Get workflow execution events stream after resume', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkflowByTaskIdEvents', + path: '/workflow/{task_id}/events', + tags: ['service_api'], + }) + .input( + z.object({ + params: zGetWorkflowByTaskIdEventsPath, + query: zGetWorkflowByTaskIdEventsQuery.optional(), + }), + ) + .output(zGetWorkflowByTaskIdEventsResponse) + +export const events = { + get: get30, +} + +export const byTaskId3 = { + events, +} + +export const workflow = { + byTaskId: byTaskId3, +} + +/** + * Get workflow app logs + * + * Get workflow execution logs + * Returns paginated workflow execution logs with filtering options. 
+ */ +export const get31 = oc + .route({ + description: + 'Get workflow execution logs\nReturns paginated workflow execution logs with filtering options.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkflowsLogs', + path: '/workflows/logs', + summary: 'Get workflow app logs', + tags: ['service_api'], + }) + .input(z.object({ query: zGetWorkflowsLogsQuery.optional() })) + .output(zGetWorkflowsLogsResponse) + +export const logs = { + get: get31, +} + +/** + * Get a workflow task running detail + * + * Get workflow run details + * Returns detailed information about a specific workflow run. + */ +export const get32 = oc + .route({ + description: + 'Get workflow run details\nReturns detailed information about a specific workflow run.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkflowsRunByWorkflowRunId', + path: '/workflows/run/{workflow_run_id}', + summary: 'Get a workflow task running detail', + tags: ['service_api'], + }) + .input(z.object({ params: zGetWorkflowsRunByWorkflowRunIdPath })) + .output(zGetWorkflowsRunByWorkflowRunIdResponse) + +export const byWorkflowRunId = { + get: get32, +} + +/** + * Execute a workflow + * + * Execute a workflow + * Runs a workflow with the provided inputs and returns the results. + * Supports both blocking and streaming response modes. 
+ */ +export const post37 = oc + .route({ + description: + 'Execute a workflow\nRuns a workflow with the provided inputs and returns the results.\nSupports both blocking and streaming response modes.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkflowsRun', + path: '/workflows/run', + summary: 'Execute a workflow', + tags: ['service_api'], + }) + .input(z.object({ body: zPostWorkflowsRunBody })) + .output(zPostWorkflowsRunResponse) + +export const run3 = { + post: post37, + byWorkflowRunId, +} + +/** + * Stop a running workflow task + * + * Stop a running workflow task + */ +export const post38 = oc + .route({ + description: 'Stop a running workflow task', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkflowsTasksByTaskIdStop', + path: '/workflows/tasks/{task_id}/stop', + summary: 'Stop a running workflow task', + tags: ['service_api'], + }) + .input(z.object({ params: zPostWorkflowsTasksByTaskIdStopPath })) + .output(zPostWorkflowsTasksByTaskIdStopResponse) + +export const stop3 = { + post: post38, +} + +export const byTaskId4 = { + stop: stop3, +} + +export const tasks = { + byTaskId: byTaskId4, +} + +/** + * Run specific workflow by ID + * + * Execute a specific workflow by ID + * Executes a specific workflow version identified by its ID. 
+ */ +export const post39 = oc + .route({ + description: + 'Execute a specific workflow by ID\nExecutes a specific workflow version identified by its ID.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkflowsByWorkflowIdRun', + path: '/workflows/{workflow_id}/run', + summary: 'Run specific workflow by ID', + tags: ['service_api'], + }) + .input( + z.object({ + body: zPostWorkflowsByWorkflowIdRunBody, + params: zPostWorkflowsByWorkflowIdRunPath, + }), + ) + .output(zPostWorkflowsByWorkflowIdRunResponse) + +export const run4 = { + post: post39, +} + +export const byWorkflowId = { + run: run4, +} + +export const workflows = { + logs, + run: run3, + tasks, + byWorkflowId, +} + +/** + * Get available models by model type + * + * Get available models by model type + * Returns a list of available models for the specified model type. + */ +export const get33 = oc + .route({ + description: + 'Get available models by model type\nReturns a list of available models for the specified model type.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkspacesCurrentModelsModelTypesByModelType', + path: '/workspaces/current/models/model-types/{model_type}', + summary: 'Get available models by model type', + tags: ['service_api'], + }) + .input(z.object({ params: zGetWorkspacesCurrentModelsModelTypesByModelTypePath })) + .output(zGetWorkspacesCurrentModelsModelTypesByModelTypeResponse) + +export const byModelType = { + get: get33, +} + +export const modelTypes = { + byModelType, +} + +export const models = { + modelTypes, +} + +export const current = { + models, +} + +export const workspaces = { + current, +} + +export const contract = { + root, + app, + apps, + audioToText, + chatMessages, + completionMessages, + conversations, + datasets, + endUsers, + files, + form, + info, + messages, + meta, + parameters, + site, + textToAudio, + workflow, + workflows, + workspaces, +} diff --git a/packages/contracts/generated/api/service/types.gen.ts 
b/packages/contracts/generated/api/service/types.gen.ts new file mode 100644 index 0000000000..e3791e295c --- /dev/null +++ b/packages/contracts/generated/api/service/types.gen.ts @@ -0,0 +1,3155 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/v1` | (string & {}) +} + +export type Annotation = { + content?: string | null + created_at?: number | null + hit_count?: number | null + id: string + question?: string | null +} + +export type AnnotationCreatePayload = { + answer: string + question: string +} + +export type AnnotationList = { + data: Array + has_more: boolean + limit: number + page: number + total: number +} + +export type AnnotationReplyActionPayload = { + embedding_model_name: string + embedding_provider_name: string + score_threshold: number +} + +export type ChatRequestPayload = { + auto_generate_name?: boolean + conversation_id?: string | null + files?: Array<{ + [key: string]: unknown + }> | null + inputs: { + [key: string]: unknown + } + query: string + response_mode?: 'blocking' | 'streaming' | null + retriever_from?: string + workflow_id?: string | null +} + +export type ChildChunkCreatePayload = { + content: string +} + +export type ChildChunkListQuery = { + keyword?: string | null + limit?: number + page?: number +} + +export type ChildChunkUpdatePayload = { + content: string +} + +export type CompletionRequestPayload = { + files?: Array<{ + [key: string]: unknown + }> | null + inputs: { + [key: string]: unknown + } + query?: string + response_mode?: 'blocking' | 'streaming' | null + retriever_from?: string +} + +export type ConversationListQuery = { + last_id?: string | null + limit?: number + sort_by?: 'created_at' | '-created_at' | 'updated_at' | '-updated_at' +} + +export type ConversationRenamePayload = { + auto_generate?: boolean + name?: string | null +} + +export type ConversationVariableInfiniteScrollPaginationResponse = { + data: Array + has_more: boolean + 
limit: number +} + +export type ConversationVariableResponse = { + created_at?: number | null + description?: string | null + id: string + name: string + updated_at?: number | null + value?: string | null + value_type: string +} + +export type ConversationVariableUpdatePayload = { + value: unknown +} + +export type ConversationVariablesQuery = { + last_id?: string | null + limit?: number + variable_name?: string | null +} + +export type DatasetCreatePayload = { + description?: string + embedding_model?: string | null + embedding_model_provider?: string | null + external_knowledge_api_id?: string | null + external_knowledge_id?: string | null + indexing_technique?: 'high_quality' | 'economy' | null + name: string + permission?: DatasetPermissionEnum + provider?: string + retrieval_model?: RetrievalModel + summary_index_setting?: { + [key: string]: unknown + } | null +} + +export type DatasetUpdatePayload = { + description?: string | null + embedding_model?: string | null + embedding_model_provider?: string | null + external_knowledge_api_id?: string | null + external_knowledge_id?: string | null + external_retrieval_model?: { + [key: string]: unknown + } | null + indexing_technique?: 'high_quality' | 'economy' | null + name?: string | null + partial_member_list?: Array<{ + [key: string]: string + }> | null + permission?: DatasetPermissionEnum + retrieval_model?: RetrievalModel +} + +export type DocumentBatchDownloadZipPayload = { + document_ids: Array +} + +export type DocumentTextCreatePayload = { + doc_form?: string + doc_language?: string + embedding_model?: string | null + embedding_model_provider?: string | null + indexing_technique?: string | null + name: string + original_document_id?: string | null + process_rule?: ProcessRule + retrieval_model?: RetrievalModel + text: string +} + +export type DocumentTextUpdate = { + doc_form?: string + doc_language?: string + name?: string | null + process_rule?: ProcessRule + retrieval_model?: RetrievalModel + text?: 
string | null +} + +export type FeedbackListQuery = { + limit?: number + page?: number +} + +export type FilePreviewQuery = { + as_attachment?: boolean +} + +export type FileResponse = { + conversation_id?: string | null + created_at?: number | null + created_by?: string | null + extension?: string | null + file_key?: string | null + id: string + mime_type?: string | null + name: string + original_url?: string | null + preview_url?: string | null + size: number + source_url?: string | null + tenant_id?: string | null + user_id?: string | null +} + +export type HitTestingPayload = { + attachment_ids?: Array | null + external_retrieval_model?: { + [key: string]: unknown + } | null + query: string + retrieval_model?: RetrievalModel +} + +export type HumanInputFormSubmitPayload = { + action: string + inputs: { + [key: string]: JsonValue + } +} + +export type MessageFeedbackPayload = { + content?: string | null + rating?: 'like' | 'dislike' | null +} + +export type MessageListQuery = { + conversation_id: string + first_id?: string | null + limit?: number +} + +export type MetadataArgs = { + name: string + type: 'string' | 'number' | 'time' +} + +export type MetadataOperationData = { + operation_data: Array +} + +export type MetadataUpdatePayload = { + name: string +} + +export type SegmentCreatePayload = { + segments?: Array<{ + [key: string]: unknown + }> | null +} + +export type SegmentListQuery = { + keyword?: string | null + status?: Array +} + +export type SegmentUpdatePayload = { + segment: SegmentUpdateArgs +} + +export type TagBindingPayload = { + tag_ids: Array + target_id: string +} + +export type TagCreatePayload = { + name: string +} + +export type TagDeletePayload = { + tag_id: string +} + +export type TagUnbindingPayload = { + tag_id: string + target_id: string +} + +export type TagUpdatePayload = { + name: string + tag_id: string +} + +export type TextToAudioPayload = { + message_id?: string | null + streaming?: boolean | null + text?: string | null + 
voice?: string | null +} + +export type WorkflowAppLogPaginationResponse = { + data: Array + has_more: boolean + limit: number + page: number + total: number +} + +export type WorkflowLogQuery = { + created_at__after?: string | null + created_at__before?: string | null + created_by_account?: string | null + created_by_end_user_session_id?: string | null + keyword?: string | null + limit?: number + page?: number + status?: 'succeeded' | 'failed' | 'stopped' | null +} + +export type WorkflowRunPayload = { + files?: Array<{ + [key: string]: unknown + }> | null + inputs: { + [key: string]: unknown + } + response_mode?: 'blocking' | 'streaming' | null +} + +export type WorkflowRunResponse = { + created_at?: number | null + elapsed_time?: unknown + error?: string | null + finished_at?: number | null + id: string + inputs?: unknown + outputs?: { + [key: string]: unknown + } + status: string + total_steps?: number | null + total_tokens?: number | null + workflow_id: string +} + +export type Condition = { + comparison_operator: + | 'contains' + | 'not contains' + | 'start with' + | 'end with' + | 'is' + | 'is not' + | 'empty' + | 'not empty' + | 'in' + | 'not in' + | '=' + | '≠' + | '>' + | '<' + | '≥' + | '≤' + | 'before' + | 'after' + name: string + value?: unknown +} + +export type DatasetPermissionEnum = 'only_me' | 'all_team_members' | 'partial_members' + +export type MetadataFilteringCondition = { + conditions?: Array | null + logical_operator?: 'and' | 'or' | null +} + +export type RerankingModel = { + reranking_model_name?: string | null + reranking_provider_name?: string | null +} + +export type RetrievalMethod + = | 'semantic_search' + | 'full_text_search' + | 'hybrid_search' + | 'keyword_search' + +export type RetrievalModel = { + metadata_filtering_conditions?: MetadataFilteringCondition + reranking_enable: boolean + reranking_mode?: string | null + reranking_model?: RerankingModel + score_threshold?: number | null + score_threshold_enabled: boolean + 
search_method: RetrievalMethod + top_k: number + weights?: WeightModel +} + +export type WeightKeywordSetting = { + keyword_weight: number +} + +export type WeightModel = { + keyword_setting?: WeightKeywordSetting + vector_setting?: WeightVectorSetting + weight_type?: 'semantic_first' | 'keyword_first' | 'customized' | null +} + +export type WeightVectorSetting = { + embedding_model_name: string + embedding_provider_name: string + vector_weight: number +} + +export type PreProcessingRule = { + enabled: boolean + id: string +} + +export type ProcessRule = { + mode: 'automatic' | 'custom' | 'hierarchical' + rules?: Rule +} + +export type Rule = { + parent_mode?: 'full-doc' | 'paragraph' | null + pre_processing_rules?: Array | null + segmentation?: Segmentation + subchunk_segmentation?: Segmentation +} + +export type Segmentation = { + chunk_overlap?: number + max_tokens: number + separator?: string +} + +export type JsonValue = unknown + +export type DocumentMetadataOperation = { + document_id: string + metadata_list: Array + partial_update?: boolean +} + +export type MetadataDetail = { + id: string + name: string + value?: unknown +} + +export type SegmentUpdateArgs = { + answer?: string | null + attachment_ids?: Array | null + content?: string | null + enabled?: boolean | null + keywords?: Array | null + regenerate_child_chunks?: boolean + summary?: string | null +} + +export type SimpleAccount = { + email: string + id: string + name: string +} + +export type SimpleEndUser = { + id: string + is_anonymous: boolean + session_id?: string | null + type: string +} + +export type WorkflowAppLogPartialResponse = { + created_at?: number | null + created_by_account?: SimpleAccount + created_by_end_user?: SimpleEndUser + created_by_role?: string | null + created_from?: string | null + details?: unknown + id: string + workflow_run?: WorkflowRunForLogResponse +} + +export type WorkflowRunForLogResponse = { + created_at?: number | null + elapsed_time?: unknown + error?: string 
| null + exceptions_count?: number | null + finished_at?: number | null + id: string + status?: string | null + total_steps?: number | null + total_tokens?: number | null + triggered_from?: string | null + version?: string | null +} + +export type GetRootData = { + body?: never + path?: never + query?: never + url: '/' +} + +export type GetRootResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetRootResponse = GetRootResponses[keyof GetRootResponses] + +export type GetAppFeedbacksData = { + body?: never + path?: never + query?: { + limit?: number + page?: number + } + url: '/app/feedbacks' +} + +export type GetAppFeedbacksErrors = { + 401: { + [key: string]: unknown + } +} + +export type GetAppFeedbacksError = GetAppFeedbacksErrors[keyof GetAppFeedbacksErrors] + +export type GetAppFeedbacksResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppFeedbacksResponse = GetAppFeedbacksResponses[keyof GetAppFeedbacksResponses] + +export type PostAppsAnnotationReplyByActionData = { + body: AnnotationReplyActionPayload + path: { + action: string + } + query?: never + url: '/apps/annotation-reply/{action}' +} + +export type PostAppsAnnotationReplyByActionErrors = { + 401: { + [key: string]: unknown + } +} + +export type PostAppsAnnotationReplyByActionError + = PostAppsAnnotationReplyByActionErrors[keyof PostAppsAnnotationReplyByActionErrors] + +export type PostAppsAnnotationReplyByActionResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAppsAnnotationReplyByActionResponse + = PostAppsAnnotationReplyByActionResponses[keyof PostAppsAnnotationReplyByActionResponses] + +export type GetAppsAnnotationReplyByActionStatusByJobIdData = { + body?: never + path: { + action: string + job_id: string + } + query?: never + url: '/apps/annotation-reply/{action}/status/{job_id}' +} + +export type GetAppsAnnotationReplyByActionStatusByJobIdErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + 
+export type GetAppsAnnotationReplyByActionStatusByJobIdError + = GetAppsAnnotationReplyByActionStatusByJobIdErrors[keyof GetAppsAnnotationReplyByActionStatusByJobIdErrors] + +export type GetAppsAnnotationReplyByActionStatusByJobIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetAppsAnnotationReplyByActionStatusByJobIdResponse + = GetAppsAnnotationReplyByActionStatusByJobIdResponses[keyof GetAppsAnnotationReplyByActionStatusByJobIdResponses] + +export type GetAppsAnnotationsData = { + body?: never + path?: never + query?: never + url: '/apps/annotations' +} + +export type GetAppsAnnotationsErrors = { + 401: { + [key: string]: unknown + } +} + +export type GetAppsAnnotationsError = GetAppsAnnotationsErrors[keyof GetAppsAnnotationsErrors] + +export type GetAppsAnnotationsResponses = { + 200: AnnotationList +} + +export type GetAppsAnnotationsResponse + = GetAppsAnnotationsResponses[keyof GetAppsAnnotationsResponses] + +export type PostAppsAnnotationsData = { + body: AnnotationCreatePayload + path?: never + query?: never + url: '/apps/annotations' +} + +export type PostAppsAnnotationsErrors = { + 401: { + [key: string]: unknown + } +} + +export type PostAppsAnnotationsError = PostAppsAnnotationsErrors[keyof PostAppsAnnotationsErrors] + +export type PostAppsAnnotationsResponses = { + 201: Annotation +} + +export type PostAppsAnnotationsResponse + = PostAppsAnnotationsResponses[keyof PostAppsAnnotationsResponses] + +export type DeleteAppsAnnotationsByAnnotationIdData = { + body?: never + path: { + annotation_id: string + } + query?: never + url: '/apps/annotations/{annotation_id}' +} + +export type DeleteAppsAnnotationsByAnnotationIdErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type DeleteAppsAnnotationsByAnnotationIdError + = DeleteAppsAnnotationsByAnnotationIdErrors[keyof DeleteAppsAnnotationsByAnnotationIdErrors] + +export type 
DeleteAppsAnnotationsByAnnotationIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteAppsAnnotationsByAnnotationIdResponse + = DeleteAppsAnnotationsByAnnotationIdResponses[keyof DeleteAppsAnnotationsByAnnotationIdResponses] + +export type PutAppsAnnotationsByAnnotationIdData = { + body: AnnotationCreatePayload + path: { + annotation_id: string + } + query?: never + url: '/apps/annotations/{annotation_id}' +} + +export type PutAppsAnnotationsByAnnotationIdErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PutAppsAnnotationsByAnnotationIdError + = PutAppsAnnotationsByAnnotationIdErrors[keyof PutAppsAnnotationsByAnnotationIdErrors] + +export type PutAppsAnnotationsByAnnotationIdResponses = { + 200: Annotation +} + +export type PutAppsAnnotationsByAnnotationIdResponse + = PutAppsAnnotationsByAnnotationIdResponses[keyof PutAppsAnnotationsByAnnotationIdResponses] + +export type PostAudioToTextData = { + body?: never + path?: never + query?: never + url: '/audio-to-text' +} + +export type PostAudioToTextErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 413: { + [key: string]: unknown + } + 415: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostAudioToTextError = PostAudioToTextErrors[keyof PostAudioToTextErrors] + +export type PostAudioToTextResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAudioToTextResponse = PostAudioToTextResponses[keyof PostAudioToTextResponses] + +export type PostChatMessagesData = { + body: ChatRequestPayload + path?: never + query?: never + url: '/chat-messages' +} + +export type PostChatMessagesErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 429: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type 
PostChatMessagesError = PostChatMessagesErrors[keyof PostChatMessagesErrors] + +export type PostChatMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostChatMessagesResponse = PostChatMessagesResponses[keyof PostChatMessagesResponses] + +export type PostChatMessagesByTaskIdStopData = { + body?: never + path: { + task_id: string + } + query?: never + url: '/chat-messages/{task_id}/stop' +} + +export type PostChatMessagesByTaskIdStopErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostChatMessagesByTaskIdStopError + = PostChatMessagesByTaskIdStopErrors[keyof PostChatMessagesByTaskIdStopErrors] + +export type PostChatMessagesByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostChatMessagesByTaskIdStopResponse + = PostChatMessagesByTaskIdStopResponses[keyof PostChatMessagesByTaskIdStopResponses] + +export type PostCompletionMessagesData = { + body: CompletionRequestPayload + path?: never + query?: never + url: '/completion-messages' +} + +export type PostCompletionMessagesErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostCompletionMessagesError + = PostCompletionMessagesErrors[keyof PostCompletionMessagesErrors] + +export type PostCompletionMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostCompletionMessagesResponse + = PostCompletionMessagesResponses[keyof PostCompletionMessagesResponses] + +export type PostCompletionMessagesByTaskIdStopData = { + body?: never + path: { + task_id: string + } + query?: never + url: '/completion-messages/{task_id}/stop' +} + +export type PostCompletionMessagesByTaskIdStopErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostCompletionMessagesByTaskIdStopError + = 
PostCompletionMessagesByTaskIdStopErrors[keyof PostCompletionMessagesByTaskIdStopErrors] + +export type PostCompletionMessagesByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostCompletionMessagesByTaskIdStopResponse + = PostCompletionMessagesByTaskIdStopResponses[keyof PostCompletionMessagesByTaskIdStopResponses] + +export type GetConversationsData = { + body?: never + path?: never + query?: { + last_id?: string | null + limit?: number + sort_by?: 'created_at' | '-created_at' | 'updated_at' | '-updated_at' + } + url: '/conversations' +} + +export type GetConversationsErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetConversationsError = GetConversationsErrors[keyof GetConversationsErrors] + +export type GetConversationsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetConversationsResponse = GetConversationsResponses[keyof GetConversationsResponses] + +export type DeleteConversationsByCIdData = { + body?: never + path: { + c_id: string + } + query?: never + url: '/conversations/{c_id}' +} + +export type DeleteConversationsByCIdErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type DeleteConversationsByCIdError + = DeleteConversationsByCIdErrors[keyof DeleteConversationsByCIdErrors] + +export type DeleteConversationsByCIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteConversationsByCIdResponse + = DeleteConversationsByCIdResponses[keyof DeleteConversationsByCIdResponses] + +export type PostConversationsByCIdNameData = { + body: ConversationRenamePayload + path: { + c_id: string + } + query?: never + url: '/conversations/{c_id}/name' +} + +export type PostConversationsByCIdNameErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostConversationsByCIdNameError + = PostConversationsByCIdNameErrors[keyof 
PostConversationsByCIdNameErrors] + +export type PostConversationsByCIdNameResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostConversationsByCIdNameResponse + = PostConversationsByCIdNameResponses[keyof PostConversationsByCIdNameResponses] + +export type GetConversationsByCIdVariablesData = { + body?: never + path: { + c_id: string + } + query?: { + last_id?: string | null + limit?: number + variable_name?: string | null + } + url: '/conversations/{c_id}/variables' +} + +export type GetConversationsByCIdVariablesErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetConversationsByCIdVariablesError + = GetConversationsByCIdVariablesErrors[keyof GetConversationsByCIdVariablesErrors] + +export type GetConversationsByCIdVariablesResponses = { + 200: ConversationVariableInfiniteScrollPaginationResponse +} + +export type GetConversationsByCIdVariablesResponse + = GetConversationsByCIdVariablesResponses[keyof GetConversationsByCIdVariablesResponses] + +export type PutConversationsByCIdVariablesByVariableIdData = { + body: ConversationVariableUpdatePayload + path: { + c_id: string + variable_id: string + } + query?: never + url: '/conversations/{c_id}/variables/{variable_id}' +} + +export type PutConversationsByCIdVariablesByVariableIdErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PutConversationsByCIdVariablesByVariableIdError + = PutConversationsByCIdVariablesByVariableIdErrors[keyof PutConversationsByCIdVariablesByVariableIdErrors] + +export type PutConversationsByCIdVariablesByVariableIdResponses = { + 200: ConversationVariableResponse +} + +export type PutConversationsByCIdVariablesByVariableIdResponse + = PutConversationsByCIdVariablesByVariableIdResponses[keyof PutConversationsByCIdVariablesByVariableIdResponses] + +export type GetDatasetsData = { + body?: never + path?: never + query?: never + 
url: '/datasets' +} + +export type GetDatasetsErrors = { + 401: { + [key: string]: unknown + } +} + +export type GetDatasetsError = GetDatasetsErrors[keyof GetDatasetsErrors] + +export type GetDatasetsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsResponse = GetDatasetsResponses[keyof GetDatasetsResponses] + +export type PostDatasetsData = { + body: DatasetCreatePayload + path?: never + query?: never + url: '/datasets' +} + +export type PostDatasetsErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } +} + +export type PostDatasetsError = PostDatasetsErrors[keyof PostDatasetsErrors] + +export type PostDatasetsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsResponse = PostDatasetsResponses[keyof PostDatasetsResponses] + +export type PostDatasetsPipelineFileUploadData = { + body?: never + path?: never + query?: never + url: '/datasets/pipeline/file-upload' +} + +export type PostDatasetsPipelineFileUploadErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 413: { + [key: string]: unknown + } + 415: { + [key: string]: unknown + } +} + +export type PostDatasetsPipelineFileUploadError + = PostDatasetsPipelineFileUploadErrors[keyof PostDatasetsPipelineFileUploadErrors] + +export type PostDatasetsPipelineFileUploadResponses = { + 201: { + [key: string]: unknown + } +} + +export type PostDatasetsPipelineFileUploadResponse + = PostDatasetsPipelineFileUploadResponses[keyof PostDatasetsPipelineFileUploadResponses] + +export type DeleteDatasetsTagsData = { + body: TagDeletePayload + path?: never + query?: never + url: '/datasets/tags' +} + +export type DeleteDatasetsTagsErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } +} + +export type DeleteDatasetsTagsError = DeleteDatasetsTagsErrors[keyof DeleteDatasetsTagsErrors] + +export type DeleteDatasetsTagsResponses = { + 204: { + [key: string]: unknown + } +} + 
+export type DeleteDatasetsTagsResponse + = DeleteDatasetsTagsResponses[keyof DeleteDatasetsTagsResponses] + +export type GetDatasetsTagsData = { + body?: never + path?: never + query?: never + url: '/datasets/tags' +} + +export type GetDatasetsTagsErrors = { + 401: { + [key: string]: unknown + } +} + +export type GetDatasetsTagsError = GetDatasetsTagsErrors[keyof GetDatasetsTagsErrors] + +export type GetDatasetsTagsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsTagsResponse = GetDatasetsTagsResponses[keyof GetDatasetsTagsResponses] + +export type PatchDatasetsTagsData = { + body: TagUpdatePayload + path?: never + query?: never + url: '/datasets/tags' +} + +export type PatchDatasetsTagsErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } +} + +export type PatchDatasetsTagsError = PatchDatasetsTagsErrors[keyof PatchDatasetsTagsErrors] + +export type PatchDatasetsTagsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDatasetsTagsResponse = PatchDatasetsTagsResponses[keyof PatchDatasetsTagsResponses] + +export type PostDatasetsTagsData = { + body: TagCreatePayload + path?: never + query?: never + url: '/datasets/tags' +} + +export type PostDatasetsTagsErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } +} + +export type PostDatasetsTagsError = PostDatasetsTagsErrors[keyof PostDatasetsTagsErrors] + +export type PostDatasetsTagsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsTagsResponse = PostDatasetsTagsResponses[keyof PostDatasetsTagsResponses] + +export type PostDatasetsTagsBindingData = { + body: TagBindingPayload + path?: never + query?: never + url: '/datasets/tags/binding' +} + +export type PostDatasetsTagsBindingErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } +} + +export type PostDatasetsTagsBindingError + = PostDatasetsTagsBindingErrors[keyof 
PostDatasetsTagsBindingErrors] + +export type PostDatasetsTagsBindingResponses = { + 204: { + [key: string]: unknown + } +} + +export type PostDatasetsTagsBindingResponse + = PostDatasetsTagsBindingResponses[keyof PostDatasetsTagsBindingResponses] + +export type PostDatasetsTagsUnbindingData = { + body: TagUnbindingPayload + path?: never + query?: never + url: '/datasets/tags/unbinding' +} + +export type PostDatasetsTagsUnbindingErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } +} + +export type PostDatasetsTagsUnbindingError + = PostDatasetsTagsUnbindingErrors[keyof PostDatasetsTagsUnbindingErrors] + +export type PostDatasetsTagsUnbindingResponses = { + 204: { + [key: string]: unknown + } +} + +export type PostDatasetsTagsUnbindingResponse + = PostDatasetsTagsUnbindingResponses[keyof PostDatasetsTagsUnbindingResponses] + +export type DeleteDatasetsByDatasetIdData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}' +} + +export type DeleteDatasetsByDatasetIdErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 409: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdError + = DeleteDatasetsByDatasetIdErrors[keyof DeleteDatasetsByDatasetIdErrors] + +export type DeleteDatasetsByDatasetIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdResponse + = DeleteDatasetsByDatasetIdResponses[keyof DeleteDatasetsByDatasetIdResponses] + +export type GetDatasetsByDatasetIdData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}' +} + +export type GetDatasetsByDatasetIdErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdError + = GetDatasetsByDatasetIdErrors[keyof GetDatasetsByDatasetIdErrors] + +export type 
GetDatasetsByDatasetIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdResponse + = GetDatasetsByDatasetIdResponses[keyof GetDatasetsByDatasetIdResponses] + +export type PatchDatasetsByDatasetIdData = { + body: DatasetUpdatePayload + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}' +} + +export type PatchDatasetsByDatasetIdErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdError + = PatchDatasetsByDatasetIdErrors[keyof PatchDatasetsByDatasetIdErrors] + +export type PatchDatasetsByDatasetIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdResponse + = PatchDatasetsByDatasetIdResponses[keyof PatchDatasetsByDatasetIdResponses] + +export type PostDatasetsByDatasetIdDocumentCreateByFileData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/document/create-by-file' +} + +export type PostDatasetsByDatasetIdDocumentCreateByFileErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentCreateByFileError + = PostDatasetsByDatasetIdDocumentCreateByFileErrors[keyof PostDatasetsByDatasetIdDocumentCreateByFileErrors] + +export type PostDatasetsByDatasetIdDocumentCreateByFileResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentCreateByFileResponse + = PostDatasetsByDatasetIdDocumentCreateByFileResponses[keyof PostDatasetsByDatasetIdDocumentCreateByFileResponses] + +export type PostDatasetsByDatasetIdDocumentCreateByTextData = { + body: DocumentTextCreatePayload + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/document/create-by-text' +} + +export type PostDatasetsByDatasetIdDocumentCreateByTextErrors = { + 400: { + [key: string]: unknown + } + 
401: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentCreateByTextError + = PostDatasetsByDatasetIdDocumentCreateByTextErrors[keyof PostDatasetsByDatasetIdDocumentCreateByTextErrors] + +export type PostDatasetsByDatasetIdDocumentCreateByTextResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentCreateByTextResponse + = PostDatasetsByDatasetIdDocumentCreateByTextResponses[keyof PostDatasetsByDatasetIdDocumentCreateByTextResponses] + +export type PostDatasetsByDatasetIdDocumentCreateByFile2Data = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/document/create_by_file' +} + +export type PostDatasetsByDatasetIdDocumentCreateByFile2Errors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentCreateByFile2Error + = PostDatasetsByDatasetIdDocumentCreateByFile2Errors[keyof PostDatasetsByDatasetIdDocumentCreateByFile2Errors] + +export type PostDatasetsByDatasetIdDocumentCreateByFile2Responses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentCreateByFile2Response + = PostDatasetsByDatasetIdDocumentCreateByFile2Responses[keyof PostDatasetsByDatasetIdDocumentCreateByFile2Responses] + +export type PostDatasetsByDatasetIdDocumentCreateByText2Data = { + body: DocumentTextCreatePayload + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/document/create_by_text' +} + +export type PostDatasetsByDatasetIdDocumentCreateByText2Errors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentCreateByText2Error + = PostDatasetsByDatasetIdDocumentCreateByText2Errors[keyof PostDatasetsByDatasetIdDocumentCreateByText2Errors] + +export type PostDatasetsByDatasetIdDocumentCreateByText2Responses = { + 200: { + [key: string]: unknown + } +} + +export type 
PostDatasetsByDatasetIdDocumentCreateByText2Response + = PostDatasetsByDatasetIdDocumentCreateByText2Responses[keyof PostDatasetsByDatasetIdDocumentCreateByText2Responses] + +export type GetDatasetsByDatasetIdDocumentsData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents' +} + +export type GetDatasetsByDatasetIdDocumentsErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsError + = GetDatasetsByDatasetIdDocumentsErrors[keyof GetDatasetsByDatasetIdDocumentsErrors] + +export type GetDatasetsByDatasetIdDocumentsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsResponse + = GetDatasetsByDatasetIdDocumentsResponses[keyof GetDatasetsByDatasetIdDocumentsResponses] + +export type PostDatasetsByDatasetIdDocumentsDownloadZipData = { + body: DocumentBatchDownloadZipPayload + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/download-zip' +} + +export type PostDatasetsByDatasetIdDocumentsDownloadZipErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsDownloadZipError + = PostDatasetsByDatasetIdDocumentsDownloadZipErrors[keyof PostDatasetsByDatasetIdDocumentsDownloadZipErrors] + +export type PostDatasetsByDatasetIdDocumentsDownloadZipResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsDownloadZipResponse + = PostDatasetsByDatasetIdDocumentsDownloadZipResponses[keyof PostDatasetsByDatasetIdDocumentsDownloadZipResponses] + +export type PostDatasetsByDatasetIdDocumentsMetadataData = { + body: MetadataOperationData + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/metadata' +} + +export type PostDatasetsByDatasetIdDocumentsMetadataErrors = 
{ + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsMetadataError + = PostDatasetsByDatasetIdDocumentsMetadataErrors[keyof PostDatasetsByDatasetIdDocumentsMetadataErrors] + +export type PostDatasetsByDatasetIdDocumentsMetadataResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsMetadataResponse + = PostDatasetsByDatasetIdDocumentsMetadataResponses[keyof PostDatasetsByDatasetIdDocumentsMetadataResponses] + +export type PatchDatasetsByDatasetIdDocumentsStatusByActionData = { + body?: never + path: { + dataset_id: string + action: string + } + query?: never + url: '/datasets/{dataset_id}/documents/status/{action}' +} + +export type PatchDatasetsByDatasetIdDocumentsStatusByActionErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdDocumentsStatusByActionError + = PatchDatasetsByDatasetIdDocumentsStatusByActionErrors[keyof PatchDatasetsByDatasetIdDocumentsStatusByActionErrors] + +export type PatchDatasetsByDatasetIdDocumentsStatusByActionResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdDocumentsStatusByActionResponse + = PatchDatasetsByDatasetIdDocumentsStatusByActionResponses[keyof PatchDatasetsByDatasetIdDocumentsStatusByActionResponses] + +export type GetDatasetsByDatasetIdDocumentsByBatchIndexingStatusData = { + body?: never + path: { + dataset_id: string + batch: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{batch}/indexing-status' +} + +export type GetDatasetsByDatasetIdDocumentsByBatchIndexingStatusErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByBatchIndexingStatusError + = 
GetDatasetsByDatasetIdDocumentsByBatchIndexingStatusErrors[keyof GetDatasetsByDatasetIdDocumentsByBatchIndexingStatusErrors] + +export type GetDatasetsByDatasetIdDocumentsByBatchIndexingStatusResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByBatchIndexingStatusResponse + = GetDatasetsByDatasetIdDocumentsByBatchIndexingStatusResponses[keyof GetDatasetsByDatasetIdDocumentsByBatchIndexingStatusResponses] + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}' +} + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdError + = DeleteDatasetsByDatasetIdDocumentsByDocumentIdErrors[keyof DeleteDatasetsByDatasetIdDocumentsByDocumentIdErrors] + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdResponse + = DeleteDatasetsByDatasetIdDocumentsByDocumentIdResponses[keyof DeleteDatasetsByDatasetIdDocumentsByDocumentIdResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdError + = GetDatasetsByDatasetIdDocumentsByDocumentIdErrors[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdErrors] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdResponses 
= { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdResponses] + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}' +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdError + = PatchDatasetsByDatasetIdDocumentsByDocumentIdErrors[keyof PatchDatasetsByDatasetIdDocumentsByDocumentIdErrors] + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdResponse + = PatchDatasetsByDatasetIdDocumentsByDocumentIdResponses[keyof PatchDatasetsByDatasetIdDocumentsByDocumentIdResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/download' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadError + = GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadErrors[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadErrors] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponses[keyof 
GetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: { + keyword?: string | null + status?: Array + } + url: '/datasets/{dataset_id}/documents/{document_id}/segments' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsError + = GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsErrors[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsErrors] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponses] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsData = { + body: SegmentCreatePayload + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments' +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsError + = PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsErrors[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsErrors] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse + = PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponses[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponses] + 
+export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdData = { + body?: never + path: { + dataset_id: string + document_id: string + segment_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}' +} + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdError + = DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdErrors[keyof DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdErrors] + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse + = DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses[keyof DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdData = { + body?: never + path: { + segment_id: string + document_id: string + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdError + = GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdErrors[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdErrors] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse + = 
GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdData = { + body: SegmentUpdatePayload + path: { + dataset_id: string + document_id: string + segment_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}' +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdError + = PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdErrors[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdErrors] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse + = PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponses] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksData = { + body?: never + path: { + dataset_id: string + document_id: string + segment_id: string + } + query?: { + keyword?: string | null + limit?: number + page?: number + } + url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks' +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksError + = GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksErrors[keyof 
GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksErrors] + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse + = GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses[keyof GetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksData = { + body: ChildChunkCreatePayload + path: { + dataset_id: string + document_id: string + segment_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks' +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksError + = PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksErrors[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksErrors] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse + = PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponses] + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdData + = { + body?: never + path: { + dataset_id: string + document_id: string + segment_id: string + child_chunk_id: string + } + query?: never + url: 
'/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id}' + } + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdErrors + = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + } + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdError + = DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdErrors[keyof DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdErrors] + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponses + = { + 204: { + [key: string]: unknown + } + } + +export type DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse + = DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponses[keyof DeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponses] + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdData + = { + body: ChildChunkUpdatePayload + path: { + dataset_id: string + document_id: string + segment_id: string + child_chunk_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id}' + } + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdErrors + = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + } + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdError + = PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdErrors[keyof PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdErrors] + 
+export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponses + = { + 200: { + [key: string]: unknown + } + } + +export type PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse + = PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponses[keyof PatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponses] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFileData = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/update-by-file' +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFileErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFileError + = PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFileErrors[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFileErrors] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFileResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFileResponse + = PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFileResponses[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFileResponses] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextData = { + body: DocumentTextUpdate + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/update-by-text' +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextError + = 
PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextErrors[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextErrors] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextResponse + = PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextResponses[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextResponses] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Data = { + body?: never + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/update_by_file' +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Errors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Error + = PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Errors[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Errors] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Responses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Response + = PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Responses[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Responses] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Data = { + body: DocumentTextUpdate + path: { + dataset_id: string + document_id: string + } + query?: never + url: '/datasets/{dataset_id}/documents/{document_id}/update_by_text' +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Errors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Error + = 
PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Errors[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Errors] + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Responses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Response + = PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Responses[keyof PostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Responses] + +export type PostDatasetsByDatasetIdHitTestingData = { + body: HitTestingPayload + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/hit-testing' +} + +export type PostDatasetsByDatasetIdHitTestingErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdHitTestingError + = PostDatasetsByDatasetIdHitTestingErrors[keyof PostDatasetsByDatasetIdHitTestingErrors] + +export type PostDatasetsByDatasetIdHitTestingResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdHitTestingResponse + = PostDatasetsByDatasetIdHitTestingResponses[keyof PostDatasetsByDatasetIdHitTestingResponses] + +export type GetDatasetsByDatasetIdMetadataData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/metadata' +} + +export type GetDatasetsByDatasetIdMetadataErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdMetadataError + = GetDatasetsByDatasetIdMetadataErrors[keyof GetDatasetsByDatasetIdMetadataErrors] + +export type GetDatasetsByDatasetIdMetadataResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdMetadataResponse + = GetDatasetsByDatasetIdMetadataResponses[keyof GetDatasetsByDatasetIdMetadataResponses] + +export type PostDatasetsByDatasetIdMetadataData = { + body: MetadataArgs + path: { + dataset_id: string + } 
+ query?: never + url: '/datasets/{dataset_id}/metadata' +} + +export type PostDatasetsByDatasetIdMetadataErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdMetadataError + = PostDatasetsByDatasetIdMetadataErrors[keyof PostDatasetsByDatasetIdMetadataErrors] + +export type PostDatasetsByDatasetIdMetadataResponses = { + 201: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdMetadataResponse + = PostDatasetsByDatasetIdMetadataResponses[keyof PostDatasetsByDatasetIdMetadataResponses] + +export type GetDatasetsByDatasetIdMetadataBuiltInData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/metadata/built-in' +} + +export type GetDatasetsByDatasetIdMetadataBuiltInErrors = { + 401: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdMetadataBuiltInError + = GetDatasetsByDatasetIdMetadataBuiltInErrors[keyof GetDatasetsByDatasetIdMetadataBuiltInErrors] + +export type GetDatasetsByDatasetIdMetadataBuiltInResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdMetadataBuiltInResponse + = GetDatasetsByDatasetIdMetadataBuiltInResponses[keyof GetDatasetsByDatasetIdMetadataBuiltInResponses] + +export type PostDatasetsByDatasetIdMetadataBuiltInByActionData = { + body?: never + path: { + dataset_id: string + action: string + } + query?: never + url: '/datasets/{dataset_id}/metadata/built-in/{action}' +} + +export type PostDatasetsByDatasetIdMetadataBuiltInByActionErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdMetadataBuiltInByActionError + = PostDatasetsByDatasetIdMetadataBuiltInByActionErrors[keyof PostDatasetsByDatasetIdMetadataBuiltInByActionErrors] + +export type PostDatasetsByDatasetIdMetadataBuiltInByActionResponses = { + 200: { + [key: string]: unknown + } +} + +export type 
PostDatasetsByDatasetIdMetadataBuiltInByActionResponse + = PostDatasetsByDatasetIdMetadataBuiltInByActionResponses[keyof PostDatasetsByDatasetIdMetadataBuiltInByActionResponses] + +export type DeleteDatasetsByDatasetIdMetadataByMetadataIdData = { + body?: never + path: { + dataset_id: string + metadata_id: string + } + query?: never + url: '/datasets/{dataset_id}/metadata/{metadata_id}' +} + +export type DeleteDatasetsByDatasetIdMetadataByMetadataIdErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdMetadataByMetadataIdError + = DeleteDatasetsByDatasetIdMetadataByMetadataIdErrors[keyof DeleteDatasetsByDatasetIdMetadataByMetadataIdErrors] + +export type DeleteDatasetsByDatasetIdMetadataByMetadataIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteDatasetsByDatasetIdMetadataByMetadataIdResponse + = DeleteDatasetsByDatasetIdMetadataByMetadataIdResponses[keyof DeleteDatasetsByDatasetIdMetadataByMetadataIdResponses] + +export type PatchDatasetsByDatasetIdMetadataByMetadataIdData = { + body: MetadataUpdatePayload + path: { + dataset_id: string + metadata_id: string + } + query?: never + url: '/datasets/{dataset_id}/metadata/{metadata_id}' +} + +export type PatchDatasetsByDatasetIdMetadataByMetadataIdErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdMetadataByMetadataIdError + = PatchDatasetsByDatasetIdMetadataByMetadataIdErrors[keyof PatchDatasetsByDatasetIdMetadataByMetadataIdErrors] + +export type PatchDatasetsByDatasetIdMetadataByMetadataIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchDatasetsByDatasetIdMetadataByMetadataIdResponse + = PatchDatasetsByDatasetIdMetadataByMetadataIdResponses[keyof PatchDatasetsByDatasetIdMetadataByMetadataIdResponses] + +export type GetDatasetsByDatasetIdPipelineDatasourcePluginsData = { + body?: never + path: { + dataset_id: 
string + } + query?: { + is_published?: string + } + url: '/datasets/{dataset_id}/pipeline/datasource-plugins' +} + +export type GetDatasetsByDatasetIdPipelineDatasourcePluginsErrors = { + 401: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdPipelineDatasourcePluginsError + = GetDatasetsByDatasetIdPipelineDatasourcePluginsErrors[keyof GetDatasetsByDatasetIdPipelineDatasourcePluginsErrors] + +export type GetDatasetsByDatasetIdPipelineDatasourcePluginsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdPipelineDatasourcePluginsResponse + = GetDatasetsByDatasetIdPipelineDatasourcePluginsResponses[keyof GetDatasetsByDatasetIdPipelineDatasourcePluginsResponses] + +export type PostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunData = { + body?: never + path: { + dataset_id: string + node_id: string + } + query?: never + url: '/datasets/{dataset_id}/pipeline/datasource/nodes/{node_id}/run' +} + +export type PostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunErrors = { + 401: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunError + = PostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunErrors[keyof PostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunErrors] + +export type PostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunResponse + = PostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunResponses[keyof PostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunResponses] + +export type PostDatasetsByDatasetIdPipelineRunData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/pipeline/run' +} + +export type PostDatasetsByDatasetIdPipelineRunErrors = { + 401: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdPipelineRunError + = 
PostDatasetsByDatasetIdPipelineRunErrors[keyof PostDatasetsByDatasetIdPipelineRunErrors] + +export type PostDatasetsByDatasetIdPipelineRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdPipelineRunResponse + = PostDatasetsByDatasetIdPipelineRunResponses[keyof PostDatasetsByDatasetIdPipelineRunResponses] + +export type PostDatasetsByDatasetIdRetrieveData = { + body: HitTestingPayload + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/retrieve' +} + +export type PostDatasetsByDatasetIdRetrieveErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdRetrieveError + = PostDatasetsByDatasetIdRetrieveErrors[keyof PostDatasetsByDatasetIdRetrieveErrors] + +export type PostDatasetsByDatasetIdRetrieveResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostDatasetsByDatasetIdRetrieveResponse + = PostDatasetsByDatasetIdRetrieveResponses[keyof PostDatasetsByDatasetIdRetrieveResponses] + +export type GetDatasetsByDatasetIdTagsData = { + body?: never + path: { + dataset_id: string + } + query?: never + url: '/datasets/{dataset_id}/tags' +} + +export type GetDatasetsByDatasetIdTagsErrors = { + 401: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdTagsError + = GetDatasetsByDatasetIdTagsErrors[keyof GetDatasetsByDatasetIdTagsErrors] + +export type GetDatasetsByDatasetIdTagsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetDatasetsByDatasetIdTagsResponse + = GetDatasetsByDatasetIdTagsResponses[keyof GetDatasetsByDatasetIdTagsResponses] + +export type GetEndUsersByEndUserIdData = { + body?: never + path: { + end_user_id: string + } + query?: never + url: '/end-users/{end_user_id}' +} + +export type GetEndUsersByEndUserIdErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetEndUsersByEndUserIdError + = 
GetEndUsersByEndUserIdErrors[keyof GetEndUsersByEndUserIdErrors] + +export type GetEndUsersByEndUserIdResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetEndUsersByEndUserIdResponse + = GetEndUsersByEndUserIdResponses[keyof GetEndUsersByEndUserIdResponses] + +export type PostFilesUploadData = { + body?: never + path?: never + query?: never + url: '/files/upload' +} + +export type PostFilesUploadErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 413: { + [key: string]: unknown + } + 415: { + [key: string]: unknown + } +} + +export type PostFilesUploadError = PostFilesUploadErrors[keyof PostFilesUploadErrors] + +export type PostFilesUploadResponses = { + 201: FileResponse +} + +export type PostFilesUploadResponse = PostFilesUploadResponses[keyof PostFilesUploadResponses] + +export type GetFilesByFileIdPreviewData = { + body?: never + path: { + file_id: string + } + query?: { + as_attachment?: boolean + } + url: '/files/{file_id}/preview' +} + +export type GetFilesByFileIdPreviewErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetFilesByFileIdPreviewError + = GetFilesByFileIdPreviewErrors[keyof GetFilesByFileIdPreviewErrors] + +export type GetFilesByFileIdPreviewResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetFilesByFileIdPreviewResponse + = GetFilesByFileIdPreviewResponses[keyof GetFilesByFileIdPreviewResponses] + +export type GetFormHumanInputByFormTokenData = { + body?: never + path: { + form_token: string + } + query?: never + url: '/form/human_input/{form_token}' +} + +export type GetFormHumanInputByFormTokenErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 412: { + [key: string]: unknown + } +} + +export type GetFormHumanInputByFormTokenError + = GetFormHumanInputByFormTokenErrors[keyof GetFormHumanInputByFormTokenErrors] + +export type 
GetFormHumanInputByFormTokenResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetFormHumanInputByFormTokenResponse + = GetFormHumanInputByFormTokenResponses[keyof GetFormHumanInputByFormTokenResponses] + +export type PostFormHumanInputByFormTokenData = { + body: HumanInputFormSubmitPayload + path: { + form_token: string + } + query?: never + url: '/form/human_input/{form_token}' +} + +export type PostFormHumanInputByFormTokenErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 412: { + [key: string]: unknown + } +} + +export type PostFormHumanInputByFormTokenError + = PostFormHumanInputByFormTokenErrors[keyof PostFormHumanInputByFormTokenErrors] + +export type PostFormHumanInputByFormTokenResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostFormHumanInputByFormTokenResponse + = PostFormHumanInputByFormTokenResponses[keyof PostFormHumanInputByFormTokenResponses] + +export type GetInfoData = { + body?: never + path?: never + query?: never + url: '/info' +} + +export type GetInfoErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetInfoError = GetInfoErrors[keyof GetInfoErrors] + +export type GetInfoResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetInfoResponse = GetInfoResponses[keyof GetInfoResponses] + +export type GetMessagesData = { + body?: never + path?: never + query: { + conversation_id: string + first_id?: string | null + limit?: number + } + url: '/messages' +} + +export type GetMessagesErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetMessagesError = GetMessagesErrors[keyof GetMessagesErrors] + +export type GetMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetMessagesResponse = GetMessagesResponses[keyof GetMessagesResponses] + +export type PostMessagesByMessageIdFeedbacksData = { 
+ body: MessageFeedbackPayload + path: { + message_id: string + } + query?: never + url: '/messages/{message_id}/feedbacks' +} + +export type PostMessagesByMessageIdFeedbacksErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostMessagesByMessageIdFeedbacksError + = PostMessagesByMessageIdFeedbacksErrors[keyof PostMessagesByMessageIdFeedbacksErrors] + +export type PostMessagesByMessageIdFeedbacksResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostMessagesByMessageIdFeedbacksResponse + = PostMessagesByMessageIdFeedbacksResponses[keyof PostMessagesByMessageIdFeedbacksResponses] + +export type GetMessagesByMessageIdSuggestedData = { + body?: never + path: { + message_id: string + } + query?: never + url: '/messages/{message_id}/suggested' +} + +export type GetMessagesByMessageIdSuggestedErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type GetMessagesByMessageIdSuggestedError + = GetMessagesByMessageIdSuggestedErrors[keyof GetMessagesByMessageIdSuggestedErrors] + +export type GetMessagesByMessageIdSuggestedResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetMessagesByMessageIdSuggestedResponse + = GetMessagesByMessageIdSuggestedResponses[keyof GetMessagesByMessageIdSuggestedResponses] + +export type GetMetaData = { + body?: never + path?: never + query?: never + url: '/meta' +} + +export type GetMetaErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetMetaError = GetMetaErrors[keyof GetMetaErrors] + +export type GetMetaResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetMetaResponse = GetMetaResponses[keyof GetMetaResponses] + +export type GetParametersData = { + body?: never + path?: never + query?: never + url: '/parameters' +} + +export type GetParametersErrors = { + 
401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetParametersError = GetParametersErrors[keyof GetParametersErrors] + +export type GetParametersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetParametersResponse = GetParametersResponses[keyof GetParametersResponses] + +export type GetSiteData = { + body?: never + path?: never + query?: never + url: '/site' +} + +export type GetSiteErrors = { + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } +} + +export type GetSiteError = GetSiteErrors[keyof GetSiteErrors] + +export type GetSiteResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetSiteResponse = GetSiteResponses[keyof GetSiteResponses] + +export type PostTextToAudioData = { + body: TextToAudioPayload + path?: never + query?: never + url: '/text-to-audio' +} + +export type PostTextToAudioErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostTextToAudioError = PostTextToAudioErrors[keyof PostTextToAudioErrors] + +export type PostTextToAudioResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostTextToAudioResponse = PostTextToAudioResponses[keyof PostTextToAudioResponses] + +export type GetWorkflowByTaskIdEventsData = { + body?: never + path: { + task_id: string + } + query?: { + user?: string + include_state_snapshot?: string + continue_on_pause?: string + } + url: '/workflow/{task_id}/events' +} + +export type GetWorkflowByTaskIdEventsErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetWorkflowByTaskIdEventsError + = GetWorkflowByTaskIdEventsErrors[keyof GetWorkflowByTaskIdEventsErrors] + +export type GetWorkflowByTaskIdEventsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkflowByTaskIdEventsResponse + = GetWorkflowByTaskIdEventsResponses[keyof 
GetWorkflowByTaskIdEventsResponses] + +export type GetWorkflowsLogsData = { + body?: never + path?: never + query?: { + created_at__after?: string | null + created_at__before?: string | null + created_by_account?: string | null + created_by_end_user_session_id?: string | null + keyword?: string | null + limit?: number + page?: number + status?: 'succeeded' | 'failed' | 'stopped' | null + } + url: '/workflows/logs' +} + +export type GetWorkflowsLogsErrors = { + 401: { + [key: string]: unknown + } +} + +export type GetWorkflowsLogsError = GetWorkflowsLogsErrors[keyof GetWorkflowsLogsErrors] + +export type GetWorkflowsLogsResponses = { + 200: WorkflowAppLogPaginationResponse +} + +export type GetWorkflowsLogsResponse = GetWorkflowsLogsResponses[keyof GetWorkflowsLogsResponses] + +export type PostWorkflowsRunData = { + body: WorkflowRunPayload + path?: never + query?: never + url: '/workflows/run' +} + +export type PostWorkflowsRunErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 429: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostWorkflowsRunError = PostWorkflowsRunErrors[keyof PostWorkflowsRunErrors] + +export type PostWorkflowsRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkflowsRunResponse = PostWorkflowsRunResponses[keyof PostWorkflowsRunResponses] + +export type GetWorkflowsRunByWorkflowRunIdData = { + body?: never + path: { + workflow_run_id: string + } + query?: never + url: '/workflows/run/{workflow_run_id}' +} + +export type GetWorkflowsRunByWorkflowRunIdErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetWorkflowsRunByWorkflowRunIdError + = GetWorkflowsRunByWorkflowRunIdErrors[keyof GetWorkflowsRunByWorkflowRunIdErrors] + +export type GetWorkflowsRunByWorkflowRunIdResponses = { + 200: WorkflowRunResponse +} + +export type 
GetWorkflowsRunByWorkflowRunIdResponse + = GetWorkflowsRunByWorkflowRunIdResponses[keyof GetWorkflowsRunByWorkflowRunIdResponses] + +export type PostWorkflowsTasksByTaskIdStopData = { + body?: never + path: { + task_id: string + } + query?: never + url: '/workflows/tasks/{task_id}/stop' +} + +export type PostWorkflowsTasksByTaskIdStopErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostWorkflowsTasksByTaskIdStopError + = PostWorkflowsTasksByTaskIdStopErrors[keyof PostWorkflowsTasksByTaskIdStopErrors] + +export type PostWorkflowsTasksByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkflowsTasksByTaskIdStopResponse + = PostWorkflowsTasksByTaskIdStopResponses[keyof PostWorkflowsTasksByTaskIdStopResponses] + +export type PostWorkflowsByWorkflowIdRunData = { + body: WorkflowRunPayload + path: { + workflow_id: string + } + query?: never + url: '/workflows/{workflow_id}/run' +} + +export type PostWorkflowsByWorkflowIdRunErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 429: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostWorkflowsByWorkflowIdRunError + = PostWorkflowsByWorkflowIdRunErrors[keyof PostWorkflowsByWorkflowIdRunErrors] + +export type PostWorkflowsByWorkflowIdRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkflowsByWorkflowIdRunResponse + = PostWorkflowsByWorkflowIdRunResponses[keyof PostWorkflowsByWorkflowIdRunResponses] + +export type GetWorkspacesCurrentModelsModelTypesByModelTypeData = { + body?: never + path: { + model_type: string + } + query?: never + url: '/workspaces/current/models/model-types/{model_type}' +} + +export type GetWorkspacesCurrentModelsModelTypesByModelTypeErrors = { + 401: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentModelsModelTypesByModelTypeError + = 
GetWorkspacesCurrentModelsModelTypesByModelTypeErrors[keyof GetWorkspacesCurrentModelsModelTypesByModelTypeErrors] + +export type GetWorkspacesCurrentModelsModelTypesByModelTypeResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkspacesCurrentModelsModelTypesByModelTypeResponse + = GetWorkspacesCurrentModelsModelTypesByModelTypeResponses[keyof GetWorkspacesCurrentModelsModelTypesByModelTypeResponses] diff --git a/packages/contracts/generated/api/service/zod.gen.ts b/packages/contracts/generated/api/service/zod.gen.ts new file mode 100644 index 0000000000..6feacbdead --- /dev/null +++ b/packages/contracts/generated/api/service/zod.gen.ts @@ -0,0 +1,1599 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * Annotation + */ +export const zAnnotation = z.object({ + content: z.string().nullish(), + created_at: z.int().nullish(), + hit_count: z.int().nullish(), + id: z.string(), + question: z.string().nullish(), +}) + +/** + * AnnotationCreatePayload + */ +export const zAnnotationCreatePayload = z.object({ + answer: z.string(), + question: z.string(), +}) + +/** + * AnnotationList + */ +export const zAnnotationList = z.object({ + data: z.array(zAnnotation), + has_more: z.boolean(), + limit: z.int(), + page: z.int(), + total: z.int(), +}) + +/** + * AnnotationReplyActionPayload + */ +export const zAnnotationReplyActionPayload = z.object({ + embedding_model_name: z.string(), + embedding_provider_name: z.string(), + score_threshold: z.number(), +}) + +/** + * ChatRequestPayload + */ +export const zChatRequestPayload = z.object({ + auto_generate_name: z.boolean().optional().default(true), + conversation_id: z.string().nullish(), + files: z.array(z.record(z.string(), z.unknown())).nullish(), + inputs: z.record(z.string(), z.unknown()), + query: z.string(), + response_mode: z.enum(['blocking', 'streaming']).nullish(), + retriever_from: z.string().optional().default('dev'), + workflow_id: z.string().nullish(), 
+}) + +/** + * ChildChunkCreatePayload + */ +export const zChildChunkCreatePayload = z.object({ + content: z.string(), +}) + +/** + * ChildChunkListQuery + */ +export const zChildChunkListQuery = z.object({ + keyword: z.string().nullish(), + limit: z.int().gte(1).optional().default(20), + page: z.int().gte(1).optional().default(1), +}) + +/** + * ChildChunkUpdatePayload + */ +export const zChildChunkUpdatePayload = z.object({ + content: z.string(), +}) + +/** + * CompletionRequestPayload + */ +export const zCompletionRequestPayload = z.object({ + files: z.array(z.record(z.string(), z.unknown())).nullish(), + inputs: z.record(z.string(), z.unknown()), + query: z.string().optional().default(''), + response_mode: z.enum(['blocking', 'streaming']).nullish(), + retriever_from: z.string().optional().default('dev'), +}) + +/** + * ConversationListQuery + */ +export const zConversationListQuery = z.object({ + last_id: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), + sort_by: z + .enum(['created_at', '-created_at', 'updated_at', '-updated_at']) + .optional() + .default('-updated_at'), +}) + +/** + * ConversationRenamePayload + */ +export const zConversationRenamePayload = z.object({ + auto_generate: z.boolean().optional().default(false), + name: z.string().nullish(), +}) + +/** + * ConversationVariableResponse + */ +export const zConversationVariableResponse = z.object({ + created_at: z.int().nullish(), + description: z.string().nullish(), + id: z.string(), + name: z.string(), + updated_at: z.int().nullish(), + value: z.string().nullish(), + value_type: z.string(), +}) + +/** + * ConversationVariableInfiniteScrollPaginationResponse + */ +export const zConversationVariableInfiniteScrollPaginationResponse = z.object({ + data: z.array(zConversationVariableResponse), + has_more: z.boolean(), + limit: z.int(), +}) + +/** + * ConversationVariableUpdatePayload + */ +export const zConversationVariableUpdatePayload = z.object({ + value: z.unknown(), 
+}) + +/** + * ConversationVariablesQuery + */ +export const zConversationVariablesQuery = z.object({ + last_id: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), + variable_name: z.string().min(1).max(255).nullish(), +}) + +/** + * DocumentBatchDownloadZipPayload + * + * Request payload for bulk downloading documents as a zip archive. + */ +export const zDocumentBatchDownloadZipPayload = z.object({ + document_ids: z.array(z.uuid()).min(1).max(100), +}) + +/** + * FeedbackListQuery + */ +export const zFeedbackListQuery = z.object({ + limit: z.int().gte(1).lte(101).optional().default(20), + page: z.int().gte(1).optional().default(1), +}) + +/** + * FilePreviewQuery + */ +export const zFilePreviewQuery = z.object({ + as_attachment: z.boolean().optional().default(false), +}) + +/** + * FileResponse + */ +export const zFileResponse = z.object({ + conversation_id: z.string().nullish(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + extension: z.string().nullish(), + file_key: z.string().nullish(), + id: z.string(), + mime_type: z.string().nullish(), + name: z.string(), + original_url: z.string().nullish(), + preview_url: z.string().nullish(), + size: z.int(), + source_url: z.string().nullish(), + tenant_id: z.string().nullish(), + user_id: z.string().nullish(), +}) + +/** + * MessageFeedbackPayload + */ +export const zMessageFeedbackPayload = z.object({ + content: z.string().nullish(), + rating: z.enum(['like', 'dislike']).nullish(), +}) + +/** + * MessageListQuery + */ +export const zMessageListQuery = z.object({ + conversation_id: z.string(), + first_id: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), +}) + +/** + * MetadataArgs + */ +export const zMetadataArgs = z.object({ + name: z.string(), + type: z.enum(['string', 'number', 'time']), +}) + +/** + * MetadataUpdatePayload + */ +export const zMetadataUpdatePayload = z.object({ + name: z.string(), +}) + +/** + * SegmentCreatePayload + 
*/ +export const zSegmentCreatePayload = z.object({ + segments: z.array(z.record(z.string(), z.unknown())).nullish(), +}) + +/** + * SegmentListQuery + */ +export const zSegmentListQuery = z.object({ + keyword: z.string().nullish(), + status: z.array(z.string()).optional(), +}) + +/** + * TagBindingPayload + */ +export const zTagBindingPayload = z.object({ + tag_ids: z.array(z.string()), + target_id: z.string(), +}) + +/** + * TagCreatePayload + */ +export const zTagCreatePayload = z.object({ + name: z.string().min(1).max(50), +}) + +/** + * TagDeletePayload + */ +export const zTagDeletePayload = z.object({ + tag_id: z.string(), +}) + +/** + * TagUnbindingPayload + */ +export const zTagUnbindingPayload = z.object({ + tag_id: z.string(), + target_id: z.string(), +}) + +/** + * TagUpdatePayload + */ +export const zTagUpdatePayload = z.object({ + name: z.string().min(1).max(50), + tag_id: z.string(), +}) + +/** + * TextToAudioPayload + */ +export const zTextToAudioPayload = z.object({ + message_id: z.string().nullish(), + streaming: z.boolean().nullish(), + text: z.string().nullish(), + voice: z.string().nullish(), +}) + +/** + * WorkflowLogQuery + */ +export const zWorkflowLogQuery = z.object({ + created_at__after: z.string().nullish(), + created_at__before: z.string().nullish(), + created_by_account: z.string().nullish(), + created_by_end_user_session_id: z.string().nullish(), + keyword: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), + page: z.int().gte(1).lte(99999).optional().default(1), + status: z.enum(['succeeded', 'failed', 'stopped']).nullish(), +}) + +/** + * WorkflowRunPayload + */ +export const zWorkflowRunPayload = z.object({ + files: z.array(z.record(z.string(), z.unknown())).nullish(), + inputs: z.record(z.string(), z.unknown()), + response_mode: z.enum(['blocking', 'streaming']).nullish(), +}) + +/** + * WorkflowRunResponse + */ +export const zWorkflowRunResponse = z.object({ + created_at: z.int().nullish(), + 
elapsed_time: z.unknown().optional(), + error: z.string().nullish(), + finished_at: z.int().nullish(), + id: z.string(), + inputs: z.unknown().optional(), + outputs: z.record(z.string(), z.unknown()).optional(), + status: z.string(), + total_steps: z.int().nullish(), + total_tokens: z.int().nullish(), + workflow_id: z.string(), +}) + +/** + * Condition + * + * Condition detail + */ +export const zCondition = z.object({ + comparison_operator: z.enum([ + 'contains', + 'not contains', + 'start with', + 'end with', + 'is', + 'is not', + 'empty', + 'not empty', + 'in', + 'not in', + '=', + '≠', + '>', + '<', + '≥', + '≤', + 'before', + 'after', + ]), + name: z.string(), + value: z.unknown().optional(), +}) + +/** + * DatasetPermissionEnum + */ +export const zDatasetPermissionEnum = z.enum(['only_me', 'all_team_members', 'partial_members']) + +/** + * MetadataFilteringCondition + * + * Metadata Filtering Condition. + */ +export const zMetadataFilteringCondition = z.object({ + conditions: z.array(zCondition).nullish(), + logical_operator: z.enum(['and', 'or']).nullish().default('and'), +}) + +/** + * RerankingModel + */ +export const zRerankingModel = z.object({ + reranking_model_name: z.string().nullish(), + reranking_provider_name: z.string().nullish(), +}) + +/** + * RetrievalMethod + */ +export const zRetrievalMethod = z.enum([ + 'semantic_search', + 'full_text_search', + 'hybrid_search', + 'keyword_search', +]) + +/** + * WeightKeywordSetting + */ +export const zWeightKeywordSetting = z.object({ + keyword_weight: z.number(), +}) + +/** + * WeightVectorSetting + */ +export const zWeightVectorSetting = z.object({ + embedding_model_name: z.string(), + embedding_provider_name: z.string(), + vector_weight: z.number(), +}) + +/** + * WeightModel + */ +export const zWeightModel = z.object({ + keyword_setting: zWeightKeywordSetting.optional(), + vector_setting: zWeightVectorSetting.optional(), + weight_type: z.enum(['semantic_first', 'keyword_first', 
'customized']).nullish(), +}) + +/** + * RetrievalModel + */ +export const zRetrievalModel = z.object({ + metadata_filtering_conditions: zMetadataFilteringCondition.optional(), + reranking_enable: z.boolean(), + reranking_mode: z.string().nullish(), + reranking_model: zRerankingModel.optional(), + score_threshold: z.number().nullish(), + score_threshold_enabled: z.boolean(), + search_method: zRetrievalMethod, + top_k: z.int(), + weights: zWeightModel.optional(), +}) + +/** + * DatasetCreatePayload + */ +export const zDatasetCreatePayload = z.object({ + description: z.string().max(400).optional().default(''), + embedding_model: z.string().nullish(), + embedding_model_provider: z.string().nullish(), + external_knowledge_api_id: z.string().nullish(), + external_knowledge_id: z.string().nullish(), + indexing_technique: z.enum(['high_quality', 'economy']).nullish(), + name: z.string().min(1).max(40), + permission: zDatasetPermissionEnum.optional(), + provider: z.string().optional().default('vendor'), + retrieval_model: zRetrievalModel.optional(), + summary_index_setting: z.record(z.string(), z.unknown()).nullish(), +}) + +/** + * DatasetUpdatePayload + */ +export const zDatasetUpdatePayload = z.object({ + description: z.string().max(400).nullish(), + embedding_model: z.string().nullish(), + embedding_model_provider: z.string().nullish(), + external_knowledge_api_id: z.string().nullish(), + external_knowledge_id: z.string().nullish(), + external_retrieval_model: z.record(z.string(), z.unknown()).nullish(), + indexing_technique: z.enum(['high_quality', 'economy']).nullish(), + name: z.string().min(1).max(40).nullish(), + partial_member_list: z.array(z.record(z.string(), z.string())).nullish(), + permission: zDatasetPermissionEnum.optional(), + retrieval_model: zRetrievalModel.optional(), +}) + +/** + * HitTestingPayload + */ +export const zHitTestingPayload = z.object({ + attachment_ids: z.array(z.string()).nullish(), + external_retrieval_model: z.record(z.string(), 
z.unknown()).nullish(), + query: z.string().max(250), + retrieval_model: zRetrievalModel.optional(), +}) + +/** + * PreProcessingRule + */ +export const zPreProcessingRule = z.object({ + enabled: z.boolean(), + id: z.string(), +}) + +/** + * Segmentation + */ +export const zSegmentation = z.object({ + chunk_overlap: z.int().optional().default(0), + max_tokens: z.int(), + separator: z.string().optional().default('\n'), +}) + +/** + * Rule + */ +export const zRule = z.object({ + parent_mode: z.enum(['full-doc', 'paragraph']).nullish(), + pre_processing_rules: z.array(zPreProcessingRule).nullish(), + segmentation: zSegmentation.optional(), + subchunk_segmentation: zSegmentation.optional(), +}) + +/** + * ProcessRule + */ +export const zProcessRule = z.object({ + mode: z.enum(['automatic', 'custom', 'hierarchical']), + rules: zRule.optional(), +}) + +/** + * DocumentTextCreatePayload + */ +export const zDocumentTextCreatePayload = z.object({ + doc_form: z.string().optional().default('text_model'), + doc_language: z.string().optional().default('English'), + embedding_model: z.string().nullish(), + embedding_model_provider: z.string().nullish(), + indexing_technique: z.string().nullish(), + name: z.string(), + original_document_id: z.string().nullish(), + process_rule: zProcessRule.optional(), + retrieval_model: zRetrievalModel.optional(), + text: z.string(), +}) + +/** + * DocumentTextUpdate + */ +export const zDocumentTextUpdate = z.object({ + doc_form: z.string().optional().default('text_model'), + doc_language: z.string().optional().default('English'), + name: z.string().nullish(), + process_rule: zProcessRule.optional(), + retrieval_model: zRetrievalModel.optional(), + text: z.string().nullish(), +}) + +export const zJsonValue = z.unknown() + +/** + * HumanInputFormSubmitPayload + */ +export const zHumanInputFormSubmitPayload = z.object({ + action: z.string(), + inputs: z.record(z.string(), zJsonValue), +}) + +/** + * MetadataDetail + */ +export const 
zMetadataDetail = z.object({ + id: z.string(), + name: z.string(), + value: z.unknown().optional(), +}) + +/** + * DocumentMetadataOperation + */ +export const zDocumentMetadataOperation = z.object({ + document_id: z.string(), + metadata_list: z.array(zMetadataDetail), + partial_update: z.boolean().optional().default(false), +}) + +/** + * MetadataOperationData + * + * Metadata operation data + */ +export const zMetadataOperationData = z.object({ + operation_data: z.array(zDocumentMetadataOperation), +}) + +/** + * SegmentUpdateArgs + */ +export const zSegmentUpdateArgs = z.object({ + answer: z.string().nullish(), + attachment_ids: z.array(z.string()).nullish(), + content: z.string().nullish(), + enabled: z.boolean().nullish(), + keywords: z.array(z.string()).nullish(), + regenerate_child_chunks: z.boolean().optional().default(false), + summary: z.string().nullish(), +}) + +/** + * SegmentUpdatePayload + */ +export const zSegmentUpdatePayload = z.object({ + segment: zSegmentUpdateArgs, +}) + +/** + * SimpleAccount + */ +export const zSimpleAccount = z.object({ + email: z.string(), + id: z.string(), + name: z.string(), +}) + +/** + * SimpleEndUser + */ +export const zSimpleEndUser = z.object({ + id: z.string(), + is_anonymous: z.boolean(), + session_id: z.string().nullish(), + type: z.string(), +}) + +/** + * WorkflowRunForLogResponse + */ +export const zWorkflowRunForLogResponse = z.object({ + created_at: z.int().nullish(), + elapsed_time: z.unknown().optional(), + error: z.string().nullish(), + exceptions_count: z.int().nullish(), + finished_at: z.int().nullish(), + id: z.string(), + status: z.string().nullish(), + total_steps: z.int().nullish(), + total_tokens: z.int().nullish(), + triggered_from: z.string().nullish(), + version: z.string().nullish(), +}) + +/** + * WorkflowAppLogPartialResponse + */ +export const zWorkflowAppLogPartialResponse = z.object({ + created_at: z.int().nullish(), + created_by_account: zSimpleAccount.optional(), + created_by_end_user: 
zSimpleEndUser.optional(), + created_by_role: z.string().nullish(), + created_from: z.string().nullish(), + details: z.unknown().optional(), + id: z.string(), + workflow_run: zWorkflowRunForLogResponse.optional(), +}) + +/** + * WorkflowAppLogPaginationResponse + */ +export const zWorkflowAppLogPaginationResponse = z.object({ + data: z.array(zWorkflowAppLogPartialResponse), + has_more: z.boolean(), + limit: z.int(), + page: z.int(), + total: z.int(), +}) + +/** + * Success + */ +export const zGetRootResponse = z.record(z.string(), z.unknown()) + +export const zGetAppFeedbacksQuery = z.object({ + limit: z.int().gte(1).lte(101).optional().default(20), + page: z.int().gte(1).optional().default(1), +}) + +/** + * Feedbacks retrieved successfully + */ +export const zGetAppFeedbacksResponse = z.record(z.string(), z.unknown()) + +export const zPostAppsAnnotationReplyByActionBody = zAnnotationReplyActionPayload + +export const zPostAppsAnnotationReplyByActionPath = z.object({ + action: z.string(), +}) + +/** + * Action completed successfully + */ +export const zPostAppsAnnotationReplyByActionResponse = z.record(z.string(), z.unknown()) + +export const zGetAppsAnnotationReplyByActionStatusByJobIdPath = z.object({ + action: z.string(), + job_id: z.string(), +}) + +/** + * Job status retrieved successfully + */ +export const zGetAppsAnnotationReplyByActionStatusByJobIdResponse = z.record( + z.string(), + z.unknown(), +) + +/** + * Annotations retrieved successfully + */ +export const zGetAppsAnnotationsResponse = zAnnotationList + +export const zPostAppsAnnotationsBody = zAnnotationCreatePayload + +/** + * Annotation created successfully + */ +export const zPostAppsAnnotationsResponse = zAnnotation + +export const zDeleteAppsAnnotationsByAnnotationIdPath = z.object({ + annotation_id: z.string(), +}) + +/** + * Annotation deleted successfully + */ +export const zDeleteAppsAnnotationsByAnnotationIdResponse = z.record(z.string(), z.unknown()) + +export const 
zPutAppsAnnotationsByAnnotationIdBody = zAnnotationCreatePayload + +export const zPutAppsAnnotationsByAnnotationIdPath = z.object({ + annotation_id: z.string(), +}) + +/** + * Annotation updated successfully + */ +export const zPutAppsAnnotationsByAnnotationIdResponse = zAnnotation + +/** + * Audio successfully transcribed + */ +export const zPostAudioToTextResponse = z.record(z.string(), z.unknown()) + +export const zPostChatMessagesBody = zChatRequestPayload + +/** + * Message sent successfully + */ +export const zPostChatMessagesResponse = z.record(z.string(), z.unknown()) + +export const zPostChatMessagesByTaskIdStopPath = z.object({ + task_id: z.string(), +}) + +/** + * Task stopped successfully + */ +export const zPostChatMessagesByTaskIdStopResponse = z.record(z.string(), z.unknown()) + +export const zPostCompletionMessagesBody = zCompletionRequestPayload + +/** + * Completion created successfully + */ +export const zPostCompletionMessagesResponse = z.record(z.string(), z.unknown()) + +export const zPostCompletionMessagesByTaskIdStopPath = z.object({ + task_id: z.string(), +}) + +/** + * Task stopped successfully + */ +export const zPostCompletionMessagesByTaskIdStopResponse = z.record(z.string(), z.unknown()) + +export const zGetConversationsQuery = z.object({ + last_id: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), + sort_by: z + .enum(['created_at', '-created_at', 'updated_at', '-updated_at']) + .optional() + .default('-updated_at'), +}) + +/** + * Conversations retrieved successfully + */ +export const zGetConversationsResponse = z.record(z.string(), z.unknown()) + +export const zDeleteConversationsByCIdPath = z.object({ + c_id: z.string(), +}) + +/** + * Conversation deleted successfully + */ +export const zDeleteConversationsByCIdResponse = z.record(z.string(), z.unknown()) + +export const zPostConversationsByCIdNameBody = zConversationRenamePayload + +export const zPostConversationsByCIdNamePath = z.object({ + c_id: 
z.string(), +}) + +/** + * Conversation renamed successfully + */ +export const zPostConversationsByCIdNameResponse = z.record(z.string(), z.unknown()) + +export const zGetConversationsByCIdVariablesPath = z.object({ + c_id: z.string(), +}) + +export const zGetConversationsByCIdVariablesQuery = z.object({ + last_id: z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), + variable_name: z.string().min(1).max(255).nullish(), +}) + +/** + * Variables retrieved successfully + */ +export const zGetConversationsByCIdVariablesResponse + = zConversationVariableInfiniteScrollPaginationResponse + +export const zPutConversationsByCIdVariablesByVariableIdBody = zConversationVariableUpdatePayload + +export const zPutConversationsByCIdVariablesByVariableIdPath = z.object({ + c_id: z.string(), + variable_id: z.string(), +}) + +/** + * Variable updated successfully + */ +export const zPutConversationsByCIdVariablesByVariableIdResponse = zConversationVariableResponse + +/** + * Datasets retrieved successfully + */ +export const zGetDatasetsResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsBody = zDatasetCreatePayload + +/** + * Dataset created successfully + */ +export const zPostDatasetsResponse = z.record(z.string(), z.unknown()) + +/** + * File uploaded successfully + */ +export const zPostDatasetsPipelineFileUploadResponse = z.record(z.string(), z.unknown()) + +export const zDeleteDatasetsTagsBody = zTagDeletePayload + +/** + * Tag deleted successfully + */ +export const zDeleteDatasetsTagsResponse = z.record(z.string(), z.unknown()) + +/** + * Tags retrieved successfully + */ +export const zGetDatasetsTagsResponse = z.record(z.string(), z.unknown()) + +export const zPatchDatasetsTagsBody = zTagUpdatePayload + +/** + * Tag updated successfully + */ +export const zPatchDatasetsTagsResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsTagsBody = zTagCreatePayload + +/** + * Tag created successfully + */ +export 
const zPostDatasetsTagsResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsTagsBindingBody = zTagBindingPayload + +/** + * Tags bound successfully + */ +export const zPostDatasetsTagsBindingResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsTagsUnbindingBody = zTagUnbindingPayload + +/** + * Tag unbound successfully + */ +export const zPostDatasetsTagsUnbindingResponse = z.record(z.string(), z.unknown()) + +export const zDeleteDatasetsByDatasetIdPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Dataset deleted successfully + */ +export const zDeleteDatasetsByDatasetIdResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsByDatasetIdPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Dataset retrieved successfully + */ +export const zGetDatasetsByDatasetIdResponse = z.record(z.string(), z.unknown()) + +export const zPatchDatasetsByDatasetIdBody = zDatasetUpdatePayload + +export const zPatchDatasetsByDatasetIdPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Dataset updated successfully + */ +export const zPatchDatasetsByDatasetIdResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsByDatasetIdDocumentCreateByFilePath = z.object({ + dataset_id: z.string(), +}) + +/** + * Document created successfully + */ +export const zPostDatasetsByDatasetIdDocumentCreateByFileResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdDocumentCreateByTextBody = zDocumentTextCreatePayload + +export const zPostDatasetsByDatasetIdDocumentCreateByTextPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Document created successfully + */ +export const zPostDatasetsByDatasetIdDocumentCreateByTextResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdDocumentCreateByFile2Path = z.object({ + dataset_id: z.string(), +}) + +/** + * Document created successfully + */ +export const 
zPostDatasetsByDatasetIdDocumentCreateByFile2Response = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdDocumentCreateByText2Body = zDocumentTextCreatePayload + +export const zPostDatasetsByDatasetIdDocumentCreateByText2Path = z.object({ + dataset_id: z.string(), +}) + +/** + * Document created successfully + */ +export const zPostDatasetsByDatasetIdDocumentCreateByText2Response = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Documents retrieved successfully + */ +export const zGetDatasetsByDatasetIdDocumentsResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsByDatasetIdDocumentsDownloadZipBody = zDocumentBatchDownloadZipPayload + +export const zPostDatasetsByDatasetIdDocumentsDownloadZipPath = z.object({ + dataset_id: z.string(), +}) + +/** + * ZIP archive generated successfully + */ +export const zPostDatasetsByDatasetIdDocumentsDownloadZipResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdDocumentsMetadataBody = zMetadataOperationData + +export const zPostDatasetsByDatasetIdDocumentsMetadataPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Documents metadata updated successfully + */ +export const zPostDatasetsByDatasetIdDocumentsMetadataResponse = z.record(z.string(), z.unknown()) + +export const zPatchDatasetsByDatasetIdDocumentsStatusByActionPath = z.object({ + dataset_id: z.string(), + action: z.string(), +}) + +/** + * Document status updated successfully + */ +export const zPatchDatasetsByDatasetIdDocumentsStatusByActionResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByBatchIndexingStatusPath = z.object({ + dataset_id: z.string(), + batch: z.string(), +}) + +/** + * Indexing status retrieved successfully + */ +export const zGetDatasetsByDatasetIdDocumentsByBatchIndexingStatusResponse = z.record( + 
z.string(), + z.unknown(), +) + +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Document deleted successfully + */ +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Document retrieved successfully + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Document updated successfully + */ +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdDownloadPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Download URL generated successfully + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdDownloadResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsQuery = z.object({ + keyword: z.string().nullish(), + status: z.array(z.string()).optional(), +}) + +/** + * Segments retrieved successfully + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBody = zSegmentCreatePayload + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Segments created successfully + */ 
+export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), + segment_id: z.string(), +}) + +/** + * Segment deleted successfully + */ +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath = z.object({ + segment_id: z.string(), + document_id: z.string(), + dataset_id: z.string(), +}) + +/** + * Segment retrieved successfully + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdBody + = zSegmentUpdatePayload + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), + segment_id: z.string(), +}) + +/** + * Segment updated successfully + */ +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath + = z.object({ + dataset_id: z.string(), + document_id: z.string(), + segment_id: z.string(), + }) + +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksQuery + = z.object({ + keyword: z.string().nullish(), + limit: z.int().gte(1).optional().default(20), + page: z.int().gte(1).optional().default(1), + }) + +/** + * Child chunks retrieved successfully + */ +export const zGetDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse + = z.record(z.string(), z.unknown()) + +export const 
zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksBody + = zChildChunkCreatePayload + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksPath + = z.object({ + dataset_id: z.string(), + document_id: z.string(), + segment_id: z.string(), + }) + +/** + * Child chunk created successfully + */ +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksResponse + = z.record(z.string(), z.unknown()) + +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdPath + = z.object({ + dataset_id: z.string(), + document_id: z.string(), + segment_id: z.string(), + child_chunk_id: z.string(), + }) + +/** + * Child chunk deleted successfully + */ +export const zDeleteDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse + = z.record(z.string(), z.unknown()) + +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdBody + = zChildChunkUpdatePayload + +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdPath + = z.object({ + dataset_id: z.string(), + document_id: z.string(), + segment_id: z.string(), + child_chunk_id: z.string(), + }) + +/** + * Child chunk updated successfully + */ +export const zPatchDatasetsByDatasetIdDocumentsByDocumentIdSegmentsBySegmentIdChildChunksByChildChunkIdResponse + = z.record(z.string(), z.unknown()) + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFilePath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Document updated successfully + */ +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFileResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextBody = zDocumentTextUpdate + +export const 
zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextPath = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Document updated successfully + */ +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByTextResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Path = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Document updated successfully + */ +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByFile2Response = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Body = zDocumentTextUpdate + +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Path = z.object({ + dataset_id: z.string(), + document_id: z.string(), +}) + +/** + * Document updated successfully + */ +export const zPostDatasetsByDatasetIdDocumentsByDocumentIdUpdateByText2Response = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdHitTestingBody = zHitTestingPayload + +export const zPostDatasetsByDatasetIdHitTestingPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Hit testing results + */ +export const zPostDatasetsByDatasetIdHitTestingResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsByDatasetIdMetadataPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Metadata retrieved successfully + */ +export const zGetDatasetsByDatasetIdMetadataResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsByDatasetIdMetadataBody = zMetadataArgs + +export const zPostDatasetsByDatasetIdMetadataPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Metadata created successfully + */ +export const zPostDatasetsByDatasetIdMetadataResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsByDatasetIdMetadataBuiltInPath = z.object({ + dataset_id: z.string(), +}) + +/** + * 
Built-in fields retrieved successfully + */ +export const zGetDatasetsByDatasetIdMetadataBuiltInResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsByDatasetIdMetadataBuiltInByActionPath = z.object({ + dataset_id: z.string(), + action: z.string(), +}) + +/** + * Action completed successfully + */ +export const zPostDatasetsByDatasetIdMetadataBuiltInByActionResponse = z.record( + z.string(), + z.unknown(), +) + +export const zDeleteDatasetsByDatasetIdMetadataByMetadataIdPath = z.object({ + dataset_id: z.string(), + metadata_id: z.string(), +}) + +/** + * Metadata deleted successfully + */ +export const zDeleteDatasetsByDatasetIdMetadataByMetadataIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPatchDatasetsByDatasetIdMetadataByMetadataIdBody = zMetadataUpdatePayload + +export const zPatchDatasetsByDatasetIdMetadataByMetadataIdPath = z.object({ + dataset_id: z.string(), + metadata_id: z.string(), +}) + +/** + * Metadata updated successfully + */ +export const zPatchDatasetsByDatasetIdMetadataByMetadataIdResponse = z.record( + z.string(), + z.unknown(), +) + +export const zGetDatasetsByDatasetIdPipelineDatasourcePluginsPath = z.object({ + dataset_id: z.string(), +}) + +export const zGetDatasetsByDatasetIdPipelineDatasourcePluginsQuery = z.object({ + is_published: z.string().optional(), +}) + +/** + * Datasource plugins retrieved successfully + */ +export const zGetDatasetsByDatasetIdPipelineDatasourcePluginsResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunPath = z.object({ + dataset_id: z.string(), + node_id: z.string(), +}) + +/** + * Datasource node run successfully + */ +export const zPostDatasetsByDatasetIdPipelineDatasourceNodesByNodeIdRunResponse = z.record( + z.string(), + z.unknown(), +) + +export const zPostDatasetsByDatasetIdPipelineRunPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Pipeline run successfully + */ +export const 
zPostDatasetsByDatasetIdPipelineRunResponse = z.record(z.string(), z.unknown()) + +export const zPostDatasetsByDatasetIdRetrieveBody = zHitTestingPayload + +export const zPostDatasetsByDatasetIdRetrievePath = z.object({ + dataset_id: z.string(), +}) + +/** + * Hit testing results + */ +export const zPostDatasetsByDatasetIdRetrieveResponse = z.record(z.string(), z.unknown()) + +export const zGetDatasetsByDatasetIdTagsPath = z.object({ + dataset_id: z.string(), +}) + +/** + * Tags retrieved successfully + */ +export const zGetDatasetsByDatasetIdTagsResponse = z.record(z.string(), z.unknown()) + +export const zGetEndUsersByEndUserIdPath = z.object({ + end_user_id: z.string(), +}) + +/** + * End user retrieved successfully + */ +export const zGetEndUsersByEndUserIdResponse = z.record(z.string(), z.unknown()) + +/** + * File uploaded successfully + */ +export const zPostFilesUploadResponse = zFileResponse + +export const zGetFilesByFileIdPreviewPath = z.object({ + file_id: z.string(), +}) + +export const zGetFilesByFileIdPreviewQuery = z.object({ + as_attachment: z.boolean().optional().default(false), +}) + +/** + * File retrieved successfully + */ +export const zGetFilesByFileIdPreviewResponse = z.record(z.string(), z.unknown()) + +export const zGetFormHumanInputByFormTokenPath = z.object({ + form_token: z.string(), +}) + +/** + * Form retrieved successfully + */ +export const zGetFormHumanInputByFormTokenResponse = z.record(z.string(), z.unknown()) + +export const zPostFormHumanInputByFormTokenBody = zHumanInputFormSubmitPayload + +export const zPostFormHumanInputByFormTokenPath = z.object({ + form_token: z.string(), +}) + +/** + * Form submitted successfully + */ +export const zPostFormHumanInputByFormTokenResponse = z.record(z.string(), z.unknown()) + +/** + * Application info retrieved successfully + */ +export const zGetInfoResponse = z.record(z.string(), z.unknown()) + +export const zGetMessagesQuery = z.object({ + conversation_id: z.string(), + first_id: 
z.string().nullish(), + limit: z.int().gte(1).lte(100).optional().default(20), +}) + +/** + * Messages retrieved successfully + */ +export const zGetMessagesResponse = z.record(z.string(), z.unknown()) + +export const zPostMessagesByMessageIdFeedbacksBody = zMessageFeedbackPayload + +export const zPostMessagesByMessageIdFeedbacksPath = z.object({ + message_id: z.string(), +}) + +/** + * Feedback submitted successfully + */ +export const zPostMessagesByMessageIdFeedbacksResponse = z.record(z.string(), z.unknown()) + +export const zGetMessagesByMessageIdSuggestedPath = z.object({ + message_id: z.string(), +}) + +/** + * Suggested questions retrieved successfully + */ +export const zGetMessagesByMessageIdSuggestedResponse = z.record(z.string(), z.unknown()) + +/** + * Metadata retrieved successfully + */ +export const zGetMetaResponse = z.record(z.string(), z.unknown()) + +/** + * Parameters retrieved successfully + */ +export const zGetParametersResponse = z.record(z.string(), z.unknown()) + +/** + * Site configuration retrieved successfully + */ +export const zGetSiteResponse = z.record(z.string(), z.unknown()) + +export const zPostTextToAudioBody = zTextToAudioPayload + +/** + * Text successfully converted to audio + */ +export const zPostTextToAudioResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkflowByTaskIdEventsPath = z.object({ + task_id: z.string(), +}) + +export const zGetWorkflowByTaskIdEventsQuery = z.object({ + user: z.string().optional(), + include_state_snapshot: z.string().optional(), + continue_on_pause: z.string().optional(), +}) + +/** + * SSE event stream + */ +export const zGetWorkflowByTaskIdEventsResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkflowsLogsQuery = z.object({ + created_at__after: z.string().nullish(), + created_at__before: z.string().nullish(), + created_by_account: z.string().nullish(), + created_by_end_user_session_id: z.string().nullish(), + keyword: z.string().nullish(), + limit: 
z.int().gte(1).lte(100).optional().default(20), + page: z.int().gte(1).lte(99999).optional().default(1), + status: z.enum(['succeeded', 'failed', 'stopped']).nullish(), +}) + +/** + * Logs retrieved successfully + */ +export const zGetWorkflowsLogsResponse = zWorkflowAppLogPaginationResponse + +export const zPostWorkflowsRunBody = zWorkflowRunPayload + +/** + * Workflow executed successfully + */ +export const zPostWorkflowsRunResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkflowsRunByWorkflowRunIdPath = z.object({ + workflow_run_id: z.string(), +}) + +/** + * Workflow run details retrieved successfully + */ +export const zGetWorkflowsRunByWorkflowRunIdResponse = zWorkflowRunResponse + +export const zPostWorkflowsTasksByTaskIdStopPath = z.object({ + task_id: z.string(), +}) + +/** + * Task stopped successfully + */ +export const zPostWorkflowsTasksByTaskIdStopResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkflowsByWorkflowIdRunBody = zWorkflowRunPayload + +export const zPostWorkflowsByWorkflowIdRunPath = z.object({ + workflow_id: z.string(), +}) + +/** + * Workflow executed successfully + */ +export const zPostWorkflowsByWorkflowIdRunResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkspacesCurrentModelsModelTypesByModelTypePath = z.object({ + model_type: z.string(), +}) + +/** + * Models retrieved successfully + */ +export const zGetWorkspacesCurrentModelsModelTypesByModelTypeResponse = z.record( + z.string(), + z.unknown(), +) diff --git a/packages/contracts/generated/api/web/orpc.gen.ts b/packages/contracts/generated/api/web/orpc.gen.ts new file mode 100644 index 0000000000..459d556145 --- /dev/null +++ b/packages/contracts/generated/api/web/orpc.gen.ts @@ -0,0 +1,1085 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from '@orpc/contract' +import * as z from 'zod' + +import { + zDeleteConversationsByCIdPath, + zDeleteConversationsByCIdResponse, + 
zDeleteSavedMessagesByMessageIdPath, + zDeleteSavedMessagesByMessageIdResponse, + zGetConversationsQuery, + zGetConversationsResponse, + zGetFormHumanInputByFormTokenPath, + zGetFormHumanInputByFormTokenResponse, + zGetLoginStatusResponse, + zGetMessagesByMessageIdMoreLikeThisPath, + zGetMessagesByMessageIdMoreLikeThisQuery, + zGetMessagesByMessageIdMoreLikeThisResponse, + zGetMessagesByMessageIdSuggestedQuestionsPath, + zGetMessagesByMessageIdSuggestedQuestionsResponse, + zGetMessagesQuery, + zGetMessagesResponse, + zGetMetaResponse, + zGetParametersResponse, + zGetPassportResponse, + zGetRemoteFilesByUrlPath, + zGetRemoteFilesByUrlResponse, + zGetSavedMessagesQuery, + zGetSavedMessagesResponse, + zGetSiteResponse, + zGetSystemFeaturesResponse, + zGetWebappAccessModeQuery, + zGetWebappAccessModeResponse, + zGetWebappPermissionQuery, + zGetWebappPermissionResponse, + zGetWorkflowByTaskIdEventsPath, + zGetWorkflowByTaskIdEventsResponse, + zPatchConversationsByCIdPinPath, + zPatchConversationsByCIdPinResponse, + zPatchConversationsByCIdUnpinPath, + zPatchConversationsByCIdUnpinResponse, + zPostAudioToTextResponse, + zPostChatMessagesBody, + zPostChatMessagesByTaskIdStopPath, + zPostChatMessagesByTaskIdStopResponse, + zPostChatMessagesResponse, + zPostCompletionMessagesBody, + zPostCompletionMessagesByTaskIdStopPath, + zPostCompletionMessagesByTaskIdStopResponse, + zPostCompletionMessagesResponse, + zPostConversationsByCIdNamePath, + zPostConversationsByCIdNameQuery, + zPostConversationsByCIdNameResponse, + zPostEmailCodeLoginBody, + zPostEmailCodeLoginResponse, + zPostEmailCodeLoginValidityBody, + zPostEmailCodeLoginValidityResponse, + zPostFilesUploadResponse, + zPostForgotPasswordBody, + zPostForgotPasswordResetsBody, + zPostForgotPasswordResetsResponse, + zPostForgotPasswordResponse, + zPostForgotPasswordValidityBody, + zPostForgotPasswordValidityResponse, + zPostFormHumanInputByFormTokenPath, + zPostFormHumanInputByFormTokenResponse, + zPostLoginBody, + 
zPostLoginResponse, + zPostLogoutResponse, + zPostMessagesByMessageIdFeedbacksPath, + zPostMessagesByMessageIdFeedbacksQuery, + zPostMessagesByMessageIdFeedbacksResponse, + zPostRemoteFilesUploadResponse, + zPostSavedMessagesQuery, + zPostSavedMessagesResponse, + zPostTextToAudioBody, + zPostTextToAudioResponse, + zPostWorkflowsRunBody, + zPostWorkflowsRunResponse, + zPostWorkflowsTasksByTaskIdStopPath, + zPostWorkflowsTasksByTaskIdStopResponse, +} from './zod.gen' + +/** + * Convert audio to text + * + * Convert audio file to text using speech-to-text service. + */ +export const post = oc + .route({ + description: 'Convert audio file to text using speech-to-text service.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postAudioToText', + path: '/audio-to-text', + summary: 'Convert audio to text', + tags: ['web'], + }) + .output(zPostAudioToTextResponse) + +export const audioToText = { + post, +} + +/** + * Stop a running chat message task. + */ +export const post2 = oc + .route({ + description: 'Stop a running chat message task.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postChatMessagesByTaskIdStop', + path: '/chat-messages/{task_id}/stop', + tags: ['web'], + }) + .input(z.object({ params: zPostChatMessagesByTaskIdStopPath })) + .output(zPostChatMessagesByTaskIdStopResponse) + +export const stop = { + post: post2, +} + +export const byTaskId = { + stop, +} + +/** + * Create a chat message for conversational applications. + */ +export const post3 = oc + .route({ + description: 'Create a chat message for conversational applications.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postChatMessages', + path: '/chat-messages', + tags: ['web'], + }) + .input(z.object({ body: zPostChatMessagesBody })) + .output(zPostChatMessagesResponse) + +export const chatMessages = { + post: post3, + byTaskId, +} + +/** + * Stop a running completion message task. 
+ */ +export const post4 = oc + .route({ + description: 'Stop a running completion message task.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postCompletionMessagesByTaskIdStop', + path: '/completion-messages/{task_id}/stop', + tags: ['web'], + }) + .input(z.object({ params: zPostCompletionMessagesByTaskIdStopPath })) + .output(zPostCompletionMessagesByTaskIdStopResponse) + +export const stop2 = { + post: post4, +} + +export const byTaskId2 = { + stop: stop2, +} + +/** + * Create a completion message for text generation applications. + */ +export const post5 = oc + .route({ + description: 'Create a completion message for text generation applications.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postCompletionMessages', + path: '/completion-messages', + tags: ['web'], + }) + .input(z.object({ body: zPostCompletionMessagesBody })) + .output(zPostCompletionMessagesResponse) + +export const completionMessages = { + post: post5, + byTaskId: byTaskId2, +} + +/** + * Rename a specific conversation with a custom name or auto-generate one. + */ +export const post6 = oc + .route({ + description: 'Rename a specific conversation with a custom name or auto-generate one.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postConversationsByCIdName', + path: '/conversations/{c_id}/name', + tags: ['web'], + }) + .input( + z.object({ + params: zPostConversationsByCIdNamePath, + query: zPostConversationsByCIdNameQuery.optional(), + }), + ) + .output(zPostConversationsByCIdNameResponse) + +export const name = { + post: post6, +} + +/** + * Pin a specific conversation to keep it at the top of the list. 
+ */ +export const patch = oc + .route({ + description: 'Pin a specific conversation to keep it at the top of the list.', + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchConversationsByCIdPin', + path: '/conversations/{c_id}/pin', + tags: ['web'], + }) + .input(z.object({ params: zPatchConversationsByCIdPinPath })) + .output(zPatchConversationsByCIdPinResponse) + +export const pin = { + patch, +} + +/** + * Unpin a specific conversation to remove it from the top of the list. + */ +export const patch2 = oc + .route({ + description: 'Unpin a specific conversation to remove it from the top of the list.', + inputStructure: 'detailed', + method: 'PATCH', + operationId: 'patchConversationsByCIdUnpin', + path: '/conversations/{c_id}/unpin', + tags: ['web'], + }) + .input(z.object({ params: zPatchConversationsByCIdUnpinPath })) + .output(zPatchConversationsByCIdUnpinResponse) + +export const unpin = { + patch: patch2, +} + +/** + * Delete a specific conversation. + */ +export const delete_ = oc + .route({ + description: 'Delete a specific conversation.', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteConversationsByCId', + path: '/conversations/{c_id}', + successStatus: 204, + tags: ['web'], + }) + .input(z.object({ params: zDeleteConversationsByCIdPath })) + .output(zDeleteConversationsByCIdResponse) + +export const byCId = { + delete: delete_, + name, + pin, + unpin, +} + +/** + * Retrieve paginated list of conversations for a chat application. 
+ */ +export const get = oc + .route({ + description: 'Retrieve paginated list of conversations for a chat application.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getConversations', + path: '/conversations', + tags: ['web'], + }) + .input(z.object({ query: zGetConversationsQuery.optional() })) + .output(zGetConversationsResponse) + +export const conversations = { + get, + byCId, +} + +/** + * Verify email code and complete login + */ +export const post7 = oc + .route({ + description: 'Verify email code and complete login', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postEmailCodeLoginValidity', + path: '/email-code-login/validity', + tags: ['web'], + }) + .input(z.object({ body: zPostEmailCodeLoginValidityBody })) + .output(zPostEmailCodeLoginValidityResponse) + +export const validity = { + post: post7, +} + +/** + * Send email verification code for login + */ +export const post8 = oc + .route({ + description: 'Send email verification code for login', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postEmailCodeLogin', + path: '/email-code-login', + tags: ['web'], + }) + .input(z.object({ body: zPostEmailCodeLoginBody })) + .output(zPostEmailCodeLoginResponse) + +export const emailCodeLogin = { + post: post8, + validity, +} + +/** + * Upload a file for use in web applications + * + * Upload a file for use in web applications + * Accepts file uploads for use within web applications, supporting + * multiple file types with automatic validation and storage. 
+ * + * Args: + * app_model: The associated application model + * end_user: The end user uploading the file + * + * Form Parameters: + * file: The file to upload (required) + * source: Optional source type (datasets or None) + * + * Returns: + * dict: File information including ID, URL, and metadata + * int: HTTP status code 201 for success + * + * Raises: + * NoFileUploadedError: No file provided in request + * TooManyFilesError: Multiple files provided (only one allowed) + * FilenameNotExistsError: File has no filename + * FileTooLargeError: File exceeds size limit + * UnsupportedFileTypeError: File type not supported + */ +export const post9 = oc + .route({ + description: + 'Upload a file for use in web applications\nAccepts file uploads for use within web applications, supporting\nmultiple file types with automatic validation and storage.\n\nArgs:\n app_model: The associated application model\n end_user: The end user uploading the file\n\nForm Parameters:\n file: The file to upload (required)\n source: Optional source type (datasets or None)\n\nReturns:\n dict: File information including ID, URL, and metadata\n int: HTTP status code 201 for success\n\nRaises:\n NoFileUploadedError: No file provided in request\n TooManyFilesError: Multiple files provided (only one allowed)\n FilenameNotExistsError: File has no filename\n FileTooLargeError: File exceeds size limit\n UnsupportedFileTypeError: File type not supported', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postFilesUpload', + path: '/files/upload', + successStatus: 201, + summary: 'Upload a file for use in web applications', + tags: ['web'], + }) + .output(zPostFilesUploadResponse) + +export const upload = { + post: post9, +} + +export const files = { + upload, +} + +/** + * Reset user password with verification token + */ +export const post10 = oc + .route({ + description: 'Reset user password with verification token', + inputStructure: 'detailed', + method: 'POST', + operationId: 
'postForgotPasswordResets', + path: '/forgot-password/resets', + tags: ['web'], + }) + .input(z.object({ body: zPostForgotPasswordResetsBody })) + .output(zPostForgotPasswordResetsResponse) + +export const resets = { + post: post10, +} + +/** + * Verify password reset token validity + */ +export const post11 = oc + .route({ + description: 'Verify password reset token validity', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postForgotPasswordValidity', + path: '/forgot-password/validity', + tags: ['web'], + }) + .input(z.object({ body: zPostForgotPasswordValidityBody })) + .output(zPostForgotPasswordValidityResponse) + +export const validity2 = { + post: post11, +} + +/** + * Send password reset email + */ +export const post12 = oc + .route({ + description: 'Send password reset email', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postForgotPassword', + path: '/forgot-password', + tags: ['web'], + }) + .input(z.object({ body: zPostForgotPasswordBody })) + .output(zPostForgotPasswordResponse) + +export const forgotPassword = { + post: post12, + resets, + validity: validity2, +} + +/** + * Get human input form definition by token + * + * GET /api/form/human_input/ + */ +export const get2 = oc + .route({ + description: 'GET /api/form/human_input/', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getFormHumanInputByFormToken', + path: '/form/human_input/{form_token}', + summary: 'Get human input form definition by token', + tags: ['web'], + }) + .input(z.object({ params: zGetFormHumanInputByFormTokenPath })) + .output(zGetFormHumanInputByFormTokenResponse) + +/** + * Submit human input form by token + * + * POST /api/form/human_input/ + * + * Request body: + * { + * "inputs": { + * "content": "User input content" + * }, + * "action": "Approve" + * } + */ +export const post13 = oc + .route({ + description: + 'POST /api/form/human_input/\n\nRequest body:\n{\n "inputs": {\n "content": "User input content"\n },\n "action": 
"Approve"\n}', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postFormHumanInputByFormToken', + path: '/form/human_input/{form_token}', + summary: 'Submit human input form by token', + tags: ['web'], + }) + .input(z.object({ params: zPostFormHumanInputByFormTokenPath })) + .output(zPostFormHumanInputByFormTokenResponse) + +export const byFormToken = { + get: get2, + post: post13, +} + +export const humanInput = { + byFormToken, +} + +export const form = { + humanInput, +} + +/** + * Check login status + */ +export const get3 = oc + .route({ + description: 'Check login status', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getLoginStatus', + path: '/login/status', + tags: ['web'], + }) + .output(zGetLoginStatusResponse) + +export const status = { + get: get3, +} + +/** + * Authenticate user and login + * + * Authenticate user for web application access + */ +export const post14 = oc + .route({ + description: 'Authenticate user for web application access', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postLogin', + path: '/login', + summary: 'Authenticate user and login', + tags: ['web'], + }) + .input(z.object({ body: zPostLoginBody })) + .output(zPostLoginResponse) + +export const login = { + post: post14, + status, +} + +/** + * Logout user from web application + */ +export const post15 = oc + .route({ + description: 'Logout user from web application', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postLogout', + path: '/logout', + tags: ['web'], + }) + .output(zPostLogoutResponse) + +export const logout = { + post: post15, +} + +/** + * Submit feedback (like/dislike) for a specific message. 
+ */ +export const post16 = oc + .route({ + description: 'Submit feedback (like/dislike) for a specific message.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postMessagesByMessageIdFeedbacks', + path: '/messages/{message_id}/feedbacks', + tags: ['web'], + }) + .input( + z.object({ + params: zPostMessagesByMessageIdFeedbacksPath, + query: zPostMessagesByMessageIdFeedbacksQuery.optional(), + }), + ) + .output(zPostMessagesByMessageIdFeedbacksResponse) + +export const feedbacks = { + post: post16, +} + +/** + * Generate a new completion similar to an existing message (completion apps only). + */ +export const get4 = oc + .route({ + description: 'Generate a new completion similar to an existing message (completion apps only).', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getMessagesByMessageIdMoreLikeThis', + path: '/messages/{message_id}/more-like-this', + tags: ['web'], + }) + .input( + z.object({ + params: zGetMessagesByMessageIdMoreLikeThisPath, + query: zGetMessagesByMessageIdMoreLikeThisQuery, + }), + ) + .output(zGetMessagesByMessageIdMoreLikeThisResponse) + +export const moreLikeThis = { + get: get4, +} + +/** + * Get suggested follow-up questions after a message (chat apps only). + */ +export const get5 = oc + .route({ + description: 'Get suggested follow-up questions after a message (chat apps only).', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getMessagesByMessageIdSuggestedQuestions', + path: '/messages/{message_id}/suggested-questions', + tags: ['web'], + }) + .input(z.object({ params: zGetMessagesByMessageIdSuggestedQuestionsPath })) + .output(zGetMessagesByMessageIdSuggestedQuestionsResponse) + +export const suggestedQuestions = { + get: get5, +} + +export const byMessageId = { + feedbacks, + moreLikeThis, + suggestedQuestions, +} + +/** + * Retrieve paginated list of messages from a conversation in a chat application. 
+ */ +export const get6 = oc + .route({ + description: 'Retrieve paginated list of messages from a conversation in a chat application.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getMessages', + path: '/messages', + tags: ['web'], + }) + .input(z.object({ query: zGetMessagesQuery })) + .output(zGetMessagesResponse) + +export const messages = { + get: get6, + byMessageId, +} + +/** + * Get app meta + * + * Retrieve the metadata for a specific app. + */ +export const get7 = oc + .route({ + description: 'Retrieve the metadata for a specific app.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getMeta', + path: '/meta', + summary: 'Get app meta', + tags: ['web'], + }) + .output(zGetMetaResponse) + +export const meta = { + get: get7, +} + +/** + * Retrieve app parameters + * + * Retrieve the parameters for a specific app. + */ +export const get8 = oc + .route({ + description: 'Retrieve the parameters for a specific app.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getParameters', + path: '/parameters', + summary: 'Retrieve app parameters', + tags: ['web'], + }) + .output(zGetParametersResponse) + +export const parameters = { + get: get8, +} + +/** + * Get authentication passport for web application access + */ +export const get9 = oc + .route({ + description: 'Get authentication passport for web application access', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getPassport', + path: '/passport', + tags: ['web'], + }) + .output(zGetPassportResponse) + +export const passport = { + get: get9, +} + +/** + * Upload a file from a remote URL + * + * Upload a file from a remote URL + * Downloads a file from the provided remote URL and uploads it + * to the platform storage for use in web applications. 
+ * + * Args: + * app_model: The associated application model + * end_user: The end user making the request + * + * JSON Parameters: + * url: The remote URL to download the file from (required) + * + * Returns: + * dict: File information including ID, signed URL, and metadata + * int: HTTP status code 201 for success + * + * Raises: + * RemoteFileUploadError: Failed to fetch file from remote URL + * FileTooLargeError: File exceeds size limit + * UnsupportedFileTypeError: File type not supported + */ +export const post17 = oc + .route({ + description: + 'Upload a file from a remote URL\nDownloads a file from the provided remote URL and uploads it\nto the platform storage for use in web applications.\n\nArgs:\n app_model: The associated application model\n end_user: The end user making the request\n\nJSON Parameters:\n url: The remote URL to download the file from (required)\n\nReturns:\n dict: File information including ID, signed URL, and metadata\n int: HTTP status code 201 for success\n\nRaises:\n RemoteFileUploadError: Failed to fetch file from remote URL\n FileTooLargeError: File exceeds size limit\n UnsupportedFileTypeError: File type not supported', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postRemoteFilesUpload', + path: '/remote-files/upload', + successStatus: 201, + summary: 'Upload a file from a remote URL', + tags: ['web'], + }) + .output(zPostRemoteFilesUploadResponse) + +export const upload2 = { + post: post17, +} + +/** + * Get information about a remote file + * + * Get information about a remote file + * Retrieves basic information about a file located at a remote URL, + * including content type and content length. 
+ * + * Args: + * app_model: The associated application model + * end_user: The end user making the request + * url: URL-encoded path to the remote file + * + * Returns: + * dict: Remote file information including type and length + * + * Raises: + * HTTPException: If the remote file cannot be accessed + */ +export const get10 = oc + .route({ + description: + 'Get information about a remote file\nRetrieves basic information about a file located at a remote URL,\nincluding content type and content length.\n\nArgs:\n app_model: The associated application model\n end_user: The end user making the request\n url: URL-encoded path to the remote file\n\nReturns:\n dict: Remote file information including type and length\n\nRaises:\n HTTPException: If the remote file cannot be accessed', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getRemoteFilesByUrl', + path: '/remote-files/{url}', + summary: 'Get information about a remote file', + tags: ['web'], + }) + .input(z.object({ params: zGetRemoteFilesByUrlPath })) + .output(zGetRemoteFilesByUrlResponse) + +export const byUrl = { + get: get10, +} + +export const remoteFiles = { + upload: upload2, + byUrl, +} + +/** + * Remove a message from saved messages. + */ +export const delete2 = oc + .route({ + description: 'Remove a message from saved messages.', + inputStructure: 'detailed', + method: 'DELETE', + operationId: 'deleteSavedMessagesByMessageId', + path: '/saved-messages/{message_id}', + successStatus: 204, + tags: ['web'], + }) + .input(z.object({ params: zDeleteSavedMessagesByMessageIdPath })) + .output(zDeleteSavedMessagesByMessageIdResponse) + +export const byMessageId2 = { + delete: delete2, +} + +/** + * Retrieve paginated list of saved messages for a completion application. 
+ */ +export const get11 = oc + .route({ + description: 'Retrieve paginated list of saved messages for a completion application.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getSavedMessages', + path: '/saved-messages', + tags: ['web'], + }) + .input(z.object({ query: zGetSavedMessagesQuery.optional() })) + .output(zGetSavedMessagesResponse) + +/** + * Save a specific message for later reference. + */ +export const post18 = oc + .route({ + description: 'Save a specific message for later reference.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postSavedMessages', + path: '/saved-messages', + tags: ['web'], + }) + .input(z.object({ query: zPostSavedMessagesQuery })) + .output(zPostSavedMessagesResponse) + +export const savedMessages = { + get: get11, + post: post18, + byMessageId: byMessageId2, +} + +/** + * Retrieve app site info + * + * Retrieve app site information and configuration. + */ +export const get12 = oc + .route({ + description: 'Retrieve app site information and configuration.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getSite', + path: '/site', + summary: 'Retrieve app site info', + tags: ['web'], + }) + .output(zGetSiteResponse) + +export const site = { + get: get12, +} + +/** + * Get system feature flags and configuration + * + * Get system feature flags and configuration + * Returns the current system feature flags and configuration + * that control various functionalities across the platform. + * + * Returns: + * dict: System feature configuration object + * + * This endpoint is akin to the `SystemFeatureApi` endpoint in api/controllers/console/feature.py, + * except it is intended for use by the web app, instead of the console dashboard. + * + * NOTE: This endpoint is unauthenticated by design, as it provides system features + * data required for webapp initialization. + * + * Authentication would create circular dependency (can't authenticate without webapp loading). 
+ * + * Only non-sensitive configuration data should be returned by this endpoint. + */ +export const get13 = oc + .route({ + description: + 'Get system feature flags and configuration\nReturns the current system feature flags and configuration\nthat control various functionalities across the platform.\n\nReturns:\n dict: System feature configuration object\n\nThis endpoint is akin to the `SystemFeatureApi` endpoint in api/controllers/console/feature.py,\nexcept it is intended for use by the web app, instead of the console dashboard.\n\nNOTE: This endpoint is unauthenticated by design, as it provides system features\ndata required for webapp initialization.\n\nAuthentication would create circular dependency (can\'t authenticate without webapp loading).\n\nOnly non-sensitive configuration data should be returned by this endpoint.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getSystemFeatures', + path: '/system-features', + summary: 'Get system feature flags and configuration', + tags: ['web'], + }) + .output(zGetSystemFeaturesResponse) + +export const systemFeatures = { + get: get13, +} + +/** + * Convert text to audio + * + * Convert text to audio using text-to-speech service. + */ +export const post19 = oc + .route({ + description: 'Convert text to audio using text-to-speech service.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postTextToAudio', + path: '/text-to-audio', + summary: 'Convert text to audio', + tags: ['web'], + }) + .input(z.object({ body: zPostTextToAudioBody })) + .output(zPostTextToAudioResponse) + +export const textToAudio = { + post: post19, +} + +/** + * Retrieve the access mode for a web application (public or restricted). 
+ */ +export const get14 = oc + .route({ + description: 'Retrieve the access mode for a web application (public or restricted).', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWebappAccessMode', + path: '/webapp/access-mode', + tags: ['web'], + }) + .input(z.object({ query: zGetWebappAccessModeQuery.optional() })) + .output(zGetWebappAccessModeResponse) + +export const accessMode = { + get: get14, +} + +/** + * Check if user has permission to access a web application. + */ +export const get15 = oc + .route({ + description: 'Check if user has permission to access a web application.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWebappPermission', + path: '/webapp/permission', + tags: ['web'], + }) + .input(z.object({ query: zGetWebappPermissionQuery })) + .output(zGetWebappPermissionResponse) + +export const permission = { + get: get15, +} + +export const webapp = { + accessMode, + permission, +} + +/** + * Get workflow execution events stream after resume + * + * GET /api/workflow//events + * + * Returns Server-Sent Events stream. + */ +export const get16 = oc + .route({ + description: 'GET /api/workflow//events\n\nReturns Server-Sent Events stream.', + inputStructure: 'detailed', + method: 'GET', + operationId: 'getWorkflowByTaskIdEvents', + path: '/workflow/{task_id}/events', + summary: 'Get workflow execution events stream after resume', + tags: ['default'], + }) + .input(z.object({ params: zGetWorkflowByTaskIdEventsPath })) + .output(zGetWorkflowByTaskIdEventsResponse) + +export const events = { + get: get16, +} + +export const byTaskId3 = { + events, +} + +export const workflow = { + byTaskId: byTaskId3, +} + +/** + * Run workflow + * + * Execute a workflow with provided inputs and files. 
+ */ +export const post20 = oc + .route({ + description: 'Execute a workflow with provided inputs and files.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkflowsRun', + path: '/workflows/run', + summary: 'Run workflow', + tags: ['web'], + }) + .input(z.object({ body: zPostWorkflowsRunBody })) + .output(zPostWorkflowsRunResponse) + +export const run = { + post: post20, +} + +/** + * Stop workflow task + * + * Stop a running workflow task. + */ +export const post21 = oc + .route({ + description: 'Stop a running workflow task.', + inputStructure: 'detailed', + method: 'POST', + operationId: 'postWorkflowsTasksByTaskIdStop', + path: '/workflows/tasks/{task_id}/stop', + summary: 'Stop workflow task', + tags: ['web'], + }) + .input(z.object({ params: zPostWorkflowsTasksByTaskIdStopPath })) + .output(zPostWorkflowsTasksByTaskIdStopResponse) + +export const stop3 = { + post: post21, +} + +export const byTaskId4 = { + stop: stop3, +} + +export const tasks = { + byTaskId: byTaskId4, +} + +export const workflows = { + run, + tasks, +} + +export const contract = { + audioToText, + chatMessages, + completionMessages, + conversations, + emailCodeLogin, + files, + forgotPassword, + form, + login, + logout, + messages, + meta, + parameters, + passport, + remoteFiles, + savedMessages, + site, + systemFeatures, + textToAudio, + webapp, + workflow, + workflows, +} diff --git a/packages/contracts/generated/api/web/types.gen.ts b/packages/contracts/generated/api/web/types.gen.ts new file mode 100644 index 0000000000..f2009b966b --- /dev/null +++ b/packages/contracts/generated/api/web/types.gen.ts @@ -0,0 +1,1461 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}/api` | (string & {}) +} + +export type ChatMessagePayload = { + conversation_id?: string | null + files?: Array<{ + [key: string]: unknown + }> | null + inputs: { + [key: string]: unknown + } + parent_message_id?: string | null 
+ query: string + response_mode?: 'blocking' | 'streaming' | null + retriever_from?: string +} + +export type CompletionMessagePayload = { + files?: Array<{ + [key: string]: unknown + }> | null + inputs: { + [key: string]: unknown + } + query?: string + response_mode?: 'blocking' | 'streaming' | null + retriever_from?: string +} + +export type EmailCodeLoginSendPayload = { + email: string + language?: string | null +} + +export type EmailCodeLoginVerifyPayload = { + code: string + email: string + token: string +} + +export type FileResponse = { + conversation_id?: string | null + created_at?: number | null + created_by?: string | null + extension?: string | null + file_key?: string | null + id: string + mime_type?: string | null + name: string + original_url?: string | null + preview_url?: string | null + size: number + source_url?: string | null + tenant_id?: string | null + user_id?: string | null +} + +export type FileWithSignedUrl = { + created_at?: number | null + created_by?: string | null + extension?: string | null + id: string + mime_type?: string | null + name: string + size: number + url?: string | null +} + +export type ForgotPasswordCheckPayload = { + code: string + email: string + token: string +} + +export type ForgotPasswordResetPayload = { + new_password: string + password_confirm: string + token: string +} + +export type ForgotPasswordSendPayload = { + email: string + language?: string | null +} + +export type LoginPayload = { + email: string + password: string +} + +export type MessageMoreLikeThisQuery = { + response_mode: 'blocking' | 'streaming' +} + +export type RemoteFileInfo = { + file_length: number + file_type: string +} + +export type TextToAudioPayload = { + message_id?: string | null + streaming?: boolean | null + text?: string | null + voice?: string | null +} + +export type WorkflowRunPayload = { + files?: Array<{ + [key: string]: unknown + }> | null + inputs: { + [key: string]: unknown + } +} + +export type PostAudioToTextData = { + 
body?: never + path?: never + query?: never + url: '/audio-to-text' +} + +export type PostAudioToTextErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 413: { + [key: string]: unknown + } + 415: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostAudioToTextError = PostAudioToTextErrors[keyof PostAudioToTextErrors] + +export type PostAudioToTextResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostAudioToTextResponse = PostAudioToTextResponses[keyof PostAudioToTextResponses] + +export type PostChatMessagesData = { + body: ChatMessagePayload + path?: never + query?: never + url: '/chat-messages' +} + +export type PostChatMessagesErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostChatMessagesError = PostChatMessagesErrors[keyof PostChatMessagesErrors] + +export type PostChatMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostChatMessagesResponse = PostChatMessagesResponses[keyof PostChatMessagesResponses] + +export type PostChatMessagesByTaskIdStopData = { + body?: never + path: { + task_id: string + } + query?: never + url: '/chat-messages/{task_id}/stop' +} + +export type PostChatMessagesByTaskIdStopErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostChatMessagesByTaskIdStopError + = PostChatMessagesByTaskIdStopErrors[keyof PostChatMessagesByTaskIdStopErrors] + +export type PostChatMessagesByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostChatMessagesByTaskIdStopResponse + = PostChatMessagesByTaskIdStopResponses[keyof 
PostChatMessagesByTaskIdStopResponses] + +export type PostCompletionMessagesData = { + body: CompletionMessagePayload + path?: never + query?: never + url: '/completion-messages' +} + +export type PostCompletionMessagesErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostCompletionMessagesError + = PostCompletionMessagesErrors[keyof PostCompletionMessagesErrors] + +export type PostCompletionMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostCompletionMessagesResponse + = PostCompletionMessagesResponses[keyof PostCompletionMessagesResponses] + +export type PostCompletionMessagesByTaskIdStopData = { + body?: never + path: { + task_id: string + } + query?: never + url: '/completion-messages/{task_id}/stop' +} + +export type PostCompletionMessagesByTaskIdStopErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostCompletionMessagesByTaskIdStopError + = PostCompletionMessagesByTaskIdStopErrors[keyof PostCompletionMessagesByTaskIdStopErrors] + +export type PostCompletionMessagesByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostCompletionMessagesByTaskIdStopResponse + = PostCompletionMessagesByTaskIdStopResponses[keyof PostCompletionMessagesByTaskIdStopResponses] + +export type GetConversationsData = { + body?: never + path?: never + query?: { + last_id?: string + limit?: number + pinned?: 'true' | 'false' + sort_by?: 'created_at' | '-created_at' | 'updated_at' | '-updated_at' + } + url: '/conversations' +} + +export type GetConversationsErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + 
[key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type GetConversationsError = GetConversationsErrors[keyof GetConversationsErrors] + +export type GetConversationsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetConversationsResponse = GetConversationsResponses[keyof GetConversationsResponses] + +export type DeleteConversationsByCIdData = { + body?: never + path: { + c_id: string + } + query?: never + url: '/conversations/{c_id}' +} + +export type DeleteConversationsByCIdErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type DeleteConversationsByCIdError + = DeleteConversationsByCIdErrors[keyof DeleteConversationsByCIdErrors] + +export type DeleteConversationsByCIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteConversationsByCIdResponse + = DeleteConversationsByCIdResponses[keyof DeleteConversationsByCIdResponses] + +export type PostConversationsByCIdNameData = { + body?: never + path: { + c_id: string + } + query?: { + name?: string + auto_generate?: boolean + } + url: '/conversations/{c_id}/name' +} + +export type PostConversationsByCIdNameErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostConversationsByCIdNameError + = PostConversationsByCIdNameErrors[keyof PostConversationsByCIdNameErrors] + +export type PostConversationsByCIdNameResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostConversationsByCIdNameResponse + = PostConversationsByCIdNameResponses[keyof PostConversationsByCIdNameResponses] + +export type PatchConversationsByCIdPinData = { + body?: never + path: { + c_id: string + } + query?: never + url: 
'/conversations/{c_id}/pin' +} + +export type PatchConversationsByCIdPinErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PatchConversationsByCIdPinError + = PatchConversationsByCIdPinErrors[keyof PatchConversationsByCIdPinErrors] + +export type PatchConversationsByCIdPinResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchConversationsByCIdPinResponse + = PatchConversationsByCIdPinResponses[keyof PatchConversationsByCIdPinResponses] + +export type PatchConversationsByCIdUnpinData = { + body?: never + path: { + c_id: string + } + query?: never + url: '/conversations/{c_id}/unpin' +} + +export type PatchConversationsByCIdUnpinErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PatchConversationsByCIdUnpinError + = PatchConversationsByCIdUnpinErrors[keyof PatchConversationsByCIdUnpinErrors] + +export type PatchConversationsByCIdUnpinResponses = { + 200: { + [key: string]: unknown + } +} + +export type PatchConversationsByCIdUnpinResponse + = PatchConversationsByCIdUnpinResponses[keyof PatchConversationsByCIdUnpinResponses] + +export type PostEmailCodeLoginData = { + body: EmailCodeLoginSendPayload + path?: never + query?: never + url: '/email-code-login' +} + +export type PostEmailCodeLoginErrors = { + 400: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostEmailCodeLoginError = PostEmailCodeLoginErrors[keyof PostEmailCodeLoginErrors] + +export type PostEmailCodeLoginResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostEmailCodeLoginResponse + = PostEmailCodeLoginResponses[keyof PostEmailCodeLoginResponses] + +export type PostEmailCodeLoginValidityData = { 
+ body: EmailCodeLoginVerifyPayload + path?: never + query?: never + url: '/email-code-login/validity' +} + +export type PostEmailCodeLoginValidityErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostEmailCodeLoginValidityError + = PostEmailCodeLoginValidityErrors[keyof PostEmailCodeLoginValidityErrors] + +export type PostEmailCodeLoginValidityResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostEmailCodeLoginValidityResponse + = PostEmailCodeLoginValidityResponses[keyof PostEmailCodeLoginValidityResponses] + +export type PostFilesUploadData = { + body?: never + path?: never + query?: never + url: '/files/upload' +} + +export type PostFilesUploadErrors = { + 400: { + [key: string]: unknown + } + 413: { + [key: string]: unknown + } + 415: { + [key: string]: unknown + } +} + +export type PostFilesUploadError = PostFilesUploadErrors[keyof PostFilesUploadErrors] + +export type PostFilesUploadResponses = { + 201: FileResponse +} + +export type PostFilesUploadResponse = PostFilesUploadResponses[keyof PostFilesUploadResponses] + +export type PostForgotPasswordData = { + body: ForgotPasswordSendPayload + path?: never + query?: never + url: '/forgot-password' +} + +export type PostForgotPasswordErrors = { + 400: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 429: { + [key: string]: unknown + } +} + +export type PostForgotPasswordError = PostForgotPasswordErrors[keyof PostForgotPasswordErrors] + +export type PostForgotPasswordResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostForgotPasswordResponse + = PostForgotPasswordResponses[keyof PostForgotPasswordResponses] + +export type PostForgotPasswordResetsData = { + body: ForgotPasswordResetPayload + path?: never + query?: never + url: '/forgot-password/resets' +} + +export type PostForgotPasswordResetsErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: 
string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostForgotPasswordResetsError + = PostForgotPasswordResetsErrors[keyof PostForgotPasswordResetsErrors] + +export type PostForgotPasswordResetsResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostForgotPasswordResetsResponse + = PostForgotPasswordResetsResponses[keyof PostForgotPasswordResetsResponses] + +export type PostForgotPasswordValidityData = { + body: ForgotPasswordCheckPayload + path?: never + query?: never + url: '/forgot-password/validity' +} + +export type PostForgotPasswordValidityErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } +} + +export type PostForgotPasswordValidityError + = PostForgotPasswordValidityErrors[keyof PostForgotPasswordValidityErrors] + +export type PostForgotPasswordValidityResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostForgotPasswordValidityResponse + = PostForgotPasswordValidityResponses[keyof PostForgotPasswordValidityResponses] + +export type GetFormHumanInputByFormTokenData = { + body?: never + path: { + form_token: string + } + query?: never + url: '/form/human_input/{form_token}' +} + +export type GetFormHumanInputByFormTokenResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetFormHumanInputByFormTokenResponse + = GetFormHumanInputByFormTokenResponses[keyof GetFormHumanInputByFormTokenResponses] + +export type PostFormHumanInputByFormTokenData = { + body?: never + path: { + form_token: string + } + query?: never + url: '/form/human_input/{form_token}' +} + +export type PostFormHumanInputByFormTokenResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostFormHumanInputByFormTokenResponse + = PostFormHumanInputByFormTokenResponses[keyof PostFormHumanInputByFormTokenResponses] + +export type PostLoginData = { + body: LoginPayload + path?: never + query?: never + url: '/login' +} + +export type PostLoginErrors = { + 400: { + [key: 
string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type PostLoginError = PostLoginErrors[keyof PostLoginErrors] + +export type PostLoginResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostLoginResponse = PostLoginResponses[keyof PostLoginResponses] + +export type GetLoginStatusData = { + body?: never + path?: never + query?: never + url: '/login/status' +} + +export type GetLoginStatusErrors = { + 401: { + [key: string]: unknown + } +} + +export type GetLoginStatusError = GetLoginStatusErrors[keyof GetLoginStatusErrors] + +export type GetLoginStatusResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetLoginStatusResponse = GetLoginStatusResponses[keyof GetLoginStatusResponses] + +export type PostLogoutData = { + body?: never + path?: never + query?: never + url: '/logout' +} + +export type PostLogoutResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostLogoutResponse = PostLogoutResponses[keyof PostLogoutResponses] + +export type GetMessagesData = { + body?: never + path?: never + query: { + conversation_id: string + first_id?: string + limit?: number + } + url: '/messages' +} + +export type GetMessagesErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type GetMessagesError = GetMessagesErrors[keyof GetMessagesErrors] + +export type GetMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetMessagesResponse = GetMessagesResponses[keyof GetMessagesResponses] + +export type PostMessagesByMessageIdFeedbacksData = { + body?: never + path: { + message_id: string + } + query?: { + rating?: 'like' | 'dislike' + content?: string + } + url: '/messages/{message_id}/feedbacks' +} + +export type PostMessagesByMessageIdFeedbacksErrors = { 
+ 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostMessagesByMessageIdFeedbacksError + = PostMessagesByMessageIdFeedbacksErrors[keyof PostMessagesByMessageIdFeedbacksErrors] + +export type PostMessagesByMessageIdFeedbacksResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostMessagesByMessageIdFeedbacksResponse + = PostMessagesByMessageIdFeedbacksResponses[keyof PostMessagesByMessageIdFeedbacksResponses] + +export type GetMessagesByMessageIdMoreLikeThisData = { + body?: never + path: { + message_id: string + } + query: { + response_mode: 'blocking' | 'streaming' + } + url: '/messages/{message_id}/more-like-this' +} + +export type GetMessagesByMessageIdMoreLikeThisErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type GetMessagesByMessageIdMoreLikeThisError + = GetMessagesByMessageIdMoreLikeThisErrors[keyof GetMessagesByMessageIdMoreLikeThisErrors] + +export type GetMessagesByMessageIdMoreLikeThisResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetMessagesByMessageIdMoreLikeThisResponse + = GetMessagesByMessageIdMoreLikeThisResponses[keyof GetMessagesByMessageIdMoreLikeThisResponses] + +export type GetMessagesByMessageIdSuggestedQuestionsData = { + body?: never + path: { + message_id: string + } + query?: never + url: '/messages/{message_id}/suggested-questions' +} + +export type GetMessagesByMessageIdSuggestedQuestionsErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type GetMessagesByMessageIdSuggestedQuestionsError + = 
GetMessagesByMessageIdSuggestedQuestionsErrors[keyof GetMessagesByMessageIdSuggestedQuestionsErrors] + +export type GetMessagesByMessageIdSuggestedQuestionsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetMessagesByMessageIdSuggestedQuestionsResponse + = GetMessagesByMessageIdSuggestedQuestionsResponses[keyof GetMessagesByMessageIdSuggestedQuestionsResponses] + +export type GetMetaData = { + body?: never + path?: never + query?: never + url: '/meta' +} + +export type GetMetaErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type GetMetaError = GetMetaErrors[keyof GetMetaErrors] + +export type GetMetaResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetMetaResponse = GetMetaResponses[keyof GetMetaResponses] + +export type GetParametersData = { + body?: never + path?: never + query?: never + url: '/parameters' +} + +export type GetParametersErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type GetParametersError = GetParametersErrors[keyof GetParametersErrors] + +export type GetParametersResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetParametersResponse = GetParametersResponses[keyof GetParametersResponses] + +export type GetPassportData = { + body?: never + path?: never + query?: never + url: '/passport' +} + +export type GetPassportErrors = { + 401: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } +} + +export type GetPassportError = GetPassportErrors[keyof GetPassportErrors] + +export type GetPassportResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetPassportResponse = GetPassportResponses[keyof GetPassportResponses] + +export type 
PostRemoteFilesUploadData = { + body?: never + path?: never + query?: never + url: '/remote-files/upload' +} + +export type PostRemoteFilesUploadErrors = { + 400: { + [key: string]: unknown + } + 413: { + [key: string]: unknown + } + 415: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostRemoteFilesUploadError + = PostRemoteFilesUploadErrors[keyof PostRemoteFilesUploadErrors] + +export type PostRemoteFilesUploadResponses = { + 201: FileWithSignedUrl +} + +export type PostRemoteFilesUploadResponse + = PostRemoteFilesUploadResponses[keyof PostRemoteFilesUploadResponses] + +export type GetRemoteFilesByUrlData = { + body?: never + path: { + url: string + } + query?: never + url: '/remote-files/{url}' +} + +export type GetRemoteFilesByUrlErrors = { + 400: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type GetRemoteFilesByUrlError = GetRemoteFilesByUrlErrors[keyof GetRemoteFilesByUrlErrors] + +export type GetRemoteFilesByUrlResponses = { + 200: RemoteFileInfo +} + +export type GetRemoteFilesByUrlResponse + = GetRemoteFilesByUrlResponses[keyof GetRemoteFilesByUrlResponses] + +export type GetSavedMessagesData = { + body?: never + path?: never + query?: { + last_id?: string + limit?: number + } + url: '/saved-messages' +} + +export type GetSavedMessagesErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type GetSavedMessagesError = GetSavedMessagesErrors[keyof GetSavedMessagesErrors] + +export type GetSavedMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetSavedMessagesResponse = GetSavedMessagesResponses[keyof GetSavedMessagesResponses] + +export type PostSavedMessagesData = { + body?: never + path?: never + query: { + message_id: string + } + url: '/saved-messages' +} + 
+export type PostSavedMessagesErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostSavedMessagesError = PostSavedMessagesErrors[keyof PostSavedMessagesErrors] + +export type PostSavedMessagesResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostSavedMessagesResponse = PostSavedMessagesResponses[keyof PostSavedMessagesResponses] + +export type DeleteSavedMessagesByMessageIdData = { + body?: never + path: { + message_id: string + } + query?: never + url: '/saved-messages/{message_id}' +} + +export type DeleteSavedMessagesByMessageIdErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type DeleteSavedMessagesByMessageIdError + = DeleteSavedMessagesByMessageIdErrors[keyof DeleteSavedMessagesByMessageIdErrors] + +export type DeleteSavedMessagesByMessageIdResponses = { + 204: { + [key: string]: unknown + } +} + +export type DeleteSavedMessagesByMessageIdResponse + = DeleteSavedMessagesByMessageIdResponses[keyof DeleteSavedMessagesByMessageIdResponses] + +export type GetSiteData = { + body?: never + path?: never + query?: never + url: '/site' +} + +export type GetSiteErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type GetSiteError = GetSiteErrors[keyof GetSiteErrors] + +export type GetSiteResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetSiteResponse = GetSiteResponses[keyof GetSiteResponses] + +export type GetSystemFeaturesData = { + body?: never + path?: never + query?: never + url: '/system-features' +} + +export type 
GetSystemFeaturesErrors = { + 500: { + [key: string]: unknown + } +} + +export type GetSystemFeaturesError = GetSystemFeaturesErrors[keyof GetSystemFeaturesErrors] + +export type GetSystemFeaturesResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetSystemFeaturesResponse = GetSystemFeaturesResponses[keyof GetSystemFeaturesResponses] + +export type PostTextToAudioData = { + body: TextToAudioPayload + path?: never + query?: never + url: '/text-to-audio' +} + +export type PostTextToAudioErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostTextToAudioError = PostTextToAudioErrors[keyof PostTextToAudioErrors] + +export type PostTextToAudioResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostTextToAudioResponse = PostTextToAudioResponses[keyof PostTextToAudioResponses] + +export type GetWebappAccessModeData = { + body?: never + path?: never + query?: { + appId?: string + appCode?: string + } + url: '/webapp/access-mode' +} + +export type GetWebappAccessModeErrors = { + 400: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type GetWebappAccessModeError = GetWebappAccessModeErrors[keyof GetWebappAccessModeErrors] + +export type GetWebappAccessModeResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWebappAccessModeResponse + = GetWebappAccessModeResponses[keyof GetWebappAccessModeResponses] + +export type GetWebappPermissionData = { + body?: never + path?: never + query: { + appId: string + } + url: '/webapp/permission' +} + +export type GetWebappPermissionErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type GetWebappPermissionError = GetWebappPermissionErrors[keyof GetWebappPermissionErrors] + +export type GetWebappPermissionResponses = { + 200: { + [key: 
string]: unknown + } +} + +export type GetWebappPermissionResponse + = GetWebappPermissionResponses[keyof GetWebappPermissionResponses] + +export type GetWorkflowByTaskIdEventsData = { + body?: never + path: { + task_id: string + } + query?: never + url: '/workflow/{task_id}/events' +} + +export type GetWorkflowByTaskIdEventsResponses = { + 200: { + [key: string]: unknown + } +} + +export type GetWorkflowByTaskIdEventsResponse + = GetWorkflowByTaskIdEventsResponses[keyof GetWorkflowByTaskIdEventsResponses] + +export type PostWorkflowsRunData = { + body: WorkflowRunPayload + path?: never + query?: never + url: '/workflows/run' +} + +export type PostWorkflowsRunErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostWorkflowsRunError = PostWorkflowsRunErrors[keyof PostWorkflowsRunErrors] + +export type PostWorkflowsRunResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkflowsRunResponse = PostWorkflowsRunResponses[keyof PostWorkflowsRunResponses] + +export type PostWorkflowsTasksByTaskIdStopData = { + body?: never + path: { + task_id: string + } + query?: never + url: '/workflows/tasks/{task_id}/stop' +} + +export type PostWorkflowsTasksByTaskIdStopErrors = { + 400: { + [key: string]: unknown + } + 401: { + [key: string]: unknown + } + 403: { + [key: string]: unknown + } + 404: { + [key: string]: unknown + } + 500: { + [key: string]: unknown + } +} + +export type PostWorkflowsTasksByTaskIdStopError + = PostWorkflowsTasksByTaskIdStopErrors[keyof PostWorkflowsTasksByTaskIdStopErrors] + +export type PostWorkflowsTasksByTaskIdStopResponses = { + 200: { + [key: string]: unknown + } +} + +export type PostWorkflowsTasksByTaskIdStopResponse + = PostWorkflowsTasksByTaskIdStopResponses[keyof PostWorkflowsTasksByTaskIdStopResponses] diff --git 
a/packages/contracts/generated/api/web/zod.gen.ts b/packages/contracts/generated/api/web/zod.gen.ts new file mode 100644 index 0000000000..ec4bd19aff --- /dev/null +++ b/packages/contracts/generated/api/web/zod.gen.ts @@ -0,0 +1,478 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * ChatMessagePayload + */ +export const zChatMessagePayload = z.object({ + conversation_id: z.string().nullish(), + files: z.array(z.record(z.string(), z.unknown())).nullish(), + inputs: z.record(z.string(), z.unknown()), + parent_message_id: z.string().nullish(), + query: z.string(), + response_mode: z.enum(['blocking', 'streaming']).nullish(), + retriever_from: z.string().optional().default('web_app'), +}) + +/** + * CompletionMessagePayload + */ +export const zCompletionMessagePayload = z.object({ + files: z.array(z.record(z.string(), z.unknown())).nullish(), + inputs: z.record(z.string(), z.unknown()), + query: z.string().optional().default(''), + response_mode: z.enum(['blocking', 'streaming']).nullish(), + retriever_from: z.string().optional().default('web_app'), +}) + +/** + * EmailCodeLoginSendPayload + */ +export const zEmailCodeLoginSendPayload = z.object({ + email: z.string(), + language: z.string().nullish(), +}) + +/** + * EmailCodeLoginVerifyPayload + */ +export const zEmailCodeLoginVerifyPayload = z.object({ + code: z.string(), + email: z.string(), + token: z.string().min(1), +}) + +/** + * FileResponse + */ +export const zFileResponse = z.object({ + conversation_id: z.string().nullish(), + created_at: z.int().nullish(), + created_by: z.string().nullish(), + extension: z.string().nullish(), + file_key: z.string().nullish(), + id: z.string(), + mime_type: z.string().nullish(), + name: z.string(), + original_url: z.string().nullish(), + preview_url: z.string().nullish(), + size: z.int(), + source_url: z.string().nullish(), + tenant_id: z.string().nullish(), + user_id: z.string().nullish(), +}) + +/** + * FileWithSignedUrl + */ 
+export const zFileWithSignedUrl = z.object({ + created_at: z.int().nullish(), + created_by: z.string().nullish(), + extension: z.string().nullish(), + id: z.string(), + mime_type: z.string().nullish(), + name: z.string(), + size: z.int(), + url: z.string().nullish(), +}) + +/** + * ForgotPasswordCheckPayload + */ +export const zForgotPasswordCheckPayload = z.object({ + code: z.string(), + email: z.string(), + token: z.string().min(1), +}) + +/** + * ForgotPasswordResetPayload + */ +export const zForgotPasswordResetPayload = z.object({ + new_password: z.string(), + password_confirm: z.string(), + token: z.string().min(1), +}) + +/** + * ForgotPasswordSendPayload + */ +export const zForgotPasswordSendPayload = z.object({ + email: z.string(), + language: z.string().nullish(), +}) + +/** + * LoginPayload + */ +export const zLoginPayload = z.object({ + email: z.string(), + password: z.string(), +}) + +/** + * MessageMoreLikeThisQuery + */ +export const zMessageMoreLikeThisQuery = z.object({ + response_mode: z.enum(['blocking', 'streaming']), +}) + +/** + * RemoteFileInfo + */ +export const zRemoteFileInfo = z.object({ + file_length: z.int(), + file_type: z.string(), +}) + +/** + * TextToAudioPayload + */ +export const zTextToAudioPayload = z.object({ + message_id: z.string().nullish(), + streaming: z.boolean().nullish(), + text: z.string().nullish(), + voice: z.string().nullish(), +}) + +/** + * WorkflowRunPayload + */ +export const zWorkflowRunPayload = z.object({ + files: z.array(z.record(z.string(), z.unknown())).nullish(), + inputs: z.record(z.string(), z.unknown()), +}) + +/** + * Success + */ +export const zPostAudioToTextResponse = z.record(z.string(), z.unknown()) + +export const zPostChatMessagesBody = zChatMessagePayload + +/** + * Success + */ +export const zPostChatMessagesResponse = z.record(z.string(), z.unknown()) + +export const zPostChatMessagesByTaskIdStopPath = z.object({ + task_id: z.string(), +}) + +/** + * Success + */ +export const 
zPostChatMessagesByTaskIdStopResponse = z.record(z.string(), z.unknown()) + +export const zPostCompletionMessagesBody = zCompletionMessagePayload + +/** + * Success + */ +export const zPostCompletionMessagesResponse = z.record(z.string(), z.unknown()) + +export const zPostCompletionMessagesByTaskIdStopPath = z.object({ + task_id: z.string(), +}) + +/** + * Success + */ +export const zPostCompletionMessagesByTaskIdStopResponse = z.record(z.string(), z.unknown()) + +export const zGetConversationsQuery = z.object({ + last_id: z.string().optional(), + limit: z.int().optional().default(20), + pinned: z.enum(['true', 'false']).optional(), + sort_by: z + .enum(['created_at', '-created_at', 'updated_at', '-updated_at']) + .optional() + .default('-updated_at'), +}) + +/** + * Success + */ +export const zGetConversationsResponse = z.record(z.string(), z.unknown()) + +export const zDeleteConversationsByCIdPath = z.object({ + c_id: z.string(), +}) + +/** + * Conversation deleted successfully + */ +export const zDeleteConversationsByCIdResponse = z.record(z.string(), z.unknown()) + +export const zPostConversationsByCIdNamePath = z.object({ + c_id: z.string(), +}) + +export const zPostConversationsByCIdNameQuery = z.object({ + name: z.string().optional(), + auto_generate: z.boolean().optional().default(false), +}) + +/** + * Conversation renamed successfully + */ +export const zPostConversationsByCIdNameResponse = z.record(z.string(), z.unknown()) + +export const zPatchConversationsByCIdPinPath = z.object({ + c_id: z.string(), +}) + +/** + * Conversation pinned successfully + */ +export const zPatchConversationsByCIdPinResponse = z.record(z.string(), z.unknown()) + +export const zPatchConversationsByCIdUnpinPath = z.object({ + c_id: z.string(), +}) + +/** + * Conversation unpinned successfully + */ +export const zPatchConversationsByCIdUnpinResponse = z.record(z.string(), z.unknown()) + +export const zPostEmailCodeLoginBody = zEmailCodeLoginSendPayload + +/** + * Email code sent 
successfully + */ +export const zPostEmailCodeLoginResponse = z.record(z.string(), z.unknown()) + +export const zPostEmailCodeLoginValidityBody = zEmailCodeLoginVerifyPayload + +/** + * Email code verified and login successful + */ +export const zPostEmailCodeLoginValidityResponse = z.record(z.string(), z.unknown()) + +/** + * File uploaded successfully + */ +export const zPostFilesUploadResponse = zFileResponse + +export const zPostForgotPasswordBody = zForgotPasswordSendPayload + +/** + * Password reset email sent successfully + */ +export const zPostForgotPasswordResponse = z.record(z.string(), z.unknown()) + +export const zPostForgotPasswordResetsBody = zForgotPasswordResetPayload + +/** + * Password reset successfully + */ +export const zPostForgotPasswordResetsResponse = z.record(z.string(), z.unknown()) + +export const zPostForgotPasswordValidityBody = zForgotPasswordCheckPayload + +/** + * Token is valid + */ +export const zPostForgotPasswordValidityResponse = z.record(z.string(), z.unknown()) + +export const zGetFormHumanInputByFormTokenPath = z.object({ + form_token: z.string(), +}) + +/** + * Success + */ +export const zGetFormHumanInputByFormTokenResponse = z.record(z.string(), z.unknown()) + +export const zPostFormHumanInputByFormTokenPath = z.object({ + form_token: z.string(), +}) + +/** + * Success + */ +export const zPostFormHumanInputByFormTokenResponse = z.record(z.string(), z.unknown()) + +export const zPostLoginBody = zLoginPayload + +/** + * Authentication successful + */ +export const zPostLoginResponse = z.record(z.string(), z.unknown()) + +/** + * Login status + */ +export const zGetLoginStatusResponse = z.record(z.string(), z.unknown()) + +/** + * Logout successful + */ +export const zPostLogoutResponse = z.record(z.string(), z.unknown()) + +export const zGetMessagesQuery = z.object({ + conversation_id: z.string(), + first_id: z.string().optional(), + limit: z.int().optional().default(20), +}) + +/** + * Success + */ +export const 
zGetMessagesResponse = z.record(z.string(), z.unknown()) + +export const zPostMessagesByMessageIdFeedbacksPath = z.object({ + message_id: z.string(), +}) + +export const zPostMessagesByMessageIdFeedbacksQuery = z.object({ + rating: z.enum(['like', 'dislike']).optional(), + content: z.string().optional(), +}) + +/** + * Feedback submitted successfully + */ +export const zPostMessagesByMessageIdFeedbacksResponse = z.record(z.string(), z.unknown()) + +export const zGetMessagesByMessageIdMoreLikeThisPath = z.object({ + message_id: z.string(), +}) + +export const zGetMessagesByMessageIdMoreLikeThisQuery = z.object({ + response_mode: z.enum(['blocking', 'streaming']), +}) + +/** + * Success + */ +export const zGetMessagesByMessageIdMoreLikeThisResponse = z.record(z.string(), z.unknown()) + +export const zGetMessagesByMessageIdSuggestedQuestionsPath = z.object({ + message_id: z.string(), +}) + +/** + * Success + */ +export const zGetMessagesByMessageIdSuggestedQuestionsResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetMetaResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetParametersResponse = z.record(z.string(), z.unknown()) + +/** + * Passport retrieved successfully + */ +export const zGetPassportResponse = z.record(z.string(), z.unknown()) + +/** + * Remote file uploaded successfully + */ +export const zPostRemoteFilesUploadResponse = zFileWithSignedUrl + +export const zGetRemoteFilesByUrlPath = z.object({ + url: z.string(), +}) + +/** + * Remote file information retrieved successfully + */ +export const zGetRemoteFilesByUrlResponse = zRemoteFileInfo + +export const zGetSavedMessagesQuery = z.object({ + last_id: z.string().optional(), + limit: z.int().optional().default(20), +}) + +/** + * Success + */ +export const zGetSavedMessagesResponse = z.record(z.string(), z.unknown()) + +export const zPostSavedMessagesQuery = z.object({ + message_id: z.string(), +}) + +/** + * Message saved successfully + 
*/ +export const zPostSavedMessagesResponse = z.record(z.string(), z.unknown()) + +export const zDeleteSavedMessagesByMessageIdPath = z.object({ + message_id: z.string(), +}) + +/** + * Message removed successfully + */ +export const zDeleteSavedMessagesByMessageIdResponse = z.record(z.string(), z.unknown()) + +/** + * Success + */ +export const zGetSiteResponse = z.record(z.string(), z.unknown()) + +/** + * System features retrieved successfully + */ +export const zGetSystemFeaturesResponse = z.record(z.string(), z.unknown()) + +export const zPostTextToAudioBody = zTextToAudioPayload + +/** + * Success + */ +export const zPostTextToAudioResponse = z.record(z.string(), z.unknown()) + +export const zGetWebappAccessModeQuery = z.object({ + appId: z.string().optional(), + appCode: z.string().optional(), +}) + +/** + * Success + */ +export const zGetWebappAccessModeResponse = z.record(z.string(), z.unknown()) + +export const zGetWebappPermissionQuery = z.object({ + appId: z.string(), +}) + +/** + * Success + */ +export const zGetWebappPermissionResponse = z.record(z.string(), z.unknown()) + +export const zGetWorkflowByTaskIdEventsPath = z.object({ + task_id: z.string(), +}) + +/** + * Success + */ +export const zGetWorkflowByTaskIdEventsResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkflowsRunBody = zWorkflowRunPayload + +/** + * Success + */ +export const zPostWorkflowsRunResponse = z.record(z.string(), z.unknown()) + +export const zPostWorkflowsTasksByTaskIdStopPath = z.object({ + task_id: z.string(), +}) + +/** + * Success + */ +export const zPostWorkflowsTasksByTaskIdStopResponse = z.record(z.string(), z.unknown()) diff --git a/packages/contracts/generated/enterprise/orpc.gen.ts b/packages/contracts/generated/enterprise/orpc.gen.ts new file mode 100644 index 0000000000..6b9b76470a --- /dev/null +++ b/packages/contracts/generated/enterprise/orpc.gen.ts @@ -0,0 +1,138 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import { oc } from 
'@orpc/contract' +import * as z from 'zod' + +import { + zConsoleSsoOAuth2LoginResponse, + zConsoleSsoOidcLoginResponse, + zConsoleSsoSamlLoginResponse, + zWebAppAuthGetGroupSubjectsQuery, + zWebAppAuthGetGroupSubjectsResponse, + zWebAppAuthGetWebAppAccessModeQuery, + zWebAppAuthGetWebAppAccessModeResponse, + zWebAppAuthGetWebAppWhitelistSubjectsQuery, + zWebAppAuthGetWebAppWhitelistSubjectsResponse, + zWebAppAuthIsUserAllowedToAccessWebAppQuery, + zWebAppAuthIsUserAllowedToAccessWebAppResponse, + zWebAppAuthSearchForWhilteListCandidatesQuery, + zWebAppAuthSearchForWhilteListCandidatesResponse, + zWebAppAuthUpdateWebAppWhitelistSubjectsBody, + zWebAppAuthUpdateWebAppWhitelistSubjectsResponse, +} from './zod.gen' + +export const oAuth2Login = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'ConsoleSSO_OAuth2Login', + path: '/enterprise/sso/oauth2/login', + tags: ['ConsoleSSO'], + }) + .output(zConsoleSsoOAuth2LoginResponse) + +export const oidcLogin = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'ConsoleSSO_OIDCLogin', + path: '/enterprise/sso/oidc/login', + tags: ['ConsoleSSO'], + }) + .output(zConsoleSsoOidcLoginResponse) + +export const samlLogin = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'ConsoleSSO_SAMLLogin', + path: '/enterprise/sso/saml/login', + tags: ['ConsoleSSO'], + }) + .output(zConsoleSsoSamlLoginResponse) + +export const consoleSso = { + oAuth2Login, + oidcLogin, + samlLogin, +} + +export const getWebAppAccessMode = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'WebAppAuth_GetWebAppAccessMode', + path: '/enterprise/webapp/app/access-mode', + tags: ['WebAppAuth'], + }) + .input(z.object({ query: zWebAppAuthGetWebAppAccessModeQuery.optional() })) + .output(zWebAppAuthGetWebAppAccessModeResponse) + +export const updateWebAppWhitelistSubjects = oc + .route({ + inputStructure: 'detailed', + method: 'POST', + operationId: 
'WebAppAuth_UpdateWebAppWhitelistSubjects', + path: '/enterprise/webapp/app/access-mode', + tags: ['WebAppAuth'], + }) + .input(z.object({ body: zWebAppAuthUpdateWebAppWhitelistSubjectsBody })) + .output(zWebAppAuthUpdateWebAppWhitelistSubjectsResponse) + +export const searchForWhilteListCandidates = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'WebAppAuth_SearchForWhilteListCandidates', + path: '/enterprise/webapp/app/subject/search', + tags: ['WebAppAuth'], + }) + .input(z.object({ query: zWebAppAuthSearchForWhilteListCandidatesQuery.optional() })) + .output(zWebAppAuthSearchForWhilteListCandidatesResponse) + +export const getWebAppWhitelistSubjects = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'WebAppAuth_GetWebAppWhitelistSubjects', + path: '/enterprise/webapp/app/subjects', + tags: ['WebAppAuth'], + }) + .input(z.object({ query: zWebAppAuthGetWebAppWhitelistSubjectsQuery.optional() })) + .output(zWebAppAuthGetWebAppWhitelistSubjectsResponse) + +export const getGroupSubjects = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'WebAppAuth_GetGroupSubjects', + path: '/enterprise/webapp/group/subjects', + tags: ['WebAppAuth'], + }) + .input(z.object({ query: zWebAppAuthGetGroupSubjectsQuery.optional() })) + .output(zWebAppAuthGetGroupSubjectsResponse) + +export const isUserAllowedToAccessWebApp = oc + .route({ + inputStructure: 'detailed', + method: 'GET', + operationId: 'WebAppAuth_IsUserAllowedToAccessWebApp', + path: '/enterprise/webapp/permission', + tags: ['WebAppAuth'], + }) + .input(z.object({ query: zWebAppAuthIsUserAllowedToAccessWebAppQuery.optional() })) + .output(zWebAppAuthIsUserAllowedToAccessWebAppResponse) + +export const webAppAuth = { + getWebAppAccessMode, + updateWebAppWhitelistSubjects, + searchForWhilteListCandidates, + getWebAppWhitelistSubjects, + getGroupSubjects, + isUserAllowedToAccessWebApp, +} + +export const contract = { + consoleSso, + 
webAppAuth, +} diff --git a/packages/contracts/generated/enterprise/types.gen.ts b/packages/contracts/generated/enterprise/types.gen.ts new file mode 100644 index 0000000000..b747c4baa8 --- /dev/null +++ b/packages/contracts/generated/enterprise/types.gen.ts @@ -0,0 +1,1103 @@ +// This file is auto-generated by @hey-api/openapi-ts + +export type ClientOptions = { + baseUrl: `${string}://${string}` | (string & {}) +} + +export type Account = { + id?: string + email?: string + name?: string +} + +export type AccountDetail = { + account?: Account + status?: string + createdAt?: string + lastActiveAt?: string + workspaces?: Array + groups?: Array +} + +export type AccountDetailGroup = { + id?: string + name?: string +} + +export type AccountInWorkspace = { + workspaceId?: string + workspaceName?: string + role?: string +} + +export type AddGroupAppsRequest = { + id?: string + app_ids?: Array +} + +export type AuthSettingsReply = { + userSsoSettings?: SsoSettings + webSsoSettings?: SsoSettings + dashboardSsoSettings?: SsoSettings + userSsoSamlAcsUrl?: string + userSsoOidcCallbackUrl?: string + userSsoOauth2CallbackUrl?: string + webSsoSamlAcsUrl?: string + webSsoOidcCallbackUrl?: string + webSsoOauth2CallbackUrl?: string + webSsoMembersSamlAcsUrl?: string + webSsoMembersOidcCallbackUrl?: string + webSsoMembersOauth2CallbackUrl?: string + dashboardSsoSamlAcsUrl?: string + dashboardSsoOidcCallbackUrl?: string + dashboardSsoOauth2CallbackUrl?: string +} + +export type AuthSettingsReq = { + ssoType?: string + ssoSettings?: SsoSettings +} + +export type BrandingInfo = { + enabled?: boolean + applicationTitle?: string + loginPageLogo?: string + workspaceLogo?: string + favicon?: string +} + +export type CheckPasswordStatusReply = { + requirePasswordChange?: boolean + changeReason?: number + daysToExpire?: number + message?: string +} + +export type ClearDefaultWorkspaceReply = { + [key: string]: unknown +} + +export type CreateBearerTokenResponse = { + token?: string +} + 
+export type CreateMemberReply = { + id?: string + password?: string +} + +export type CreateMemberReq = { + name?: string + email?: string + status?: string +} + +export type CreateNewGroupsReq = { + groups?: Array +} + +export type CreateNewGroupsReqGroup = { + name?: string +} + +export type CreateNewGroupsRes = { + groups?: Array +} + +export type CreateResourceGroupRequest = { + name?: string + description?: string +} + +export type CreateSecretKeyReply = { + id?: string + name?: string + secretKey?: string + createdAt?: string + lastActive?: string +} + +export type CreateSecretKeyReq = { + name?: string +} + +export type CreateUserReply = { + id?: string + password?: string +} + +export type CreateUserReq = { + name?: string + email?: string + status?: string +} + +export type CreateWorkspaceReply = { + workspace?: Workspace +} + +export type CreateWorkspaceReq = { + name?: string + email?: string + status?: string +} + +export type CurrentUserReply = { + id?: string + name?: string + email?: string + interfaceLanguage?: string + timezone?: string +} + +export type DashboardSsooidcLoginReply = { + url?: string + state?: string +} + +export type DashboardSsoOauth2LoginReply = { + url?: string + state?: string +} + +export type DashboardSsosamlLoginReply = { + url?: string +} + +export type DeleteGroupsRes = { + message?: string +} + +export type DeleteMemberReply = { + account?: Account +} + +export type DeleteSecretKeyReply = { + message?: string +} + +export type DeleteUserReply = { + account?: Account +} + +export type DeleteWorkspaceReply = { + [key: string]: unknown +} + +export type EndpointReply = { + mode?: number + metricsEndpoint?: OtelExporterEndpoint + tracesEndpoint?: OtelExporterEndpoint +} + +export type EnterpriseSystemUserSettingReply = { + ssoEnforcedForSignin?: boolean + ssoEnforcedForSigninProtocol?: string + enableEmailPasswordLogin?: boolean +} + +export type GetBearerTokenResponse = { + maskedToken?: string +} + +export type 
GetClusterInfoReply = { + mode?: string + clusterId?: string + verifyMode?: string +} + +export type GetDefaultWorkspaceReply = { + workspaceId?: string + workspace?: Workspace +} + +export type GetGroupSubjectsRes = { + subjects?: Array +} + +export type GetGroupsRes = { + groups?: Array +} + +export type GetJoinedGroupsRes = { + groups?: Array +} + +export type GetLicenseReply = { + license?: LicenseInfo +} + +export type GetLicenseStatusReply = { + status?: string +} + +export type GetMfaInfoReply = { + userEnabled?: boolean + userSetup?: boolean + globalEnabled?: boolean +} + +export type GetMemberReply = { + account?: AccountDetail +} + +export type GetUserReply = { + account?: AccountDetail +} + +export type GetWebAppAccessModeRes = { + accessMode?: string +} + +export type GetWebAppAuthInfoRes = { + allowSso?: boolean + allowEmailCodeLogin?: boolean + allowEmailPasswordLogin?: boolean +} + +export type GetWebAppWhitelistSubjectsRes = { + groups?: Array + members?: Array +} + +export type GetWebAppWhitelistSubjectsResMember = { + id?: string + name?: string + email?: string + avatar?: string +} + +export type GetWorkspacePermissionReply = { + permission?: WorkspacePermission +} + +export type GetWorkspaceReply = { + workspace?: Workspace +} + +export type GroupAppItem = { + app_id?: string + app_name?: string + workspace_id?: string + workspace_name?: string + app_status?: number + token_usage?: string + rpm?: string + concurrency?: string +} + +export type HealthzReply = { + message?: string + status?: string +} + +export type InfoConfigReply = { + SSOEnforcedForSignin?: boolean + SSOEnforcedForSigninProtocol?: string + SSOEnforcedForWeb?: boolean + SSOEnforcedForWebProtocol?: string + EnableEmailCodeLogin?: boolean + EnableEmailPasswordLogin?: boolean + IsAllowRegister?: boolean + IsAllowCreateWorkspace?: boolean + License?: LicenseStatus + Branding?: BrandingInfo + WebAppAuth?: WebAppAuthInfo + PluginInstallationPermission?: 
PluginInstallationPermissionInfo +} + +export type InnerAdmission = { + marker?: string + concurrencyGroupIds?: Array +} + +export type InnerBatchGetWebAppAccessModesByIdReq = { + appIds?: Array +} + +export type InnerBatchGetWebAppAccessModesByIdRes = { + accessModes?: { + [key: string]: string + } +} + +export type InnerBatchIsUserAllowedToAccessWebAppReq = { + userId?: string + appIds?: Array +} + +export type InnerBatchIsUserAllowedToAccessWebAppRes = { + permissions?: { + [key: string]: boolean + } +} + +export type InnerCleanAppRes = { + message?: string +} + +export type InnerGetWebAppAccessModeByCodeRes = { + accessMode?: string +} + +export type InnerGetWebAppAccessModeByIdRes = { + accessMode?: string +} + +export type InnerGroupConfig = { + id?: string + enabled?: boolean + membershipId?: string + limits?: Array +} + +export type InnerIsUserAllowedToAccessWebAppRes = { + result?: boolean +} + +export type InnerReleaseAdmissionRequest = { + admission?: InnerAdmission +} + +export type InnerReleaseAdmissionResponse = { + [key: string]: unknown +} + +export type InnerResolveResponse = { + appId?: string + groups?: Array + blocked?: boolean + blockGroupId?: string + blockReason?: string + admission?: InnerAdmission +} + +export type InnerTryAddAccountToDefaultWorkspaceReply = { + workspaceId?: string + joined?: boolean + message?: string +} + +export type InnerTryAddAccountToDefaultWorkspaceReq = { + accountId?: string +} + +export type IsUserAllowedToAccessWebAppRes = { + result?: boolean +} + +export type JoinWorkspaceReply = { + message?: string +} + +export type JoinWorkspaceReq = { + id?: string + email?: string + role?: string +} + +export type LicenseInfo = { + uuid?: string + expiredAt?: string + clusterId?: string + product?: string + limits?: LimitFields +} + +export type LicenseStatus = { + status?: string + expiredAt?: string + workspaces?: ResourceQuota +} + +export type LimitConfig = { + type?: number + threshold?: string + action?: number + 
reached?: boolean +} + +export type LimitFields = { + workspaceMembers?: number + workspaces?: ResourceQuota +} + +export type ListGroupAppsResponse = { + items?: Array + total?: string +} + +export type ListMembersReply = { + data?: Array + pagination?: Pagination +} + +export type ListResourceGroupsResponse = { + items?: Array + total?: string +} + +export type ListSecretKeysReply = { + data?: Array + pagination?: Pagination +} + +export type ListUsersReply = { + data?: Array + pagination?: Pagination +} + +export type ListWorkspacesReply = { + data?: Array + pagination?: Pagination +} + +export type LoginTypesReply = { + enabledEmailCodeLogin?: boolean + enableEmailPasswordLogin?: boolean + isAllowRegister?: boolean + isAllowCreateWorkspace?: boolean +} + +export type LoginTypesReq = { + enabledEmailCodeLogin?: boolean + enableEmailPasswordLogin?: boolean + isAllowRegister?: boolean + isAllowCreateWorkspace?: boolean +} + +export type MfaBackupCodesRes = { + codes?: Array + validCounts?: number + createdAt?: string +} + +export type MfaDeleteBackupCodesRes = { + message?: string +} + +export type MfaDeleteRes = { + token?: string +} + +export type MfaDownloadBackupCodesSummaryRes = { + content?: string +} + +export type MfaEnrollReq = { + code?: string +} + +export type MfaEnrollRes = { + token?: string +} + +export type MfaGetEnrollInfoRes = { + qrCode?: string + secret?: string +} + +export type MfaModifyRes = { + message?: string +} + +export type OAuth2Config = { + clientId?: string + clientSecret?: string + authUrl?: string + tokenUrl?: string + userinfoUrl?: string + scopes?: string + enablePkce?: boolean +} + +export type OAuth2LoginReply = { + url?: string + state?: string +} + +export type OidcConfig = { + issuerUrl?: string + clientId?: string + clientSecret?: string + enablePkce?: boolean +} + +export type OidcReply = { + url?: string + state?: string +} + +export type OtelExporterEndpoint = { + endpoint?: string + compression?: string + protocol?: 
number + timeout?: string + headers?: { + [key: string]: string + } + tlsCaPem?: string + tlsInsecure?: boolean + tlsClientCertPem?: string + tlsClientKeyPem?: string + enabled?: boolean + tlsInsecureSkipVerify?: boolean +} + +export type OtelExporterStatusReply = { + connectedAt?: string + bytesPushed?: string + itemsInQueue?: string + logs?: string + status?: number +} + +export type PasswordPolicyConfig = { + minLength?: number + requireDigit?: boolean + requireLowercase?: boolean + requireUppercase?: boolean + requireSpecial?: boolean + forbidRepeated?: boolean + forbidSequential?: boolean + expiryEnabled?: boolean + expiryDays?: number +} + +export type PasswordStrengthReply = { + level?: number +} + +export type PasswordStrengthReq = { + password?: string +} + +export type PluginInstallationPermissionInfo = { + pluginInstallationScope?: string + restrictToMarketplaceOnly?: boolean +} + +export type PluginInstallationSettingsReply = { + pluginInstallationScope?: number + restrictToMarketplaceOnly?: boolean +} + +export type ResetMemberPasswordReply = { + id?: string + password?: string +} + +export type ResetMemberPasswordReq = { + id?: string +} + +export type ResetPasswordReply = { + message?: string +} + +export type ResetPasswordReq = { + currentPassword?: string + newPassword?: string + confirmPassword?: string +} + +export type ResetUserPasswordReply = { + id?: string + password?: string +} + +export type ResetUserPasswordReq = { + id?: string +} + +export type ResourceGroupDetail = { + id?: string + name?: string + description?: string + enabled?: boolean + rpm_limit?: number + rpm_action?: number + concurrency_limit?: number + concurrency_action?: number + token_quota?: string + token_action?: number + created_at?: string + updated_at?: string +} + +export type ResourceGroupItem = { + id?: string + name?: string + description?: string + enabled?: boolean + rpm_limit?: number + concurrency_limit?: number + token_quota?: string + token_usage?: string + 
app_count?: string + rpm_status?: number + conc_status?: number + created_at?: string + updated_at?: string +} + +export type ResourceQuota = { + used?: number + limit?: number + enabled?: boolean +} + +export type SamlConfig = { + idpSsoUrl?: string + certificate?: string +} + +export type SamlLoginReply = { + url?: string +} + +export type SsoIdPProvider = { + protocol?: string + provider?: string + samlConfig?: SamlConfig + oidcConfig?: OidcConfig + oauth2Config?: OAuth2Config +} + +export type SsoSettings = { + ssoEnforced?: boolean + sessionTimeout?: number + ssoIdpProvider?: SsoIdPProvider +} + +export type SsoSettingsReply = { + enabled?: boolean +} + +export type ScimSettings = { + enabled?: boolean + lastSyncTime?: string +} + +export type SearchAppItem = { + app_id?: string + app_name?: string + workspace_id?: string + workspace_name?: string + app_status?: number + icon?: string + icon_type?: string + icon_background?: string + created_by_name?: string +} + +export type SearchAppsResponse = { + items?: Array + total?: string +} + +export type SearchForWhilteListCandidatesRes = { + subjects?: Array + currPage?: number + hasMore?: boolean +} + +export type SecretKey = { + id?: string + name?: string + secretKeyMasked?: string + createdAt?: string + lastActive?: string +} + +export type SetDefaultWorkspaceReply = { + workspaceId?: string +} + +export type SetDefaultWorkspaceReq = { + id?: string +} + +export type Subject = { + subjectId?: string + subjectType?: string + accountData?: SubjectAccountData + groupData?: SubjectGroupData +} + +export type SubjectAccountData = { + id?: string + name?: string + email?: string + avatar?: string +} + +export type SubjectGroupData = { + id?: string + name?: string + groupSize?: number +} + +export type SystemUserSettingReply = { + isAllowRegister?: boolean + enableEmailPasswordLogin?: boolean +} + +export type SystemUserSettingReq = { + isAllowRegister?: boolean + enableEmailPasswordLogin?: boolean +} + +export type 
TestConnectionReply = { + success?: boolean + error?: string +} + +export type ToggleEndpointRequest = { + enabled?: boolean +} + +export type UpdateAccessModeReq = { + appId?: string + accessMode?: string +} + +export type UpdateAccessModeRes = { + message?: string +} + +export type UpdateBrandingInfoReq = { + enabled?: boolean + applicationTitle?: string + loginPageLogo?: string + workspaceLogo?: string + favicon?: string +} + +export type UpdateGroupSubjectsReq = { + groupId?: string + subjects?: Array +} + +export type UpdateGroupSubjectsRes = { + message?: string +} + +export type UpdateGroupsReq = { + groups?: Array +} + +export type UpdateGroupsReqGroup = { + id?: string + name?: string +} + +export type UpdateGroupsRes = { + groups?: Array +} + +export type UpdateJoinedGroupsReq = { + accountId?: string + groupIds?: Array +} + +export type UpdateJoinedGroupsRes = { + message?: string +} + +export type UpdateLicenseReply = { + message?: string +} + +export type UpdateLicenseReq = { + licenseId?: string +} + +export type UpdateMfaStatusReq = { + enabled?: boolean +} + +export type UpdateMfaStatusRes = { + message?: string +} + +export type UpdateMemberReply = { + account?: Account +} + +export type UpdateMemberReq = { + id?: string + name?: string + email?: string + status?: string +} + +export type UpdateMembersInGroupsReq = { + groupId?: string + accountIds?: Array +} + +export type UpdateMembersInGroupsRes = { + message?: string +} + +export type UpdateOfflineLicenseReply = { + message?: string +} + +export type UpdateOfflineLicenseReq = { + offlineCode?: string +} + +export type UpdatePluginInstallationSettingsRequest = { + pluginInstallationScope?: number + restrictToMarketplaceOnly?: boolean +} + +export type UpdateResourceGroupRequest = { + id?: string + name?: string + description?: string + enabled?: boolean + rpm_limit?: number + rpm_action?: number + concurrency_limit?: number + concurrency_action?: number + token_quota?: string + token_action?: 
number +} + +export type UpdateUserReply = { + account?: AccountDetail +} + +export type UpdateUserReq = { + id?: string + name?: string + email?: string + status?: string +} + +export type UpdateWebAppAuthInfoReq = { + allowSso?: boolean + allowEmailCodeLogin?: boolean + allowEmailPasswordLogin?: boolean +} + +export type UpdateWebAppAuthInfoRes = { + message?: string +} + +export type UpdateWebAppWhitelistSubjectsReq = { + appId?: string + subjects?: Array + accessMode?: string +} + +export type UpdateWebAppWhitelistSubjectsRes = { + message?: string +} + +export type UpdateWorkspacePermissionReply = { + message?: string + permission?: WorkspacePermission +} + +export type UpdateWorkspacePermissionReq = { + id?: string + permission?: WorkspacePermission +} + +export type UpdateWorkspaceReply = { + workspace?: Workspace +} + +export type UpdateWorkspaceReq = { + id?: string + name?: string + email?: string + status?: string +} + +export type WebAppAuthInfo = { + allowSso?: boolean + allowEmailCodeLogin?: boolean + allowEmailPasswordLogin?: boolean +} + +export type WebOAuth2LoginReply = { + url?: string + state?: string +} + +export type WebOidcLoginReply = { + url?: string +} + +export type WebSamlLoginReply = { + url?: string +} + +export type Workspace = { + id?: string + name?: string + status?: string + createdAt?: string + owner?: Account +} + +export type WorkspaceInfoReply = { + WorkspaceMembers?: ResourceQuota +} + +export type WorkspacePermission = { + workspaceId?: string + allowMemberInvite?: boolean + allowOwnerTransfer?: boolean +} + +export type Pagination = { + totalCount?: number + perPage?: number + currentPage?: number + totalPages?: number +} + +export type ConsoleSsoOAuth2LoginData = { + body?: never + path?: never + query?: never + url: '/enterprise/sso/oauth2/login' +} + +export type ConsoleSsoOAuth2LoginResponses = { + 200: OAuth2LoginReply +} + +export type ConsoleSsoOAuth2LoginResponse + = ConsoleSsoOAuth2LoginResponses[keyof 
ConsoleSsoOAuth2LoginResponses] + +export type ConsoleSsoOidcLoginData = { + body?: never + path?: never + query?: never + url: '/enterprise/sso/oidc/login' +} + +export type ConsoleSsoOidcLoginResponses = { + 200: OidcReply +} + +export type ConsoleSsoOidcLoginResponse + = ConsoleSsoOidcLoginResponses[keyof ConsoleSsoOidcLoginResponses] + +export type ConsoleSsoSamlLoginData = { + body?: never + path?: never + query?: never + url: '/enterprise/sso/saml/login' +} + +export type ConsoleSsoSamlLoginResponses = { + 200: SamlLoginReply +} + +export type ConsoleSsoSamlLoginResponse + = ConsoleSsoSamlLoginResponses[keyof ConsoleSsoSamlLoginResponses] + +export type WebAppAuthGetWebAppAccessModeData = { + body?: never + path?: never + query?: { + appId?: string + } + url: '/enterprise/webapp/app/access-mode' +} + +export type WebAppAuthGetWebAppAccessModeResponses = { + 200: GetWebAppAccessModeRes +} + +export type WebAppAuthGetWebAppAccessModeResponse + = WebAppAuthGetWebAppAccessModeResponses[keyof WebAppAuthGetWebAppAccessModeResponses] + +export type WebAppAuthUpdateWebAppWhitelistSubjectsData = { + body: UpdateWebAppWhitelistSubjectsReq + path?: never + query?: never + url: '/enterprise/webapp/app/access-mode' +} + +export type WebAppAuthUpdateWebAppWhitelistSubjectsResponses = { + 200: UpdateWebAppWhitelistSubjectsRes +} + +export type WebAppAuthUpdateWebAppWhitelistSubjectsResponse + = WebAppAuthUpdateWebAppWhitelistSubjectsResponses[keyof WebAppAuthUpdateWebAppWhitelistSubjectsResponses] + +export type WebAppAuthSearchForWhilteListCandidatesData = { + body?: never + path?: never + query?: { + keyword?: string + pageNumber?: number + resultsPerPage?: number + groupId?: string + } + url: '/enterprise/webapp/app/subject/search' +} + +export type WebAppAuthSearchForWhilteListCandidatesResponses = { + 200: SearchForWhilteListCandidatesRes +} + +export type WebAppAuthSearchForWhilteListCandidatesResponse + = WebAppAuthSearchForWhilteListCandidatesResponses[keyof 
WebAppAuthSearchForWhilteListCandidatesResponses] + +export type WebAppAuthGetWebAppWhitelistSubjectsData = { + body?: never + path?: never + query?: { + appId?: string + } + url: '/enterprise/webapp/app/subjects' +} + +export type WebAppAuthGetWebAppWhitelistSubjectsResponses = { + 200: GetWebAppWhitelistSubjectsRes +} + +export type WebAppAuthGetWebAppWhitelistSubjectsResponse + = WebAppAuthGetWebAppWhitelistSubjectsResponses[keyof WebAppAuthGetWebAppWhitelistSubjectsResponses] + +export type WebAppAuthGetGroupSubjectsData = { + body?: never + path?: never + query?: { + groupId?: string + } + url: '/enterprise/webapp/group/subjects' +} + +export type WebAppAuthGetGroupSubjectsResponses = { + 200: GetGroupSubjectsRes +} + +export type WebAppAuthGetGroupSubjectsResponse + = WebAppAuthGetGroupSubjectsResponses[keyof WebAppAuthGetGroupSubjectsResponses] + +export type WebAppAuthIsUserAllowedToAccessWebAppData = { + body?: never + path?: never + query?: { + appId?: string + } + url: '/enterprise/webapp/permission' +} + +export type WebAppAuthIsUserAllowedToAccessWebAppResponses = { + 200: IsUserAllowedToAccessWebAppRes +} + +export type WebAppAuthIsUserAllowedToAccessWebAppResponse + = WebAppAuthIsUserAllowedToAccessWebAppResponses[keyof WebAppAuthIsUserAllowedToAccessWebAppResponses] diff --git a/packages/contracts/generated/enterprise/zod.gen.ts b/packages/contracts/generated/enterprise/zod.gen.ts new file mode 100644 index 0000000000..cef500a906 --- /dev/null +++ b/packages/contracts/generated/enterprise/zod.gen.ts @@ -0,0 +1,1183 @@ +// This file is auto-generated by @hey-api/openapi-ts + +import * as z from 'zod' + +/** + * Account represents a basic user account + */ +export const zAccount = z.object({ + id: z.string().optional(), + email: z.string().optional(), + name: z.string().optional(), +}) + +export const zAccountDetailGroup = z.object({ + id: z.string().optional(), + name: z.string().optional(), +}) + +/** + * AccountInWorkspace represents account's role 
in a workspace + */ +export const zAccountInWorkspace = z.object({ + workspaceId: z.string().optional(), + workspaceName: z.string().optional(), + role: z.string().optional(), +}) + +/** + * AccountDetail contains detailed account information + */ +export const zAccountDetail = z.object({ + account: zAccount.optional(), + status: z.string().optional(), + createdAt: z.iso.datetime().optional(), + lastActiveAt: z.iso.datetime().optional(), + workspaces: z.array(zAccountInWorkspace).optional(), + groups: z.array(zAccountDetailGroup).optional(), +}) + +export const zAddGroupAppsRequest = z.object({ + id: z.string().optional(), + app_ids: z.array(z.string()).optional(), +}) + +export const zBrandingInfo = z.object({ + enabled: z.boolean().optional(), + applicationTitle: z.string().optional(), + loginPageLogo: z.string().optional(), + workspaceLogo: z.string().optional(), + favicon: z.string().optional(), +}) + +export const zCheckPasswordStatusReply = z.object({ + requirePasswordChange: z.boolean().optional(), + changeReason: z.int().optional(), + daysToExpire: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + message: z.string().optional(), +}) + +export const zClearDefaultWorkspaceReply = z.record(z.string(), z.unknown()) + +export const zCreateBearerTokenResponse = z.object({ + token: z.string().optional(), +}) + +export const zCreateMemberReply = z.object({ + id: z.string().optional(), + password: z.string().optional(), +}) + +/** + * Create member messages + */ +export const zCreateMemberReq = z.object({ + name: z.string().optional(), + email: z.string().optional(), + status: z.string().optional(), +}) + +export const zCreateNewGroupsReqGroup = z.object({ + name: z.string().optional(), +}) + +export const zCreateNewGroupsReq = z.object({ + groups: z.array(zCreateNewGroupsReqGroup).optional(), +}) + +export const 
zCreateResourceGroupRequest = z.object({ + name: z.string().optional(), + description: z.string().optional(), +}) + +export const zCreateSecretKeyReply = z.object({ + id: z.string().optional(), + name: z.string().optional(), + secretKey: z.string().optional(), + createdAt: z.iso.datetime().optional(), + lastActive: z.iso.datetime().optional(), +}) + +export const zCreateSecretKeyReq = z.object({ + name: z.string().optional(), +}) + +export const zCreateUserReply = z.object({ + id: z.string().optional(), + password: z.string().optional(), +}) + +export const zCreateUserReq = z.object({ + name: z.string().optional(), + email: z.string().optional(), + status: z.string().optional(), +}) + +/** + * Create workspace messages + */ +export const zCreateWorkspaceReq = z.object({ + name: z.string().optional(), + email: z.string().optional(), + status: z.string().optional(), +}) + +export const zCurrentUserReply = z.object({ + id: z.string().optional(), + name: z.string().optional(), + email: z.string().optional(), + interfaceLanguage: z.string().optional(), + timezone: z.string().optional(), +}) + +export const zDashboardSsooidcLoginReply = z.object({ + url: z.string().optional(), + state: z.string().optional(), +}) + +export const zDashboardSsoOauth2LoginReply = z.object({ + url: z.string().optional(), + state: z.string().optional(), +}) + +/** + * Dashboard SSO Login messages + */ +export const zDashboardSsosamlLoginReply = z.object({ + url: z.string().optional(), +}) + +export const zDeleteGroupsRes = z.object({ + message: z.string().optional(), +}) + +export const zDeleteMemberReply = z.object({ + account: zAccount.optional(), +}) + +export const zDeleteSecretKeyReply = z.object({ + message: z.string().optional(), +}) + +export const zDeleteUserReply = z.object({ + account: zAccount.optional(), +}) + +export const zDeleteWorkspaceReply = z.record(z.string(), z.unknown()) + +/** + * System user setting messages + */ +export const zEnterpriseSystemUserSettingReply = 
z.object({ + ssoEnforcedForSignin: z.boolean().optional(), + ssoEnforcedForSigninProtocol: z.string().optional(), + enableEmailPasswordLogin: z.boolean().optional(), +}) + +export const zGetBearerTokenResponse = z.object({ + maskedToken: z.string().optional(), +}) + +export const zGetClusterInfoReply = z.object({ + mode: z.string().optional(), + clusterId: z.string().optional(), + verifyMode: z.string().optional(), +}) + +export const zGetLicenseStatusReply = z.object({ + status: z.string().optional(), +}) + +export const zGetMfaInfoReply = z.object({ + userEnabled: z.boolean().optional(), + userSetup: z.boolean().optional(), + globalEnabled: z.boolean().optional(), +}) + +export const zGetMemberReply = z.object({ + account: zAccountDetail.optional(), +}) + +export const zGetUserReply = z.object({ + account: zAccountDetail.optional(), +}) + +export const zGetWebAppAccessModeRes = z.object({ + accessMode: z.string().optional(), +}) + +export const zGetWebAppAuthInfoRes = z.object({ + allowSso: z.boolean().optional(), + allowEmailCodeLogin: z.boolean().optional(), + allowEmailPasswordLogin: z.boolean().optional(), +}) + +export const zGetWebAppWhitelistSubjectsResMember = z.object({ + id: z.string().optional(), + name: z.string().optional(), + email: z.string().optional(), + avatar: z.string().optional(), +}) + +export const zGroupAppItem = z.object({ + app_id: z.string().optional(), + app_name: z.string().optional(), + workspace_id: z.string().optional(), + workspace_name: z.string().optional(), + app_status: z.int().optional(), + token_usage: z.string().optional(), + rpm: z.string().optional(), + concurrency: z.string().optional(), +}) + +export const zHealthzReply = z.object({ + message: z.string().optional(), + status: z.string().optional(), +}) + +export const zInnerAdmission = z.object({ + marker: z.string().optional(), + concurrencyGroupIds: z.array(z.string()).optional(), +}) + +export const zInnerBatchGetWebAppAccessModesByIdReq = z.object({ + appIds: 
z.array(z.string()).optional(), +}) + +export const zInnerBatchGetWebAppAccessModesByIdRes = z.object({ + accessModes: z.record(z.string(), z.string()).optional(), +}) + +export const zInnerBatchIsUserAllowedToAccessWebAppReq = z.object({ + userId: z.string().optional(), + appIds: z.array(z.string()).optional(), +}) + +export const zInnerBatchIsUserAllowedToAccessWebAppRes = z.object({ + permissions: z.record(z.string(), z.boolean()).optional(), +}) + +export const zInnerCleanAppRes = z.object({ + message: z.string().optional(), +}) + +export const zInnerGetWebAppAccessModeByCodeRes = z.object({ + accessMode: z.string().optional(), +}) + +export const zInnerGetWebAppAccessModeByIdRes = z.object({ + accessMode: z.string().optional(), +}) + +export const zInnerIsUserAllowedToAccessWebAppRes = z.object({ + result: z.boolean().optional(), +}) + +export const zInnerReleaseAdmissionRequest = z.object({ + admission: zInnerAdmission.optional(), +}) + +export const zInnerReleaseAdmissionResponse = z.record(z.string(), z.unknown()) + +export const zInnerTryAddAccountToDefaultWorkspaceReply = z.object({ + workspaceId: z.string().optional(), + joined: z.boolean().optional(), + message: z.string().optional(), +}) + +/** + * Inner API messages + */ +export const zInnerTryAddAccountToDefaultWorkspaceReq = z.object({ + accountId: z.string().optional(), +}) + +export const zIsUserAllowedToAccessWebAppRes = z.object({ + result: z.boolean().optional(), +}) + +export const zJoinWorkspaceReply = z.object({ + message: z.string().optional(), +}) + +/** + * Join workspace messages + */ +export const zJoinWorkspaceReq = z.object({ + id: z.string().optional(), + email: z.string().optional(), + role: z.string().optional(), +}) + +export const zLimitConfig = z.object({ + type: z.int().optional(), + threshold: z.string().optional(), + action: z.int().optional(), + reached: z.boolean().optional(), +}) + +export const zInnerGroupConfig = z.object({ + id: z.string().optional(), + enabled: 
z.boolean().optional(), + membershipId: z.string().optional(), + limits: z.array(zLimitConfig).optional(), +}) + +export const zInnerResolveResponse = z.object({ + appId: z.string().optional(), + groups: z.array(zInnerGroupConfig).optional(), + blocked: z.boolean().optional(), + blockGroupId: z.string().optional(), + blockReason: z.string().optional(), + admission: zInnerAdmission.optional(), +}) + +export const zListGroupAppsResponse = z.object({ + items: z.array(zGroupAppItem).optional(), + total: z.string().optional(), +}) + +export const zLoginTypesReply = z.object({ + enabledEmailCodeLogin: z.boolean().optional(), + enableEmailPasswordLogin: z.boolean().optional(), + isAllowRegister: z.boolean().optional(), + isAllowCreateWorkspace: z.boolean().optional(), +}) + +export const zLoginTypesReq = z.object({ + enabledEmailCodeLogin: z.boolean().optional(), + enableEmailPasswordLogin: z.boolean().optional(), + isAllowRegister: z.boolean().optional(), + isAllowCreateWorkspace: z.boolean().optional(), +}) + +export const zMfaBackupCodesRes = z.object({ + codes: z.array(z.string()).optional(), + validCounts: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + createdAt: z.iso.datetime().optional(), +}) + +export const zMfaDeleteBackupCodesRes = z.object({ + message: z.string().optional(), +}) + +export const zMfaDeleteRes = z.object({ + token: z.string().optional(), +}) + +export const zMfaDownloadBackupCodesSummaryRes = z.object({ + content: z.string().optional(), +}) + +export const zMfaEnrollReq = z.object({ + code: z.string().optional(), +}) + +export const zMfaEnrollRes = z.object({ + token: z.string().optional(), +}) + +export const zMfaGetEnrollInfoRes = z.object({ + qrCode: z.string().optional(), + secret: z.string().optional(), +}) + +export const zMfaModifyRes = z.object({ + message: z.string().optional(), +}) + 
+export const zOAuth2Config = z.object({ + clientId: z.string().optional(), + clientSecret: z.string().optional(), + authUrl: z.string().optional(), + tokenUrl: z.string().optional(), + userinfoUrl: z.string().optional(), + scopes: z.string().optional(), + enablePkce: z.boolean().optional(), +}) + +export const zOAuth2LoginReply = z.object({ + url: z.string().optional(), + state: z.string().optional(), +}) + +export const zOidcConfig = z.object({ + issuerUrl: z.string().optional(), + clientId: z.string().optional(), + clientSecret: z.string().optional(), + enablePkce: z.boolean().optional(), +}) + +export const zOidcReply = z.object({ + url: z.string().optional(), + state: z.string().optional(), +}) + +export const zOtelExporterEndpoint = z.object({ + endpoint: z.string().optional(), + compression: z.string().optional(), + protocol: z.int().optional(), + timeout: z + .string() + .regex(/^-?(?:0|[1-9]\d{0,11})(?:\.\d{1,9})?s$/) + .optional(), + headers: z.record(z.string(), z.string()).optional(), + tlsCaPem: z.string().optional(), + tlsInsecure: z.boolean().optional(), + tlsClientCertPem: z.string().optional(), + tlsClientKeyPem: z.string().optional(), + enabled: z.boolean().optional(), + tlsInsecureSkipVerify: z.boolean().optional(), +}) + +export const zEndpointReply = z.object({ + mode: z.int().optional(), + metricsEndpoint: zOtelExporterEndpoint.optional(), + tracesEndpoint: zOtelExporterEndpoint.optional(), +}) + +export const zOtelExporterStatusReply = z.object({ + connectedAt: z.iso.datetime().optional(), + bytesPushed: z.string().optional(), + itemsInQueue: z.string().optional(), + logs: z.string().optional(), + status: z.int().optional(), +}) + +export const zPasswordPolicyConfig = z.object({ + minLength: z + .int() + .min(0, { error: 'Invalid value: Expected uint32 to be >= 0' }) + .max(4294967295, { error: 'Invalid value: Expected uint32 to be <= 4294967295' }) + .optional(), + requireDigit: z.boolean().optional(), + requireLowercase: 
z.boolean().optional(), + requireUppercase: z.boolean().optional(), + requireSpecial: z.boolean().optional(), + forbidRepeated: z.boolean().optional(), + forbidSequential: z.boolean().optional(), + expiryEnabled: z.boolean().optional(), + expiryDays: z + .int() + .min(0, { error: 'Invalid value: Expected uint32 to be >= 0' }) + .max(4294967295, { error: 'Invalid value: Expected uint32 to be <= 4294967295' }) + .optional(), +}) + +export const zPasswordStrengthReply = z.object({ + level: z.int().optional(), +}) + +export const zPasswordStrengthReq = z.object({ + password: z.string().optional(), +}) + +export const zPluginInstallationPermissionInfo = z.object({ + pluginInstallationScope: z.string().optional(), + restrictToMarketplaceOnly: z.boolean().optional(), +}) + +export const zPluginInstallationSettingsReply = z.object({ + pluginInstallationScope: z.int().optional(), + restrictToMarketplaceOnly: z.boolean().optional(), +}) + +export const zResetMemberPasswordReply = z.object({ + id: z.string().optional(), + password: z.string().optional(), +}) + +/** + * Reset member password messages + */ +export const zResetMemberPasswordReq = z.object({ + id: z.string().optional(), +}) + +export const zResetPasswordReply = z.object({ + message: z.string().optional(), +}) + +/** + * Password reset messages + */ +export const zResetPasswordReq = z.object({ + currentPassword: z.string().optional(), + newPassword: z.string().optional(), + confirmPassword: z.string().optional(), +}) + +export const zResetUserPasswordReply = z.object({ + id: z.string().optional(), + password: z.string().optional(), +}) + +export const zResetUserPasswordReq = z.object({ + id: z.string().optional(), +}) + +export const zResourceGroupDetail = z.object({ + id: z.string().optional(), + name: z.string().optional(), + description: z.string().optional(), + enabled: z.boolean().optional(), + rpm_limit: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + 
.max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + rpm_action: z.int().optional(), + concurrency_limit: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + concurrency_action: z.int().optional(), + token_quota: z.string().optional(), + token_action: z.int().optional(), + created_at: z.string().optional(), + updated_at: z.string().optional(), +}) + +export const zResourceGroupItem = z.object({ + id: z.string().optional(), + name: z.string().optional(), + description: z.string().optional(), + enabled: z.boolean().optional(), + rpm_limit: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + concurrency_limit: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + token_quota: z.string().optional(), + token_usage: z.string().optional(), + app_count: z.string().optional(), + rpm_status: z.int().optional(), + conc_status: z.int().optional(), + created_at: z.string().optional(), + updated_at: z.string().optional(), +}) + +export const zListResourceGroupsResponse = z.object({ + items: z.array(zResourceGroupItem).optional(), + total: z.string().optional(), +}) + +/** + * ResourceQuota represents usage quota for a resource + */ +export const zResourceQuota = z.object({ + used: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + limit: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid 
value: Expected int32 to be <= 2147483647' }) + .optional(), + enabled: z.boolean().optional(), +}) + +export const zLicenseStatus = z.object({ + status: z.string().optional(), + expiredAt: z.string().optional(), + workspaces: zResourceQuota.optional(), +}) + +export const zLimitFields = z.object({ + workspaceMembers: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + workspaces: zResourceQuota.optional(), +}) + +/** + * License information + */ +export const zLicenseInfo = z.object({ + uuid: z.string().optional(), + expiredAt: z.iso.datetime().optional(), + clusterId: z.string().optional(), + product: z.string().optional(), + limits: zLimitFields.optional(), +}) + +/** + * License RPC messages + */ +export const zGetLicenseReply = z.object({ + license: zLicenseInfo.optional(), +}) + +/** + * SSO Configuration messages + */ +export const zSamlConfig = z.object({ + idpSsoUrl: z.string().optional(), + certificate: z.string().optional(), +}) + +export const zSamlLoginReply = z.object({ + url: z.string().optional(), +}) + +export const zSsoIdPProvider = z.object({ + protocol: z.string().optional(), + provider: z.string().optional(), + samlConfig: zSamlConfig.optional(), + oidcConfig: zOidcConfig.optional(), + oauth2Config: zOAuth2Config.optional(), +}) + +export const zSsoSettings = z.object({ + ssoEnforced: z.boolean().optional(), + sessionTimeout: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + ssoIdpProvider: zSsoIdPProvider.optional(), +}) + +export const zAuthSettingsReply = z.object({ + userSsoSettings: zSsoSettings.optional(), + webSsoSettings: zSsoSettings.optional(), + dashboardSsoSettings: zSsoSettings.optional(), + userSsoSamlAcsUrl: z.string().optional(), + 
userSsoOidcCallbackUrl: z.string().optional(), + userSsoOauth2CallbackUrl: z.string().optional(), + webSsoSamlAcsUrl: z.string().optional(), + webSsoOidcCallbackUrl: z.string().optional(), + webSsoOauth2CallbackUrl: z.string().optional(), + webSsoMembersSamlAcsUrl: z.string().optional(), + webSsoMembersOidcCallbackUrl: z.string().optional(), + webSsoMembersOauth2CallbackUrl: z.string().optional(), + dashboardSsoSamlAcsUrl: z.string().optional(), + dashboardSsoOidcCallbackUrl: z.string().optional(), + dashboardSsoOauth2CallbackUrl: z.string().optional(), +}) + +export const zAuthSettingsReq = z.object({ + ssoType: z.string().optional(), + ssoSettings: zSsoSettings.optional(), +}) + +export const zSsoSettingsReply = z.object({ + enabled: z.boolean().optional(), +}) + +export const zScimSettings = z.object({ + enabled: z.boolean().optional(), + lastSyncTime: z.iso.datetime().optional(), +}) + +export const zSearchAppItem = z.object({ + app_id: z.string().optional(), + app_name: z.string().optional(), + workspace_id: z.string().optional(), + workspace_name: z.string().optional(), + app_status: z.int().optional(), + icon: z.string().optional(), + icon_type: z.string().optional(), + icon_background: z.string().optional(), + created_by_name: z.string().optional(), +}) + +export const zSearchAppsResponse = z.object({ + items: z.array(zSearchAppItem).optional(), + total: z.string().optional(), +}) + +export const zSecretKey = z.object({ + id: z.string().optional(), + name: z.string().optional(), + secretKeyMasked: z.string().optional(), + createdAt: z.iso.datetime().optional(), + lastActive: z.iso.datetime().optional(), +}) + +export const zSetDefaultWorkspaceReply = z.object({ + workspaceId: z.string().optional(), +}) + +export const zSetDefaultWorkspaceReq = z.object({ + id: z.string().optional(), +}) + +export const zSubjectAccountData = z.object({ + id: z.string().optional(), + name: z.string().optional(), + email: z.string().optional(), + avatar: z.string().optional(), 
+}) + +export const zSubjectGroupData = z.object({ + id: z.string().optional(), + name: z.string().optional(), + groupSize: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), +}) + +export const zCreateNewGroupsRes = z.object({ + groups: z.array(zSubjectGroupData).optional(), +}) + +export const zGetGroupsRes = z.object({ + groups: z.array(zSubjectGroupData).optional(), +}) + +export const zGetJoinedGroupsRes = z.object({ + groups: z.array(zSubjectGroupData).optional(), +}) + +export const zGetWebAppWhitelistSubjectsRes = z.object({ + groups: z.array(zSubjectGroupData).optional(), + members: z.array(zGetWebAppWhitelistSubjectsResMember).optional(), +}) + +/** + * Subject represents a subject (user or group) in access control + */ +export const zSubject = z.object({ + subjectId: z.string().optional(), + subjectType: z.string().optional(), + accountData: zSubjectAccountData.optional(), + groupData: zSubjectGroupData.optional(), +}) + +export const zGetGroupSubjectsRes = z.object({ + subjects: z.array(zSubject).optional(), +}) + +export const zSearchForWhilteListCandidatesRes = z.object({ + subjects: z.array(zSubject).optional(), + currPage: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + hasMore: z.boolean().optional(), +}) + +export const zSystemUserSettingReply = z.object({ + isAllowRegister: z.boolean().optional(), + enableEmailPasswordLogin: z.boolean().optional(), +}) + +export const zSystemUserSettingReq = z.object({ + isAllowRegister: z.boolean().optional(), + enableEmailPasswordLogin: z.boolean().optional(), +}) + +export const zTestConnectionReply = z.object({ + success: z.boolean().optional(), + error: z.string().optional(), +}) + +export const zToggleEndpointRequest = 
z.object({ + enabled: z.boolean().optional(), +}) + +export const zUpdateAccessModeReq = z.object({ + appId: z.string().optional(), + accessMode: z.string().optional(), +}) + +export const zUpdateAccessModeRes = z.object({ + message: z.string().optional(), +}) + +export const zUpdateBrandingInfoReq = z.object({ + enabled: z.boolean().optional(), + applicationTitle: z.string().optional(), + loginPageLogo: z.string().optional(), + workspaceLogo: z.string().optional(), + favicon: z.string().optional(), +}) + +export const zUpdateGroupSubjectsReq = z.object({ + groupId: z.string().optional(), + subjects: z.array(zSubject).optional(), +}) + +export const zUpdateGroupSubjectsRes = z.object({ + message: z.string().optional(), +}) + +export const zUpdateGroupsReqGroup = z.object({ + id: z.string().optional(), + name: z.string().optional(), +}) + +export const zUpdateGroupsReq = z.object({ + groups: z.array(zUpdateGroupsReqGroup).optional(), +}) + +export const zUpdateGroupsRes = z.object({ + groups: z.array(zSubjectGroupData).optional(), +}) + +export const zUpdateJoinedGroupsReq = z.object({ + accountId: z.string().optional(), + groupIds: z.array(z.string()).optional(), +}) + +export const zUpdateJoinedGroupsRes = z.object({ + message: z.string().optional(), +}) + +export const zUpdateLicenseReply = z.object({ + message: z.string().optional(), +}) + +export const zUpdateLicenseReq = z.object({ + licenseId: z.string().optional(), +}) + +export const zUpdateMfaStatusReq = z.object({ + enabled: z.boolean().optional(), +}) + +export const zUpdateMfaStatusRes = z.object({ + message: z.string().optional(), +}) + +export const zUpdateMemberReply = z.object({ + account: zAccount.optional(), +}) + +/** + * Update member messages + */ +export const zUpdateMemberReq = z.object({ + id: z.string().optional(), + name: z.string().optional(), + email: z.string().optional(), + status: z.string().optional(), +}) + +export const zUpdateMembersInGroupsReq = z.object({ + groupId: 
z.string().optional(), + accountIds: z.array(z.string()).optional(), +}) + +export const zUpdateMembersInGroupsRes = z.object({ + message: z.string().optional(), +}) + +export const zUpdateOfflineLicenseReply = z.object({ + message: z.string().optional(), +}) + +export const zUpdateOfflineLicenseReq = z.object({ + offlineCode: z.string().optional(), +}) + +export const zUpdatePluginInstallationSettingsRequest = z.object({ + pluginInstallationScope: z.int().optional(), + restrictToMarketplaceOnly: z.boolean().optional(), +}) + +export const zUpdateResourceGroupRequest = z.object({ + id: z.string().optional(), + name: z.string().optional(), + description: z.string().optional(), + enabled: z.boolean().optional(), + rpm_limit: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + rpm_action: z.int().optional(), + concurrency_limit: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + concurrency_action: z.int().optional(), + token_quota: z.string().optional(), + token_action: z.int().optional(), +}) + +export const zUpdateUserReply = z.object({ + account: zAccountDetail.optional(), +}) + +export const zUpdateUserReq = z.object({ + id: z.string().optional(), + name: z.string().optional(), + email: z.string().optional(), + status: z.string().optional(), +}) + +/** + * Web app auth info messages + */ +export const zUpdateWebAppAuthInfoReq = z.object({ + allowSso: z.boolean().optional(), + allowEmailCodeLogin: z.boolean().optional(), + allowEmailPasswordLogin: z.boolean().optional(), +}) + +export const zUpdateWebAppAuthInfoRes = z.object({ + message: z.string().optional(), +}) + +export const zUpdateWebAppWhitelistSubjectsReq = z.object({ + appId: z.string().optional(), + subjects: 
z.array(zSubject).optional(), + accessMode: z.string().optional(), +}) + +export const zUpdateWebAppWhitelistSubjectsRes = z.object({ + message: z.string().optional(), +}) + +/** + * Update workspace messages + */ +export const zUpdateWorkspaceReq = z.object({ + id: z.string().optional(), + name: z.string().optional(), + email: z.string().optional(), + status: z.string().optional(), +}) + +export const zWebAppAuthInfo = z.object({ + allowSso: z.boolean().optional(), + allowEmailCodeLogin: z.boolean().optional(), + allowEmailPasswordLogin: z.boolean().optional(), +}) + +/** + * Info configuration messages + */ +export const zInfoConfigReply = z.object({ + SSOEnforcedForSignin: z.boolean().optional(), + SSOEnforcedForSigninProtocol: z.string().optional(), + SSOEnforcedForWeb: z.boolean().optional(), + SSOEnforcedForWebProtocol: z.string().optional(), + EnableEmailCodeLogin: z.boolean().optional(), + EnableEmailPasswordLogin: z.boolean().optional(), + IsAllowRegister: z.boolean().optional(), + IsAllowCreateWorkspace: z.boolean().optional(), + License: zLicenseStatus.optional(), + Branding: zBrandingInfo.optional(), + WebAppAuth: zWebAppAuthInfo.optional(), + PluginInstallationPermission: zPluginInstallationPermissionInfo.optional(), +}) + +export const zWebOAuth2LoginReply = z.object({ + url: z.string().optional(), + state: z.string().optional(), +}) + +export const zWebOidcLoginReply = z.object({ + url: z.string().optional(), +}) + +export const zWebSamlLoginReply = z.object({ + url: z.string().optional(), +}) + +/** + * Workspace represents a workspace entity + */ +export const zWorkspace = z.object({ + id: z.string().optional(), + name: z.string().optional(), + status: z.string().optional(), + createdAt: z.iso.datetime().optional(), + owner: zAccount.optional(), +}) + +export const zCreateWorkspaceReply = z.object({ + workspace: zWorkspace.optional(), +}) + +export const zGetDefaultWorkspaceReply = z.object({ + workspaceId: z.string().optional(), + workspace: 
zWorkspace.optional(), +}) + +export const zGetWorkspaceReply = z.object({ + workspace: zWorkspace.optional(), +}) + +export const zUpdateWorkspaceReply = z.object({ + workspace: zWorkspace.optional(), +}) + +export const zWorkspaceInfoReply = z.object({ + WorkspaceMembers: zResourceQuota.optional(), +}) + +/** + * Workspace permission + */ +export const zWorkspacePermission = z.object({ + workspaceId: z.string().optional(), + allowMemberInvite: z.boolean().optional(), + allowOwnerTransfer: z.boolean().optional(), +}) + +export const zGetWorkspacePermissionReply = z.object({ + permission: zWorkspacePermission.optional(), +}) + +export const zUpdateWorkspacePermissionReply = z.object({ + message: z.string().optional(), + permission: zWorkspacePermission.optional(), +}) + +/** + * Update workspace permission messages + */ +export const zUpdateWorkspacePermissionReq = z.object({ + id: z.string().optional(), + permission: zWorkspacePermission.optional(), +}) + +/** + * Pagination : Just for pagination by page + */ +export const zPagination = z.object({ + totalCount: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + perPage: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + currentPage: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + totalPages: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), +}) + +export const zListMembersReply = z.object({ + data: z.array(zAccountDetail).optional(), + 
pagination: zPagination.optional(), +}) + +export const zListSecretKeysReply = z.object({ + data: z.array(zSecretKey).optional(), + pagination: zPagination.optional(), +}) + +export const zListUsersReply = z.object({ + data: z.array(zAccountDetail).optional(), + pagination: zPagination.optional(), +}) + +export const zListWorkspacesReply = z.object({ + data: z.array(zWorkspace).optional(), + pagination: zPagination.optional(), +}) + +/** + * OK + */ +export const zConsoleSsoOAuth2LoginResponse = zOAuth2LoginReply + +/** + * OK + */ +export const zConsoleSsoOidcLoginResponse = zOidcReply + +/** + * OK + */ +export const zConsoleSsoSamlLoginResponse = zSamlLoginReply + +export const zWebAppAuthGetWebAppAccessModeQuery = z.object({ + appId: z.string().optional(), +}) + +/** + * OK + */ +export const zWebAppAuthGetWebAppAccessModeResponse = zGetWebAppAccessModeRes + +export const zWebAppAuthUpdateWebAppWhitelistSubjectsBody = zUpdateWebAppWhitelistSubjectsReq + +/** + * OK + */ +export const zWebAppAuthUpdateWebAppWhitelistSubjectsResponse = zUpdateWebAppWhitelistSubjectsRes + +export const zWebAppAuthSearchForWhilteListCandidatesQuery = z.object({ + keyword: z.string().optional(), + pageNumber: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + resultsPerPage: z + .int() + .min(-2147483648, { error: 'Invalid value: Expected int32 to be >= -2147483648' }) + .max(2147483647, { error: 'Invalid value: Expected int32 to be <= 2147483647' }) + .optional(), + groupId: z.string().optional(), +}) + +/** + * OK + */ +export const zWebAppAuthSearchForWhilteListCandidatesResponse = zSearchForWhilteListCandidatesRes + +export const zWebAppAuthGetWebAppWhitelistSubjectsQuery = z.object({ + appId: z.string().optional(), +}) + +/** + * OK + */ +export const zWebAppAuthGetWebAppWhitelistSubjectsResponse = zGetWebAppWhitelistSubjectsRes + 
+export const zWebAppAuthGetGroupSubjectsQuery = z.object({ + groupId: z.string().optional(), +}) + +/** + * OK + */ +export const zWebAppAuthGetGroupSubjectsResponse = zGetGroupSubjectsRes + +export const zWebAppAuthIsUserAllowedToAccessWebAppQuery = z.object({ + appId: z.string().optional(), +}) + +/** + * OK + */ +export const zWebAppAuthIsUserAllowedToAccessWebAppResponse = zIsUserAllowedToAccessWebAppRes diff --git a/packages/contracts/openapi-ts.api.config.ts b/packages/contracts/openapi-ts.api.config.ts new file mode 100644 index 0000000000..79c8ec8322 --- /dev/null +++ b/packages/contracts/openapi-ts.api.config.ts @@ -0,0 +1,610 @@ +import type { UserConfig } from '@hey-api/openapi-ts' +import fs from 'node:fs' +import path from 'node:path' +import { fileURLToPath } from 'node:url' +import { defineConfig } from '@hey-api/openapi-ts' + +type JsonObject = Record + +type SwaggerSchema = JsonObject & { + '$defs'?: Record + '$ref'?: string + 'x-nullable'?: boolean + 'additionalProperties'?: unknown + 'anyOf'?: SwaggerSchema[] + 'const'?: unknown + 'default'?: unknown + 'definitions'?: Record + 'description'?: string + 'enum'?: unknown[] + 'format'?: string + 'items'?: SwaggerSchema + 'properties'?: Record + 'required'?: string[] + 'type'?: string +} + +type SwaggerParameter = JsonObject & { + in?: string + name?: string + required?: boolean + schema?: SwaggerSchema + type?: string +} + +type SwaggerResponse = JsonObject & { + description?: string + schema?: SwaggerSchema +} + +type SwaggerOperation = JsonObject & { + operationId?: string + parameters?: SwaggerParameter[] + responses?: Record +} + +type SwaggerDocument = JsonObject & { + definitions?: Record + paths?: Record> +} + +type ApiSpec = { + filename: string + name: string +} + +type ApiJob = { + document: SwaggerDocument + outputPath: string +} + +type ApiContractOperation = { + method: string + path: string +} + +const currentDir = path.dirname(fileURLToPath(import.meta.url)) +const apiOpenApiDir = 
path.resolve(currentDir, 'openapi') + +const operationMethods = new Set(['delete', 'get', 'patch', 'post', 'put']) + +const apiSpecs: ApiSpec[] = [ + { filename: 'console-swagger.json', name: 'console' }, + { filename: 'web-swagger.json', name: 'web' }, + { filename: 'service-swagger.json', name: 'service' }, +] + +const isObject = (value: unknown): value is JsonObject => { + return !!value && typeof value === 'object' && !Array.isArray(value) +} + +const unknownObjectSchema = (): SwaggerSchema => ({ + additionalProperties: true, + type: 'object', +}) + +const toWords = (value: string) => { + return value + .replace(/[{}]/g, '') + .replace(/([a-z0-9])([A-Z])/g, '$1 $2') + .split(/[^a-z0-9]+/i) + .filter(Boolean) +} + +const toPascalCase = (words: string[]) => { + return words.map(word => `${word.charAt(0).toUpperCase()}${word.slice(1)}`).join('') +} + +const toCamelCase = (words: string[]) => { + const pascal = toPascalCase(words) + return `${pascal.charAt(0).toLowerCase()}${pascal.slice(1)}` +} + +const toKebabCase = (value: string) => { + return toWords(value).join('-').toLowerCase() +} + +const segmentWords = (segment: string) => { + if (segment.startsWith('{') && segment.endsWith('}')) + return ['by', ...toWords(segment)] + + return toWords(segment) +} + +const routeWords = (routePath: string) => { + return routePath + .split('/') + .filter(Boolean) + .flatMap(segmentWords) +} + +const operationId = (method: string, routePath: string) => { + return toCamelCase([method, ...(routeWords(routePath).length > 0 ? routeWords(routePath) : ['root'])]) +} + +const contractPathSegments = (operation: ApiContractOperation) => { + const segments = operation.path + .split('/') + .filter(Boolean) + .map(segment => toCamelCase(segmentWords(segment))) + + return [...(segments.length > 0 ? 
segments : ['root']), operation.method.toLowerCase()] +} + +const readApiSwagger = (filename: string): SwaggerDocument => { + const specPath = path.join(apiOpenApiDir, filename) + + if (!fs.existsSync(specPath)) { + throw new Error( + `Missing API OpenAPI spec: ${specPath}. Run "pnpm gen-api-openapi" from packages/contracts/ first.`, + ) + } + + const rawSpec = JSON.parse(fs.readFileSync(specPath, 'utf8')) + if (!isObject(rawSpec) || !isObject(rawSpec.paths)) + throw new Error(`Invalid API OpenAPI spec: ${specPath}`) + + return rawSpec as SwaggerDocument +} + +const clone = (value: T): T => { + return JSON.parse(JSON.stringify(value)) as T +} + +const collectDefinitionRefs = (value: unknown, refs: Set, visited = new WeakSet()) => { + if (!value || typeof value !== 'object') + return + + if (visited.has(value)) + return + + visited.add(value) + + if (Array.isArray(value)) { + value.forEach(item => collectDefinitionRefs(item, refs, visited)) + return + } + + const objectValue = value as JsonObject + const ref = objectValue.$ref + if (typeof ref === 'string' && ref.startsWith('#/definitions/')) + refs.add(ref.slice('#/definitions/'.length)) + + Object.values(objectValue).forEach(item => collectDefinitionRefs(item, refs, visited)) +} + +const removeNullDefaults = (value: unknown, visited = new WeakSet()) => { + if (!value || typeof value !== 'object' || visited.has(value)) + return + + visited.add(value) + + if (Array.isArray(value)) { + value.forEach(item => removeNullDefaults(item, visited)) + return + } + + const schema = value as SwaggerSchema + if (schema.default === null) + delete schema.default + + Object.values(schema).forEach(item => removeNullDefaults(item, visited)) +} + +const isNullSchema = (schema: SwaggerSchema) => { + return schema.type === 'null' +} + +const normalizeNullableAnyOf = (value: unknown, visited = new WeakSet()) => { + if (!value || typeof value !== 'object' || visited.has(value)) + return + + visited.add(value) + + if 
(Array.isArray(value)) { + value.forEach(item => normalizeNullableAnyOf(item, visited)) + return + } + + const schema = value as SwaggerSchema + + if (Array.isArray(schema.anyOf)) { + const nonNullSchemas = schema.anyOf.filter(item => !isNullSchema(item)) + const hasNullSchema = nonNullSchemas.length !== schema.anyOf.length + + if (hasNullSchema && nonNullSchemas.length === 1) { + const { anyOf: _anyOf, ...rest } = schema + Object.keys(schema).forEach(key => delete schema[key]) + Object.assign(schema, rest, nonNullSchemas[0], { 'x-nullable': true }) + } + } + + Object.values(schema).forEach(item => normalizeNullableAnyOf(item, visited)) +} + +const hoistNestedDefinitions = (definitions: Record) => { + const visited = new WeakSet() + + const visit = (value: unknown) => { + if (!value || typeof value !== 'object' || visited.has(value)) + return + + visited.add(value) + + if (Array.isArray(value)) { + value.forEach(visit) + return + } + + const schema = value as SwaggerSchema + for (const key of ['$defs', 'definitions'] as const) { + const nestedDefinitions = schema[key] + if (!isObject(nestedDefinitions)) + continue + + for (const [name, nestedSchema] of Object.entries(nestedDefinitions)) { + definitions[name] ??= nestedSchema + visit(nestedSchema) + } + + delete schema[key] + } + + Object.values(schema).forEach(visit) + } + + Object.values(definitions).forEach(visit) +} + +const ensureReferencedDefinitions = (document: SwaggerDocument) => { + const definitions = document.definitions ??= {} + const refs = new Set() + collectDefinitionRefs(document, refs) + + for (const refName of refs) + definitions[refName] ??= unknownObjectSchema() +} + +const resolveDefinitionRef = ( + schema: SwaggerSchema | undefined, + definitions: Record, +): SwaggerSchema | undefined => { + const ref = schema?.$ref + + if (!ref?.startsWith('#/definitions/')) + return schema + + return definitions[ref.slice('#/definitions/'.length)] ?? 
schema +} + +const withoutNullableWrapper = (schema: SwaggerSchema | undefined): SwaggerSchema => { + if (!schema) + return {} + + const nonNullSchema = schema.anyOf?.find(item => item.type !== 'null') + if (!nonNullSchema) + return schema + + const { anyOf: _anyOf, ...rest } = schema + return { + ...rest, + ...nonNullSchema, + } +} + +const isNullEnumItem = (item: unknown) => { + return isObject(item) && (item.type === 'null' || item.const === null) +} + +const markNullableEnumSchema = (ctx: { schema: JsonObject }): undefined => { + const items = ctx.schema.items + + if (ctx.schema['x-nullable'] !== true || !Array.isArray(items) || items.some(isNullEnumItem)) + return undefined + + // Hey API's enum visitors infer nullable from a null enum item, not x-nullable. + ctx.schema.items = [...items, { const: null, type: 'null' }] + + return undefined +} + +const queryParameterFromSchema = ( + name: string, + schema: SwaggerSchema | undefined, + required: boolean, +): SwaggerParameter => { + const querySchema = withoutNullableWrapper(schema) + const parameter: SwaggerParameter = { + in: 'query', + name, + required, + } + + if (querySchema.default !== undefined) + parameter.default = querySchema.default + + if (querySchema.description) + parameter.description = querySchema.description + + if (querySchema.enum) + parameter.enum = querySchema.enum + + if (querySchema.format) + parameter.format = querySchema.format + + if (querySchema.items) + parameter.items = querySchema.items + + for (const key of [ + 'exclusiveMaximum', + 'exclusiveMinimum', + 'maxItems', + 'maxLength', + 'maximum', + 'minItems', + 'minLength', + 'minimum', + 'multipleOf', + 'pattern', + 'uniqueItems', + 'x-nullable', + ]) { + if (querySchema[key] !== undefined) + parameter[key] = querySchema[key] + } + + parameter.type = ['array', 'boolean', 'integer', 'number', 'string'].includes(querySchema.type ?? '') + ? 
querySchema.type + : 'string' + + return parameter +} + +const mergeQueryParameter = ( + parameters: SwaggerParameter[], + queryParameter: SwaggerParameter, +) => { + const existingIndex = parameters.findIndex((parameter) => { + return parameter.in === 'query' && parameter.name === queryParameter.name + }) + + if (existingIndex === -1) { + parameters.push(queryParameter) + return + } + + const existingParameter = parameters[existingIndex] + if (!existingParameter) { + parameters.push(queryParameter) + return + } + + parameters[existingIndex] = { + ...existingParameter, + ...queryParameter, + description: queryParameter.description ?? existingParameter.description, + required: Boolean(existingParameter.required) || Boolean(queryParameter.required), + } +} + +const normalizeGetBodyParameters = ( + operation: SwaggerOperation, + definitions: Record, +) => { + if (!Array.isArray(operation.parameters)) + return + + const bodyParameters: SwaggerParameter[] = [] + const normalizedParameters: SwaggerParameter[] = [] + + for (const parameter of operation.parameters) { + if (parameter.in === 'body') { + bodyParameters.push(parameter) + continue + } + + normalizedParameters.push(parameter) + } + + for (const parameter of bodyParameters) { + const schema = resolveDefinitionRef(parameter.schema, definitions) + const properties = schema?.properties ?? {} + const required = new Set(schema?.required ?? 
[]) + + for (const [name, propertySchema] of Object.entries(properties)) { + mergeQueryParameter( + normalizedParameters, + queryParameterFromSchema(name, propertySchema, required.has(name)), + ) + } + } + + operation.parameters = normalizedParameters +} + +const normalizeResponses = (operation: SwaggerOperation) => { + const responses = operation.responses ??= {} + + for (const response of Object.values(responses)) { + if (!response.schema) + response.schema = unknownObjectSchema() + } + + if (!Object.keys(responses).some(status => /^2\d\d$/.test(status))) { + responses['200'] = { + description: 'Success', + schema: unknownObjectSchema(), + } + } +} + +const normalizeOperations = (document: SwaggerDocument) => { + const definitions = document.definitions ??= {} + + for (const [routePath, pathItem] of Object.entries(document.paths ?? {})) { + for (const [method, operation] of Object.entries(pathItem)) { + if (!operationMethods.has(method) || !isObject(operation)) + continue + + const swaggerOperation = operation as SwaggerOperation + swaggerOperation.operationId = operationId(method, routePath) + + normalizeResponses(swaggerOperation) + + if (method === 'get') + normalizeGetBodyParameters(swaggerOperation, definitions) + } + } +} + +const normalizeApiSwagger = (document: SwaggerDocument) => { + document.definitions ??= {} + + // Flask-RESTX emits Pydantic nested $defs inside individual schemas while + // refs point at the root Swagger 2.0 definitions object. + hoistNestedDefinitions(document.definitions) + ensureReferencedDefinitions(document) + normalizeNullableAnyOf(document) + removeNullDefaults(document) + normalizeOperations(document) + + return document +} + +const topLevelPathSegment = (routePath: string) => { + return routePath.split('/').filter(Boolean)[0] ?? 
'root' +} + +const selectReferencedDefinitions = ( + definitions: Record, + paths: Record>, +) => { + const selectedDefinitions: Record = {} + const pendingRefs = new Set() + collectDefinitionRefs(paths, pendingRefs) + + while (pendingRefs.size > 0) { + const refName = pendingRefs.values().next().value + if (!refName) + break + + pendingRefs.delete(refName) + + if (selectedDefinitions[refName]) + continue + + selectedDefinitions[refName] = definitions[refName] ?? unknownObjectSchema() + + const nestedRefs = new Set() + collectDefinitionRefs(selectedDefinitions[refName], nestedRefs) + for (const nestedRef of nestedRefs) { + if (!selectedDefinitions[nestedRef]) + pendingRefs.add(nestedRef) + } + } + + return selectedDefinitions +} + +const cloneDocumentWithPaths = ( + document: SwaggerDocument, + paths: Record>, +) => { + const { definitions: _definitions, paths: _paths, ...metadata } = document + const clonedPaths = clone(paths) + + return { + ...clone(metadata), + definitions: selectReferencedDefinitions(document.definitions ?? {}, clonedPaths), + paths: clonedPaths, + } satisfies SwaggerDocument +} + +const splitConsoleDocument = (document: SwaggerDocument) => { + const pathsBySegment = new Map>>() + + for (const [routePath, pathItem] of Object.entries(document.paths ?? {})) { + const segment = topLevelPathSegment(routePath) + const paths = pathsBySegment.get(segment) ?? 
{} + paths[routePath] = pathItem + pathsBySegment.set(segment, paths) + } + + return [...pathsBySegment.entries()] + .sort(([left], [right]) => left.localeCompare(right)) + .map(([segment, paths]): ApiJob => ({ + document: cloneDocumentWithPaths(document, paths), + outputPath: `generated/api/console/${toKebabCase(segment)}`, + })) +} + +const createApiJobs = (spec: ApiSpec): ApiJob[] => { + const document = normalizeApiSwagger(readApiSwagger(spec.filename)) + + if (spec.name === 'console') + return splitConsoleDocument(document) + + return [ + { + document, + outputPath: `generated/api/${spec.name}`, + }, + ] +} + +const createApiConfig = (job: ApiJob): UserConfig => ({ + input: job.document, + logs: { + file: false, + }, + output: { + entryFile: false, + fileName: { + suffix: '.gen', + }, + path: job.outputPath, + postProcess: [ + { + args: ['fmt', '{{path}}'], + command: 'vp', + }, + { + args: ['--fix', '{{path}}/*.ts'], + command: 'eslint', + }, + ], + }, + plugins: [ + { + 'comments': false, + 'name': '@hey-api/typescript', + '~resolvers': { + enum: markNullableEnumSchema, + }, + }, + { + 'name': 'zod', + '~resolvers': { + enum: markNullableEnumSchema, + }, + }, + { + contracts: { + contractName: { + casing: 'camelCase', + name: '{{name}}', + }, + nesting: contractPathSegments, + segmentName: { + casing: 'camelCase', + name: '{{name}}', + }, + strategy: 'single', + }, + name: 'orpc', + validator: 'zod', + }, + ], +}) + +export default defineConfig(apiSpecs.flatMap(createApiJobs).map(createApiConfig)) diff --git a/packages/contracts/openapi-ts.enterprise.config.ts b/packages/contracts/openapi-ts.enterprise.config.ts new file mode 100644 index 0000000000..3c9bc903ab --- /dev/null +++ b/packages/contracts/openapi-ts.enterprise.config.ts @@ -0,0 +1,119 @@ +import fs from 'node:fs' +import path from 'node:path' +import { fileURLToPath } from 'node:url' +import { defineConfig } from '@hey-api/openapi-ts' +import yaml from 'js-yaml' + +type JsonObject = Record + +type 
OpenApiDocument = JsonObject & { + paths?: Record +} + +type ContractOperation = { + id: string + operationId?: string + tags?: readonly string[] +} + +const currentDir = path.dirname(fileURLToPath(import.meta.url)) +const enterpriseServerDir = process.env.DIFY_ENTERPRISE_SERVER + ? path.resolve(process.env.DIFY_ENTERPRISE_SERVER) + : path.resolve(currentDir, '../../../dify-enterprise/server') +const enterpriseOpenApiPath = path.join(enterpriseServerDir, 'pkg/apis/enterprise/openapi.yaml') + +const isConsoleApiPath = (routePath: string) => routePath.startsWith('/console/api/') + +const stripConsoleApiPrefix = (routePath: string) => { + if (isConsoleApiPath(routePath)) + return routePath.replace('/console/api', '') + + return routePath +} + +const stripSchemaNamePrefix = (schemaName: string) => { + return schemaName + .replace(/^dify\.enterprise\.api\.enterprise\./, '') + .replace(/^pagination\./, '') +} + +const contractNameSegments = (operation: ContractOperation) => { + const operationId = operation.operationId || operation.id + const tag = operation.tags?.[0] + const tagPrefixPattern = tag ? new RegExp(`^${tag}[._/-]`) : undefined + const name = tagPrefixPattern ? operationId.replace(tagPrefixPattern, '') : operationId + const segments = name.split(/[._/-]+/).filter(Boolean) + + return segments.length > 0 ? segments : [operationId] +} + +const contractPathSegments = (operation: ContractOperation) => { + return [operation.tags?.[0] || 'default', ...contractNameSegments(operation)] +} + +const normalizeEnterpriseOpenApi = () => { + const openApi = yaml.load(fs.readFileSync(enterpriseOpenApiPath, 'utf8')) + + if (!openApi || typeof openApi !== 'object' || Array.isArray(openApi)) + throw new Error(`Invalid enterprise OpenAPI document: ${enterpriseOpenApiPath}`) + + const document = openApi as OpenApiDocument + const paths = document.paths ?? 
{} + + document.paths = Object.fromEntries( + Object.entries(paths) + .filter(([routePath]) => isConsoleApiPath(routePath)) + .map(([routePath, pathItem]) => [stripConsoleApiPrefix(routePath), pathItem]), + ) + + return document +} + +export default defineConfig({ + input: normalizeEnterpriseOpenApi(), + output: { + entryFile: false, + path: 'generated/enterprise', + fileName: { + suffix: '.gen', + }, + postProcess: [ + { + command: 'vp', + args: ['fmt', '{{path}}'], + }, + { + command: 'eslint', + args: ['--fix', '{{path}}/*.ts'], + }, + ], + }, + parser: { + transforms: { + schemaName: stripSchemaNamePrefix, + }, + }, + plugins: [ + { + name: '@hey-api/typescript', + comments: false, + }, + 'zod', + { + name: 'orpc', + contracts: { + strategy: 'single', + contractName: { + name: '{{name}}', + casing: 'camelCase', + }, + nesting: contractPathSegments, + segmentName: { + name: '{{name}}', + casing: 'camelCase', + }, + }, + validator: 'zod', + }, + ], +}) diff --git a/packages/contracts/package.json b/packages/contracts/package.json new file mode 100644 index 0000000000..5e9af5e0f1 --- /dev/null +++ b/packages/contracts/package.json @@ -0,0 +1,37 @@ +{ + "name": "@dify/contracts", + "type": "module", + "version": "0.0.0-private", + "private": true, + "exports": { + "./api/*": { + "types": "./generated/api/*.ts", + "import": "./generated/api/*.ts" + }, + "./enterprise/*": { + "types": "./generated/enterprise/*.ts", + "import": "./generated/enterprise/*.ts" + } + }, + "scripts": { + "gen-api-contract": "pnpm gen-api-openapi && node -e \"fs.rmSync('generated/api', { recursive: true, force: true })\" && openapi-ts -f openapi-ts.api.config.ts", + "gen-api-openapi": "uv run --project ../../api ../../api/dev/generate_swagger_specs.py --output-dir openapi", + "gen-enterprise-contract": "openapi-ts -f openapi-ts.enterprise.config.ts", + "type-check": "tsgo" + }, + "dependencies": { + "@orpc/contract": "catalog:", + "zod": "catalog:" + }, + "devDependencies": { + 
"@dify/tsconfig": "workspace:*", + "@hey-api/openapi-ts": "catalog:", + "@types/js-yaml": "catalog:", + "@types/node": "catalog:", + "@typescript/native-preview": "catalog:", + "eslint": "catalog:", + "js-yaml": "catalog:", + "typescript": "catalog:", + "vite-plus": "catalog:" + } +} diff --git a/packages/contracts/tsconfig.json b/packages/contracts/tsconfig.json new file mode 100644 index 0000000000..4ebf36d2d3 --- /dev/null +++ b/packages/contracts/tsconfig.json @@ -0,0 +1,10 @@ +{ + "extends": "@dify/tsconfig/node.json", + "include": [ + "*.ts", + "generated/**/*.ts" + ], + "exclude": [ + "node_modules" + ] +} diff --git a/packages/dev-proxy/README.md b/packages/dev-proxy/README.md new file mode 100644 index 0000000000..6b9d7298c4 --- /dev/null +++ b/packages/dev-proxy/README.md @@ -0,0 +1,196 @@ +# @langgenius/dev-proxy + +Generic Hono-based development proxy for frontend projects. The package does not ship any product-specific routes, cookie names, or environment variable conventions. Every proxied path and upstream target is declared in a local config file. + +## Installation + +```bash +pnpm add -D @langgenius/dev-proxy +``` + +Add a script in your frontend project: + +```json +{ + "scripts": { + "dev:proxy": "dev-proxy --config ./dev-proxy.config.ts --env-file ./.env" + } +} +``` + +Run it with: + +```bash +pnpm dev:proxy +``` + +## CLI + +```bash +dev-proxy --config ./dev-proxy.config.ts +``` + +Supported options: + +- `--config`, `-c`: config file path. Defaults to `dev-proxy.config.ts`. +- `--env-file`: load environment variables before evaluating the config file. +- `--host`: override `server.host` from config. +- `--port`: override `server.port` from config. +- `--help`, `-h`: print help. + +`--target` is not supported. Put targets in the config file so routes and upstreams stay explicit. 
+ +## Config Shape + +```ts +import { defineDevProxyConfig } from '@langgenius/dev-proxy' + +export default defineDevProxyConfig({ + server: { + host: '127.0.0.1', + port: 5001, + }, + routes: [ + { + paths: '/api', + target: 'https://example.com', + }, + ], + cors: { + allowedOrigins: 'local', + }, +}) +``` + +Config files can be `.ts`, `.mts`, `.js`, or `.mjs`. + +`routes` are matched in declaration order. The first matching route wins. Each configured path matches both the exact path and all child paths, so `paths: '/api'` matches `/api`, `/api/apps`, and `/api/apps/123`. + +By default, credentialed CORS is allowed for local development origins such as `localhost`, `127.0.0.1`, and `::1`. To restrict it to specific origins: + +``` +cors: { + allowedOrigins: ['http://localhost:3000'], +} +``` + +## Scenario 1: Proxy One Local Route Group To An Online Backend + +Use this when a local frontend should call an online backend through one proxy server. For example, the frontend calls `http://127.0.0.1:5001/api/apps`, and the proxy forwards it to `https://cloud.example.com/api/apps`. + +```ts +import { defineDevProxyConfig } from '@langgenius/dev-proxy' + +const target = process.env.DEV_PROXY_TARGET || 'https://cloud.example.com' + +export default defineDevProxyConfig({ + server: { + host: process.env.DEV_PROXY_HOST || '127.0.0.1', + port: Number(process.env.DEV_PROXY_PORT || 5001), + }, + routes: [ + { + paths: '/api', + target, + }, + ], +}) +``` + +Optional `.env`: + +```env +DEV_PROXY_TARGET=https://cloud.example.com +DEV_PROXY_HOST=127.0.0.1 +DEV_PROXY_PORT=5001 +``` + +Command: + +```bash +dev-proxy --config ./dev-proxy.config.ts --env-file ./.env +``` + +## Scenario 2: Proxy Two Route Groups To Two Local Backends + +Use this when one frontend needs to talk to two different local services. 
For example: + +- `/console/api/*` goes to a local console backend at `http://127.0.0.1:5001` +- `/api/*` goes to a local public API backend at `http://127.0.0.1:5002` + +```ts +import { defineDevProxyConfig } from '@langgenius/dev-proxy' + +const consoleApiTarget = process.env.DEV_PROXY_CONSOLE_API_TARGET || 'http://127.0.0.1:5001' +const publicApiTarget = process.env.DEV_PROXY_PUBLIC_API_TARGET || 'http://127.0.0.1:5002' + +export default defineDevProxyConfig({ + server: { + host: process.env.DEV_PROXY_HOST || '127.0.0.1', + port: Number(process.env.DEV_PROXY_PORT || 8082), + }, + routes: [ + { + paths: '/console/api', + target: consoleApiTarget, + }, + { + paths: '/api', + target: publicApiTarget, + }, + ], +}) +``` + +Optional `.env`: + +```env +DEV_PROXY_CONSOLE_API_TARGET=http://127.0.0.1:5001 +DEV_PROXY_PUBLIC_API_TARGET=http://127.0.0.1:5002 +DEV_PROXY_HOST=127.0.0.1 +DEV_PROXY_PORT=8082 +``` + +When two route groups overlap, put the more specific one first: + +```ts +routes: [ + { paths: '/api/enterprise', target: 'http://127.0.0.1:5003' }, + { paths: '/api', target: 'http://127.0.0.1:5002' }, +] +``` + +## Cookie Rewrite + +Cookie rewriting is opt-in and config-driven. The package does not know any application cookie names. + +Use `cookieRewrite` when an upstream uses secure cookie prefixes such as `__Host-` or `__Secure-`, but local development needs cookies to work over `http://localhost`. + +```ts +import type { CookieRewriteOptions } from '@langgenius/dev-proxy' +import { defineDevProxyConfig } from '@langgenius/dev-proxy' + +const cookieRewrite: CookieRewriteOptions = { + hostPrefixCookies: ['access_token', 'refresh_token', /^passport-/], +} + +export default defineDevProxyConfig({ + routes: [ + { + paths: '/api', + target: 'https://cloud.example.com', + cookieRewrite, + }, + ], +}) +``` + +Set `cookieRewrite: false` to disable cookie rewriting for a route. + +## Behavior + +- The proxy preserves the matched path prefix when forwarding requests. 
+- Request bodies are forwarded as streams. +- Hop-by-hop headers are removed before forwarding. +- Local credentialed CORS and preflight requests are handled by the proxy. +- Route matching is explicit and order-sensitive. diff --git a/packages/dev-proxy/bin/dev-proxy.js b/packages/dev-proxy/bin/dev-proxy.js new file mode 100755 index 0000000000..02e37f3525 --- /dev/null +++ b/packages/dev-proxy/bin/dev-proxy.js @@ -0,0 +1,3 @@ +#!/usr/bin/env node + +import '../dist/cli.mjs' diff --git a/packages/dev-proxy/package.json b/packages/dev-proxy/package.json new file mode 100644 index 0000000000..d5524290eb --- /dev/null +++ b/packages/dev-proxy/package.json @@ -0,0 +1,43 @@ +{ + "name": "@langgenius/dev-proxy", + "type": "module", + "version": "0.0.5", + "exports": { + ".": { + "types": "./dist/index.d.mts", + "import": "./dist/index.mjs" + } + }, + "types": "./dist/index.d.mts", + "bin": { + "dev-proxy": "./bin/dev-proxy.js" + }, + "files": [ + "bin", + "dist", + "src" + ], + "engines": { + "node": "^22.22.1" + }, + "scripts": { + "build": "vp pack", + "prepare": "pnpm run build", + "test": "vp test", + "type-check": "tsgo", + "prepublish": "pnpm run build" + }, + "dependencies": { + "@hono/node-server": "catalog:", + "c12": "catalog:", + "hono": "catalog:" + }, + "devDependencies": { + "@dify/tsconfig": "workspace:*", + "@types/node": "catalog:", + "@typescript/native-preview": "catalog:", + "vite": "catalog:", + "vite-plus": "catalog:", + "vitest": "catalog:" + } +} diff --git a/packages/dev-proxy/src/cli.spec.ts b/packages/dev-proxy/src/cli.spec.ts new file mode 100644 index 0000000000..e8a87a0588 --- /dev/null +++ b/packages/dev-proxy/src/cli.spec.ts @@ -0,0 +1,158 @@ +/** + * @vitest-environment node + */ +import type { ChildProcessByStdio } from 'node:child_process' +import type { Readable } from 'node:stream' +import { spawn } from 'node:child_process' +import { once } from 'node:events' +import fs from 'node:fs/promises' +import net from 'node:net' +import os 
from 'node:os' +import path from 'node:path' +import { fileURLToPath } from 'node:url' +import { afterEach, describe, expect, it } from 'vitest' + +const tempDirs: string[] = [] +type DevProxyCliProcess = ChildProcessByStdio + +const childProcesses: DevProxyCliProcess[] = [] +const binPath = fileURLToPath(new URL('../bin/dev-proxy.js', import.meta.url)) + +const createTempDir = async () => { + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'dev-proxy-cli-test-')) + tempDirs.push(tempDir) + return tempDir +} + +const getFreePort = async () => { + const server = net.createServer() + await new Promise((resolve, reject) => { + server.once('error', reject) + server.listen(0, '127.0.0.1', resolve) + }) + + const address = server.address() + if (!address || typeof address === 'string') + throw new Error('Failed to allocate a test port.') + + const { port } = address + await new Promise((resolve, reject) => { + server.close((error) => { + if (error) + reject(error) + else + resolve() + }) + }) + + return port +} + +const waitForOutput = ( + child: DevProxyCliProcess, + output: () => string, + expectedOutput: string, +) => new Promise((resolve, reject) => { + let timeout: ReturnType + + function cleanup() { + clearTimeout(timeout) + child.stdout.off('data', onData) + child.stderr.off('data', onData) + child.off('exit', onExit) + } + + function onData() { + if (!output().includes(expectedOutput)) + return + + cleanup() + resolve() + } + + function onExit(code: number | null, signal: NodeJS.Signals | null) { + cleanup() + reject(new Error(`dev-proxy exited before writing "${expectedOutput}" with code ${code} and signal ${signal}. Output:\n${output()}`)) + } + + timeout = setTimeout(() => { + cleanup() + reject(new Error(`Timed out waiting for "${expectedOutput}". 
Output:\n${output()}`)) + }, 3000) + + child.stdout.on('data', onData) + child.stderr.on('data', onData) + child.once('exit', onExit) + onData() +}) + +const spawnCli = (args: readonly string[], cwd: string) => { + const child = spawn(process.execPath, [binPath, ...args], { + cwd, + env: { + ...process.env, + FORCE_COLOR: '0', + }, + stdio: ['ignore', 'pipe', 'pipe'], + }) + childProcesses.push(child) + return child +} + +const stopChildProcess = async (child: DevProxyCliProcess) => { + if (child.exitCode !== null || child.signalCode !== null) + return + + child.kill('SIGTERM') + await once(child, 'exit') +} + +describe('dev proxy CLI', () => { + afterEach(async () => { + await Promise.all(childProcesses.splice(0).map(stopChildProcess)) + await Promise.all(tempDirs.splice(0).map(tempDir => fs.rm(tempDir, { + force: true, + recursive: true, + }))) + }) + + // Scenario: help output should still be a normal short-lived command. + it('should print help and exit', async () => { + // Arrange + const tempDir = await createTempDir() + const child = spawnCli(['--help'], tempDir) + + // Act + const [code] = await once(child, 'exit') + + // Assert + expect(code).toBe(0) + }) + + // Scenario: successful server startup should keep the CLI process alive. 
+ it('should keep running after starting the proxy server', async () => { + // Arrange + const tempDir = await createTempDir() + const port = await getFreePort() + await fs.writeFile(path.join(tempDir, 'dev-proxy.config.ts'), ` + export default { + routes: [{ paths: '/api', target: 'https://api.example.com' }], + } + `) + + let output = '' + const child = spawnCli(['--config', './dev-proxy.config.ts', '--host', '127.0.0.1', '--port', String(port)], tempDir) + child.stdout.on('data', chunk => output += chunk.toString()) + child.stderr.on('data', chunk => output += chunk.toString()) + + // Act + await waitForOutput(child, () => output, `[dev-proxy] listening on http://127.0.0.1:${port}`) + await new Promise(resolve => setTimeout(resolve, 100)) + const response = await fetch(`http://127.0.0.1:${port}/not-proxied`) + + // Assert + expect(child.exitCode).toBeNull() + expect(child.signalCode).toBeNull() + expect(response.status).toBe(404) + }) +}) diff --git a/packages/dev-proxy/src/cli.ts b/packages/dev-proxy/src/cli.ts new file mode 100644 index 0000000000..05234cb359 --- /dev/null +++ b/packages/dev-proxy/src/cli.ts @@ -0,0 +1,56 @@ +import process from 'node:process' +import { serve } from '@hono/node-server' +import { loadDevProxyConfig, parseDevProxyCliArgs, resolveDevProxyServerOptions } from './config' +import { createDevProxyApp } from './server' + +function printUsage() { + console.log(`Usage: + dev-proxy --config [options] + +Options: + --config, -c Path to a dev proxy config file. Defaults to dev-proxy.config.ts. + --env-file Load environment variables before evaluating the config file. + --host Override the configured host. + --port Override the configured port. 
+ --help, -h Show this help message.`) +} + +async function flushStandardStreams() { + await Promise.all([ + new Promise(resolve => process.stdout.write('', () => resolve())), + new Promise(resolve => process.stderr.write('', () => resolve())), + ]) +} + +async function main() { + const cliOptions = parseDevProxyCliArgs(process.argv.slice(2)) + + if (cliOptions.help) { + printUsage() + return + } + + const config = await loadDevProxyConfig(cliOptions.config, process.cwd(), { + envFile: cliOptions.envFile, + }) + const { host, port } = resolveDevProxyServerOptions(config.server, cliOptions) + const app = createDevProxyApp(config) + + serve({ + fetch: app.fetch, + hostname: host, + port, + }) + + console.log(`[dev-proxy] listening on http://${host}:${port}`) +} + +try { + await main() + await flushStandardStreams() +} +catch (error) { + console.error(error instanceof Error ? error.message : error) + await flushStandardStreams() + process.exit(1) +} diff --git a/packages/dev-proxy/src/config.spec.ts b/packages/dev-proxy/src/config.spec.ts new file mode 100644 index 0000000000..6f681bcbae --- /dev/null +++ b/packages/dev-proxy/src/config.spec.ts @@ -0,0 +1,145 @@ +/** + * @vitest-environment node + */ +import fs from 'node:fs/promises' +import os from 'node:os' +import path from 'node:path' +import { afterEach, describe, expect, it } from 'vitest' +import { loadDevProxyConfig, parseDevProxyCliArgs, resolveDevProxyServerOptions } from './config' + +const tempDirs: string[] = [] + +const createTempDir = async () => { + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), 'dev-proxy-test-')) + tempDirs.push(tempDir) + return tempDir +} + +describe('dev proxy config', () => { + afterEach(async () => { + delete process.env.DEV_PROXY_TEST_PORT + delete process.env.DEV_PROXY_TEST_TARGET + + await Promise.all(tempDirs.splice(0).map(tempDir => fs.rm(tempDir, { + force: true, + recursive: true, + }))) + }) + + // Scenario: CLI options should support both inline and separated 
values. + it('should parse proxy CLI options', () => { + // Act + const options = parseDevProxyCliArgs([ + '--config=./dev-proxy.config.ts', + '--env-file', + './.env.proxy', + '--host', + '0.0.0.0', + '--port', + '8083', + ]) + + // Assert + expect(options).toEqual({ + config: './dev-proxy.config.ts', + envFile: './.env.proxy', + host: '0.0.0.0', + port: '8083', + }) + }) + + // Scenario: removed target shortcuts should fail instead of silently doing the wrong thing. + it('should reject unsupported target shortcuts', () => { + // Assert + expect(() => parseDevProxyCliArgs(['--target', 'enterprise'])).toThrow('Unsupported dev proxy option') + }) + + // Scenario: package manager argument separators should not be treated as proxy options. + it('should ignore package manager argument separators', () => { + // Act + const options = parseDevProxyCliArgs(['--config', './dev-proxy.config.ts', '--', '--help']) + + // Assert + expect(options).toEqual({ + config: './dev-proxy.config.ts', + help: true, + }) + }) + + // Scenario: CLI host and port should override config defaults. + it('should resolve server options with CLI overrides', () => { + // Act + const options = resolveDevProxyServerOptions({ + host: '127.0.0.1', + port: 5001, + }, { + host: '0.0.0.0', + port: '9002', + }) + + // Assert + expect(options).toEqual({ + host: '0.0.0.0', + port: 9002, + }) + }) + + // Scenario: TS config files should load through c12. 
+ it('should load a TypeScript config file', async () => { + // Arrange + const tempDir = await createTempDir() + await fs.writeFile(path.join(tempDir, 'dev-proxy.config.ts'), ` + export default { + server: { host: '127.0.0.1', port: 7777 }, + routes: [{ paths: ['/api', '/files'], target: 'https://api.example.com' }], + } + `) + + // Act + const config = await loadDevProxyConfig('dev-proxy.config.ts', tempDir) + + // Assert + expect(config.server).toEqual({ + host: '127.0.0.1', + port: 7777, + }) + expect(config.routes).toEqual([ + { + paths: ['/api', '/files'], + target: 'https://api.example.com', + }, + ]) + }) + + // Scenario: env files should be loaded before the TypeScript config is evaluated. + it('should load a TypeScript config file with env file values', async () => { + // Arrange + const tempDir = await createTempDir() + await fs.writeFile(path.join(tempDir, '.env.proxy'), [ + 'DEV_PROXY_TEST_PORT=7788', + 'DEV_PROXY_TEST_TARGET=https://env.example.com', + ].join('\n')) + await fs.writeFile(path.join(tempDir, 'dev-proxy.config.ts'), ` + export default { + server: { port: Number(process.env.DEV_PROXY_TEST_PORT) }, + routes: [{ paths: '/api', target: process.env.DEV_PROXY_TEST_TARGET }], + } + `) + + // Act + const config = await loadDevProxyConfig('dev-proxy.config.ts', tempDir, { + envFile: '.env.proxy', + }) + + // Assert + expect(config.server).toEqual({ + port: 7788, + }) + expect(config.routes).toEqual([ + { + paths: '/api', + target: 'https://env.example.com', + }, + ]) + }) +}) diff --git a/packages/dev-proxy/src/config.ts b/packages/dev-proxy/src/config.ts new file mode 100644 index 0000000000..b23cb0a152 --- /dev/null +++ b/packages/dev-proxy/src/config.ts @@ -0,0 +1,129 @@ +import type { DotenvOptions } from 'c12' +import type { DevProxyCliOptions, DevProxyConfig, DevProxyConfigLoadOptions, DevProxyServerConfig, ResolvedDevProxyServerOptions } from './types' +import path from 'node:path' +import { loadConfig } from 'c12' + +const 
DEFAULT_CONFIG_FILE = 'dev-proxy.config.ts' +const DEFAULT_PROXY_HOST = '127.0.0.1' +const DEFAULT_PROXY_PORT = 5001 + +const OPTION_NAME_TO_KEY = { + '--config': 'config', + '-c': 'config', + '--env-file': 'envFile', + '--host': 'host', + '--port': 'port', +} as const + +type OptionName = keyof typeof OPTION_NAME_TO_KEY + +const isOptionName = (value: string): value is OptionName => value in OPTION_NAME_TO_KEY + +const requireOptionValue = (name: string, value?: string) => { + if (!value || value.startsWith('-')) + throw new Error(`Missing value for ${name}.`) + + return value +} + +export const parseDevProxyCliArgs = (argv: readonly string[]): DevProxyCliOptions => { + const options: DevProxyCliOptions = {} + + for (let index = 0; index < argv.length; index += 1) { + const arg = argv[index]! + + if (arg === '--') + continue + + if (arg === '--help' || arg === '-h') { + options.help = true + continue + } + + const [rawName, inlineValue] = arg.split('=', 2) + const name = rawName ?? '' + + if (!name.startsWith('-')) + continue + + if (!isOptionName(name)) + throw new Error(`Unsupported dev proxy option "${name}".`) + + const key = OPTION_NAME_TO_KEY[name] + options[key] = inlineValue ?? requireOptionValue(name, argv[index + 1]) + + if (inlineValue === undefined) + index += 1 + } + + return options +} + +const resolvePort = (rawPort: string | number) => { + const port = Number(rawPort) + if (!Number.isInteger(port) || port < 1 || port > 65535) + throw new Error(`Invalid proxy port "${rawPort}". Expected an integer between 1 and 65535.`) + + return port +} + +export const resolveDevProxyServerOptions = ( + serverConfig: DevProxyServerConfig = {}, + cliOptions: DevProxyCliOptions = {}, +): ResolvedDevProxyServerOptions => { + const configuredPort = cliOptions.port ?? serverConfig.port ?? 
DEFAULT_PROXY_PORT + + return { + host: cliOptions.host || serverConfig.host || DEFAULT_PROXY_HOST, + port: resolvePort(configuredPort), + } +} + +const isRecord = (value: unknown): value is Record => + typeof value === 'object' && value !== null + +export function assertDevProxyConfig(config: unknown): asserts config is DevProxyConfig { + if (!isRecord(config)) + throw new Error('Dev proxy config must export an object.') + + if (!Array.isArray(config.routes)) + throw new Error('Dev proxy config must include a routes array.') +} + +const resolveDotenvOptions = ( + envFile: DevProxyConfigLoadOptions['envFile'], + cwd: string, +): DotenvOptions | false => { + if (!envFile) + return false + + const resolvedEnvFilePath = path.resolve(cwd, envFile) + return { + cwd: path.dirname(resolvedEnvFilePath), + fileName: path.basename(resolvedEnvFilePath), + interpolate: true, + } +} + +export const loadDevProxyConfig = async ( + configPath = DEFAULT_CONFIG_FILE, + cwd = process.cwd(), + options: DevProxyConfigLoadOptions = {}, +): Promise => { + const resolvedConfigPath = path.resolve(cwd, configPath) + const parsedPath = path.parse(resolvedConfigPath) + const { config: loadedConfig } = await loadConfig({ + configFile: parsedPath.name, + cwd: parsedPath.dir, + dotenv: resolveDotenvOptions(options.envFile, cwd), + envName: false, + globalRc: false, + packageJson: false, + rcFile: false, + }) + + assertDevProxyConfig(loadedConfig) + return loadedConfig +} + +export const defineDevProxyConfig = (config: DevProxyConfig) => config diff --git a/packages/dev-proxy/src/cookies.spec.ts b/packages/dev-proxy/src/cookies.spec.ts new file mode 100644 index 0000000000..4a1b614eeb --- /dev/null +++ b/packages/dev-proxy/src/cookies.spec.ts @@ -0,0 +1,44 @@ +/** + * @vitest-environment node + */ +import { describe, expect, it } from 'vitest' +import { rewriteCookieHeaderForUpstream, rewriteSetCookieHeadersForLocal } from './cookies' + +describe('dev proxy cookies', () => { + // Scenario: cookie 
names should only receive secure host prefixes when configured. + it('should rewrite configured cookie names for HTTPS upstream requests', () => { + // Act + const cookieHeader = rewriteCookieHeaderForUpstream('access_token=abc; theme=dark; passport-app=def', { + hostPrefixCookies: ['access_token', /^passport-/], + useHostPrefix: true, + }) + + // Assert + expect(cookieHeader).toBe('__Host-access_token=abc; theme=dark; __Host-passport-app=def') + }) + + // Scenario: HTTP upstreams should keep local cookie names even when rewrite config exists. + it('should keep local cookie names for HTTP upstream requests', () => { + // Act + const cookieHeader = rewriteCookieHeaderForUpstream('access_token=abc; refresh_token=def', { + hostPrefixCookies: ['access_token', 'refresh_token'], + useHostPrefix: false, + }) + + // Assert + expect(cookieHeader).toBe('access_token=abc; refresh_token=def') + }) + + // Scenario: upstream set-cookie headers should be converted into localhost-safe cookies. + it('should rewrite upstream set-cookie headers for local development', () => { + // Act + const cookies = rewriteSetCookieHeadersForLocal([ + '__Host-access_token=abc; Path=/console/api; Domain=cloud.example.com; Secure; SameSite=None; Partitioned', + ]) + + // Assert + expect(cookies).toEqual([ + 'access_token=abc; Path=/; SameSite=Lax', + ]) + }) +}) diff --git a/web/plugins/dev-proxy/cookies.ts b/packages/dev-proxy/src/cookies.ts similarity index 58% rename from web/plugins/dev-proxy/cookies.ts rename to packages/dev-proxy/src/cookies.ts index c606322e96..61fdb6abd4 100644 --- a/web/plugins/dev-proxy/cookies.ts +++ b/packages/dev-proxy/src/cookies.ts @@ -1,4 +1,4 @@ -const DEFAULT_PROXY_TARGET = 'https://cloud.dify.ai' +import type { CookieRewriteOptions } from './types' const SECURE_COOKIE_PREFIX_PATTERN = /^__(Host|Secure)-/ const SAME_SITE_NONE_PATTERN = /^samesite=none$/i @@ -7,39 +7,43 @@ const COOKIE_DOMAIN_PATTERN = /^domain=/i const COOKIE_SECURE_PATTERN = /^secure$/i const 
COOKIE_PARTITIONED_PATTERN = /^partitioned$/i -const HOST_PREFIX_COOKIE_NAMES = new Set([ - 'access_token', - 'csrf_token', - 'refresh_token', - 'webapp_access_token', -]) +const stripSecureCookiePrefix = (cookieName: string) => cookieName.replace(SECURE_COOKIE_PREFIX_PATTERN, '') -const isPassportCookie = (cookieName: string) => cookieName.startsWith('passport-') +const matchesCookieName = (cookieName: string, matcher: string | RegExp) => + typeof matcher === 'string' + ? matcher === cookieName + : matcher.test(cookieName) -const shouldUseHostPrefix = (cookieName: string) => { - const normalizedCookieName = cookieName.replace(SECURE_COOKIE_PREFIX_PATTERN, '') - return HOST_PREFIX_COOKIE_NAMES.has(normalizedCookieName) || isPassportCookie(normalizedCookieName) +const shouldUseHostPrefix = (cookieName: string, options: CookieRewriteOptions) => { + const normalizedCookieName = stripSecureCookiePrefix(cookieName) + + return options.hostPrefixCookies?.some(matcher => matchesCookieName(normalizedCookieName, matcher)) || false } -const toUpstreamCookieName = (cookieName: string) => { +const toUpstreamCookieName = (cookieName: string, options: CookieRewriteOptions) => { if (cookieName.startsWith('__Host-')) return cookieName if (cookieName.startsWith('__Secure-')) - return `__Host-${cookieName.replace(SECURE_COOKIE_PREFIX_PATTERN, '')}` + return `__Host-${stripSecureCookiePrefix(cookieName)}` - if (!shouldUseHostPrefix(cookieName)) + if (!shouldUseHostPrefix(cookieName, options)) return cookieName return `__Host-${cookieName}` } -const toLocalCookieName = (cookieName: string) => cookieName.replace(SECURE_COOKIE_PREFIX_PATTERN, '') +export const toLocalCookieName = (cookieName: string) => stripSecureCookiePrefix(cookieName) -export const rewriteCookieHeaderForUpstream = (cookieHeader?: string) => { +export const rewriteCookieHeaderForUpstream = ( + cookieHeader: string | undefined, + options: CookieRewriteOptions & { useHostPrefix?: boolean }, +) => { if (!cookieHeader) 
return cookieHeader + const { useHostPrefix = true } = options + return cookieHeader .split(/;\s*/) .filter(Boolean) @@ -50,7 +54,11 @@ export const rewriteCookieHeaderForUpstream = (cookieHeader?: string) => { const cookieName = cookie.slice(0, separatorIndex).trim() const cookieValue = cookie.slice(separatorIndex + 1) - return `${toUpstreamCookieName(cookieName)}=${cookieValue}` + const upstreamCookieName = useHostPrefix + ? toUpstreamCookieName(cookieName, options) + : cookieName + + return `${upstreamCookieName}=${cookieValue}` }) .join('; ') } @@ -84,15 +92,5 @@ const rewriteSetCookieValueForLocal = (setCookieValue: string) => { return [`${toLocalCookieName(cookieName)}=${cookieValue}`, ...rewrittenAttributes].join('; ') } -export const rewriteSetCookieHeadersForLocal = (setCookieHeaders?: string | string[]): string[] | undefined => { - if (!setCookieHeaders) - return undefined - - const normalizedHeaders = Array.isArray(setCookieHeaders) - ? setCookieHeaders - : [setCookieHeaders] - - return normalizedHeaders.map(rewriteSetCookieValueForLocal) -} - -export { DEFAULT_PROXY_TARGET } +export const rewriteSetCookieHeadersForLocal = (setCookieHeaders: readonly string[]) => + setCookieHeaders.map(rewriteSetCookieValueForLocal) diff --git a/packages/dev-proxy/src/index.ts b/packages/dev-proxy/src/index.ts new file mode 100644 index 0000000000..e35893b98f --- /dev/null +++ b/packages/dev-proxy/src/index.ts @@ -0,0 +1,22 @@ +export { + assertDevProxyConfig, + defineDevProxyConfig, + loadDevProxyConfig, + parseDevProxyCliArgs, + resolveDevProxyServerOptions, +} from './config' +export { rewriteCookieHeaderForUpstream, rewriteSetCookieHeadersForLocal, toLocalCookieName } from './cookies' +export { buildUpstreamUrl, createDevProxyApp, isAllowedDevOrigin, isAllowedLocalDevOrigin } from './server' +export type { + CookieNameMatcher, + CookieRewriteOptions, + CreateDevProxyAppOptions, + DevProxyCliOptions, + DevProxyConfig, + DevProxyConfigLoadOptions, + 
DevProxyCorsAllowedOrigins, + DevProxyCorsConfig, + DevProxyRoute, + DevProxyServerConfig, + ResolvedDevProxyServerOptions, +} from './types' diff --git a/packages/dev-proxy/src/server.spec.ts b/packages/dev-proxy/src/server.spec.ts new file mode 100644 index 0000000000..32c16a1807 --- /dev/null +++ b/packages/dev-proxy/src/server.spec.ts @@ -0,0 +1,242 @@ +/** + * @vitest-environment node + */ +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { buildUpstreamUrl, createDevProxyApp, isAllowedDevOrigin } from './server' + +describe('dev proxy server', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + // Scenario: target paths should not be duplicated when the incoming route already includes them. + it('should preserve prefixed targets when building upstream URLs', () => { + // Act + const url = buildUpstreamUrl('https://api.example.com/console/api', '/console/api/apps', '?page=1') + + // Assert + expect(url.href).toBe('https://api.example.com/console/api/apps?page=1') + }) + + // Scenario: only localhost dev origins should be reflected for credentialed CORS by default. + it('should only allow local development origins by default', () => { + // Assert + expect(isAllowedDevOrigin('http://localhost:3000')).toBe(true) + expect(isAllowedDevOrigin('http://127.0.0.1:3000')).toBe(true) + expect(isAllowedDevOrigin('https://example.com')).toBe(false) + }) + + // Scenario: explicit CORS origins should support non-local development hosts. + it('should allow explicitly configured origins', () => { + // Assert + expect(isAllowedDevOrigin('https://app.example.com', ['https://app.example.com'])).toBe(true) + expect(isAllowedDevOrigin('https://other.example.com', ['https://app.example.com'])).toBe(false) + }) + + // Scenario: proxy requests should rewrite cookies and surface credentialed CORS headers when configured. 
+ it('should proxy api requests with configured local cookie rewriting', async () => { + // Arrange + const fetchImpl = vi.fn().mockResolvedValue(new Response('ok', { + status: 200, + headers: [ + ['content-encoding', 'br'], + ['content-length', '123'], + ['set-cookie', '__Host-access_token=abc; Path=/console/api; Domain=cloud.example.com; Secure; SameSite=None'], + ['transfer-encoding', 'chunked'], + ], + })) + const app = createDevProxyApp({ + routes: [ + { + paths: '/console/api', + target: 'https://cloud.example.com', + cookieRewrite: { + hostPrefixCookies: ['access_token'], + }, + }, + ], + fetchImpl, + }) + + // Act + const response = await app.request('http://127.0.0.1:5001/console/api/apps?page=1', { + headers: { + 'Origin': 'http://localhost:3000', + 'Cookie': 'access_token=abc; theme=dark', + 'Accept-Encoding': 'zstd, br, gzip', + }, + }) + + // Assert + expect(fetchImpl).toHaveBeenCalledTimes(1) + expect(fetchImpl).toHaveBeenCalledWith( + new URL('https://cloud.example.com/console/api/apps?page=1'), + expect.objectContaining({ + method: 'GET', + headers: expect.any(Headers), + }), + ) + + const requestHeaders = fetchImpl.mock.calls[0]?.[1]?.headers + if (!(requestHeaders instanceof Headers)) + throw new Error('Expected proxy request headers to be Headers') + + expect(requestHeaders.get('cookie')).toBe('__Host-access_token=abc; theme=dark') + expect(requestHeaders.get('origin')).toBe('https://cloud.example.com') + expect(requestHeaders.get('accept-encoding')).toBe('identity') + expect(response.headers.get('access-control-allow-origin')).toBe('http://localhost:3000') + expect(response.headers.get('access-control-allow-credentials')).toBe('true') + expect(response.headers.get('content-encoding')).toBeNull() + expect(response.headers.get('content-length')).toBeNull() + expect(response.headers.get('transfer-encoding')).toBeNull() + expect(response.headers.getSetCookie()).toEqual([ + 'access_token=abc; Path=/; SameSite=Lax', + ]) + }) + + // Scenario: generic 
proxy routes should not know Dify cookie names by default. + it('should not rewrite cookie names when cookie rewriting is not configured', async () => { + // Arrange + const fetchImpl = vi.fn().mockResolvedValue(new Response('ok')) + const app = createDevProxyApp({ + routes: [ + { + paths: '/api', + target: 'https://api.example.com', + }, + ], + fetchImpl, + }) + + // Act + await app.request('http://127.0.0.1:5001/api/messages', { + headers: { + Cookie: 'access_token=abc; refresh_token=def', + }, + }) + + // Assert + const requestHeaders = fetchImpl.mock.calls[0]?.[1]?.headers + if (!(requestHeaders instanceof Headers)) + throw new Error('Expected proxy request headers to be Headers') + + expect(requestHeaders.get('cookie')).toBe('access_token=abc; refresh_token=def') + }) + + // Scenario: local HTTP upstreams expect local cookie names even when cookie rewriting is configured. + it('should keep local cookie names for HTTP upstream targets', async () => { + // Arrange + const fetchImpl = vi.fn().mockResolvedValue(new Response('ok')) + const app = createDevProxyApp({ + routes: [ + { + paths: '/console/api', + target: 'http://127.0.0.1:5001', + cookieRewrite: { + hostPrefixCookies: ['access_token', 'refresh_token'], + }, + }, + ], + fetchImpl, + }) + + // Act + await app.request('http://127.0.0.1:5010/console/api/account/profile', { + headers: { + Cookie: 'access_token=abc; refresh_token=def', + }, + }) + + // Assert + const requestHeaders = fetchImpl.mock.calls[0]?.[1]?.headers + if (!(requestHeaders instanceof Headers)) + throw new Error('Expected proxy request headers to be Headers') + + expect(requestHeaders.get('cookie')).toBe('access_token=abc; refresh_token=def') + }) + + // Scenario: custom route paths should support independent upstream targets. 
+ it('should proxy custom route paths to their configured targets', async () => { + // Arrange + const fetchImpl = vi.fn().mockResolvedValue(new Response('ok')) + const app = createDevProxyApp({ + routes: [ + { + paths: '/api', + target: 'https://api.example.com', + }, + { + paths: '/files', + target: 'https://files.example.com/assets', + }, + ], + fetchImpl, + }) + + // Act + await app.request('http://127.0.0.1:5001/api/messages') + await app.request('http://127.0.0.1:5001/files/logo.png?size=small') + + // Assert + expect(fetchImpl.mock.calls.map(([url]) => url.toString())).toEqual([ + 'https://api.example.com/api/messages', + 'https://files.example.com/assets/files/logo.png?size=small', + ]) + }) + + // Scenario: routes are matched in config order so callers can put specific routes first. + it('should prefer earlier route entries', async () => { + // Arrange + const fetchImpl = vi.fn().mockResolvedValue(new Response('ok')) + const app = createDevProxyApp({ + routes: [ + { + paths: '/api/enterprise', + target: 'https://enterprise.example.com', + }, + { + paths: '/api', + target: 'https://api.example.com', + }, + ], + fetchImpl, + }) + + // Act + await app.request('http://127.0.0.1:5001/api/enterprise/sso/login') + + // Assert + expect(fetchImpl.mock.calls.map(([url]) => url.toString())).toEqual([ + 'https://enterprise.example.com/api/enterprise/sso/login', + ]) + }) + + // Scenario: preflight requests should advertise allowed headers for credentialed cross-origin calls. 
+ it('should answer CORS preflight requests', async () => { + // Arrange + const app = createDevProxyApp({ + routes: [ + { + paths: '/api', + target: 'https://api.example.com', + }, + ], + fetchImpl: vi.fn(), + }) + + // Act + const response = await app.request('http://127.0.0.1:5001/api/messages', { + method: 'OPTIONS', + headers: { + 'Origin': 'http://localhost:3000', + 'Access-Control-Request-Headers': 'authorization,content-type,x-csrf-token', + }, + }) + + // Assert + expect(response.status).toBe(204) + expect(response.headers.get('access-control-allow-origin')).toBe('http://localhost:3000') + expect(response.headers.get('access-control-allow-credentials')).toBe('true') + expect(response.headers.get('access-control-allow-headers')).toBe('authorization,content-type,x-csrf-token') + }) +}) diff --git a/packages/dev-proxy/src/server.ts b/packages/dev-proxy/src/server.ts new file mode 100644 index 0000000000..79654750da --- /dev/null +++ b/packages/dev-proxy/src/server.ts @@ -0,0 +1,254 @@ +import type { Context, Hono } from 'hono' +import type { CookieRewriteOptions, CreateDevProxyAppOptions, DevProxyCorsAllowedOrigins, DevProxyRoute } from './types' +import { Hono as HonoApp } from 'hono' +import { rewriteCookieHeaderForUpstream, rewriteSetCookieHeadersForLocal } from './cookies' + +const LOCAL_DEV_HOSTS = new Set(['localhost', '127.0.0.1', '[::1]', '::1']) +const ALLOW_METHODS = 'GET,HEAD,POST,PUT,PATCH,DELETE,OPTIONS' +const DEFAULT_ALLOW_HEADERS = 'Authorization, Content-Type, X-CSRF-Token' +const UPSTREAM_ACCEPT_ENCODING = 'identity' +const RESPONSE_HEADERS_TO_DROP = [ + 'connection', + 'content-encoding', + 'content-length', + 'keep-alive', + 'proxy-authenticate', + 'proxy-authorization', + 'te', + 'trailer', + 'transfer-encoding', + 'upgrade', +] as const + +const appendHeaderValue = (headers: Headers, name: string, value: string) => { + const currentValue = headers.get(name) + if (!currentValue) { + headers.set(name, value) + return + } + + if 
(currentValue.split(',').map(item => item.trim()).includes(value)) + return + + headers.set(name, `${currentValue}, ${value}`) +} + +export const isAllowedLocalDevOrigin = (origin?: string | null) => { + if (!origin) + return false + + try { + const url = new URL(origin) + return LOCAL_DEV_HOSTS.has(url.hostname) + } + catch { + return false + } +} + +export const isAllowedDevOrigin = ( + origin?: string | null, + allowedOrigins: DevProxyCorsAllowedOrigins = 'local', +) => { + if (!origin) + return false + + if (allowedOrigins === 'local') + return isAllowedLocalDevOrigin(origin) + + return allowedOrigins.includes(origin) +} + +const applyCorsHeaders = ( + headers: Headers, + origin: string | undefined | null, + allowedOrigins: DevProxyCorsAllowedOrigins = 'local', +) => { + if (!isAllowedDevOrigin(origin, allowedOrigins)) + return + + headers.set('Access-Control-Allow-Origin', origin!) + headers.set('Access-Control-Allow-Credentials', 'true') + appendHeaderValue(headers, 'Vary', 'Origin') +} + +export const buildUpstreamUrl = (target: string, requestPath: string, search = '') => { + const targetUrl = new URL(target) + const normalizedTargetPath = targetUrl.pathname === '/' ? '' : targetUrl.pathname.replace(/\/$/, '') + const normalizedRequestPath = requestPath.startsWith('/') ? requestPath : `/${requestPath}` + const hasTargetPrefix = normalizedTargetPath + && (normalizedRequestPath === normalizedTargetPath || normalizedRequestPath.startsWith(`${normalizedTargetPath}/`)) + + targetUrl.pathname = hasTargetPrefix + ? 
normalizedRequestPath + : `${normalizedTargetPath}${normalizedRequestPath}` + targetUrl.search = search + + return targetUrl +} + +const createProxyRequestHeaders = ( + request: Request, + targetUrl: URL, + cookieRewrite: CookieRewriteOptions | false | undefined, +) => { + const headers = new Headers(request.headers) + headers.delete('host') + headers.set('accept-encoding', UPSTREAM_ACCEPT_ENCODING) + + if (headers.has('origin')) + headers.set('origin', targetUrl.origin) + + if (cookieRewrite) { + const rewrittenCookieHeader = rewriteCookieHeaderForUpstream(headers.get('cookie') || undefined, { + ...cookieRewrite, + useHostPrefix: targetUrl.protocol === 'https:', + }) + if (rewrittenCookieHeader) + headers.set('cookie', rewrittenCookieHeader) + } + + return headers +} + +const getSetCookieHeaders = (headers: Headers) => { + const headersWithGetSetCookie = headers as Headers & { getSetCookie?: () => string[] } + const setCookieHeaders = headersWithGetSetCookie.getSetCookie?.() + if (setCookieHeaders?.length) + return setCookieHeaders + + const setCookie = headers.get('set-cookie') + return setCookie ? [setCookie] : [] +} + +const createUpstreamResponseHeaders = ( + response: Response, + requestOrigin: string | undefined | null, + allowedOrigins: DevProxyCorsAllowedOrigins, + cookieRewrite: CookieRewriteOptions | false | undefined, +) => { + const headers = new Headers(response.headers) + RESPONSE_HEADERS_TO_DROP.forEach(header => headers.delete(header)) + headers.delete('set-cookie') + + const setCookieHeaders = getSetCookieHeaders(response.headers) + const responseSetCookieHeaders = cookieRewrite + ? 
rewriteSetCookieHeadersForLocal(setCookieHeaders) + : setCookieHeaders + + responseSetCookieHeaders.forEach((cookie) => { + headers.append('set-cookie', cookie) + }) + + applyCorsHeaders(headers, requestOrigin, allowedOrigins) + return headers +} + +const proxyRequest = async ( + context: Context, + route: DevProxyRoute, + fetchImpl: typeof globalThis.fetch, + allowedOrigins: DevProxyCorsAllowedOrigins, +) => { + const requestUrl = new URL(context.req.url) + const targetUrl = buildUpstreamUrl(route.target, requestUrl.pathname, requestUrl.search) + const requestHeaders = createProxyRequestHeaders(context.req.raw, targetUrl, route.cookieRewrite) + const requestInit: RequestInit & { duplex?: 'half' } = { + method: context.req.method, + headers: requestHeaders, + redirect: 'manual', + } + + if (context.req.method !== 'GET' && context.req.method !== 'HEAD') { + requestInit.body = context.req.raw.body + requestInit.duplex = 'half' + } + + const upstreamResponse = await fetchImpl(targetUrl, requestInit) + const responseHeaders = createUpstreamResponseHeaders( + upstreamResponse, + context.req.header('origin'), + allowedOrigins, + route.cookieRewrite, + ) + + return new Response(upstreamResponse.body, { + status: upstreamResponse.status, + statusText: upstreamResponse.statusText, + headers: responseHeaders, + }) +} + +const normalizeRoutePaths = (paths: DevProxyRoute['paths']) => Array.isArray(paths) ? paths : [paths] + +const registerProxyRoute = ( + app: Hono, + route: DevProxyRoute, + path: string, + fetchImpl: typeof globalThis.fetch, + allowedOrigins: DevProxyCorsAllowedOrigins, +) => { + if (!path.startsWith('/')) + throw new Error(`Invalid dev proxy route path "${path}". 
Paths must start with "/".`) + + app.all(path, context => proxyRequest(context, route, fetchImpl, allowedOrigins)) + app.all(`${path}/*`, context => proxyRequest(context, route, fetchImpl, allowedOrigins)) +} + +const registerProxyRoutes = ( + app: Hono, + routes: readonly DevProxyRoute[], + fetchImpl: typeof globalThis.fetch, + allowedOrigins: DevProxyCorsAllowedOrigins, +) => { + routes.forEach((route) => { + normalizeRoutePaths(route.paths).forEach((path) => { + registerProxyRoute(app, route, path, fetchImpl, allowedOrigins) + }) + }) +} + +export const createDevProxyApp = (options: CreateDevProxyAppOptions) => { + const app = new HonoApp() + const fetchImpl = options.fetchImpl || globalThis.fetch + const logger = options.logger || console + const allowedOrigins = options.cors?.allowedOrigins || 'local' + + app.onError((error, context) => { + logger.error('[dev-proxy]', error) + + const headers = new Headers() + applyCorsHeaders(headers, context.req.header('origin'), allowedOrigins) + + return new Response('Upstream proxy request failed.', { + status: 502, + headers, + }) + }) + + app.use('*', async (context, next) => { + if (context.req.method === 'OPTIONS') { + const headers = new Headers() + applyCorsHeaders(headers, context.req.header('origin'), allowedOrigins) + headers.set('Access-Control-Allow-Methods', ALLOW_METHODS) + headers.set( + 'Access-Control-Allow-Headers', + context.req.header('Access-Control-Request-Headers') || DEFAULT_ALLOW_HEADERS, + ) + if (context.req.header('Access-Control-Request-Private-Network') === 'true') + headers.set('Access-Control-Allow-Private-Network', 'true') + + return new Response(null, { + status: 204, + headers, + }) + } + + await next() + applyCorsHeaders(context.res.headers, context.req.header('origin'), allowedOrigins) + }) + + registerProxyRoutes(app, options.routes, fetchImpl, allowedOrigins) + + return app +} diff --git a/packages/dev-proxy/src/types.ts b/packages/dev-proxy/src/types.ts new file mode 100644 index 
0000000000..2c42b2f7fb --- /dev/null +++ b/packages/dev-proxy/src/types.ts @@ -0,0 +1,50 @@ +export type DevProxyServerConfig = { + host?: string + port?: number +} + +export type DevProxyCorsAllowedOrigins = 'local' | readonly string[] + +export type DevProxyCorsConfig = { + allowedOrigins?: DevProxyCorsAllowedOrigins +} + +export type CookieNameMatcher = string | RegExp + +export type CookieRewriteOptions = { + hostPrefixCookies?: readonly CookieNameMatcher[] +} + +export type DevProxyRoute = { + paths: string | readonly string[] + target: string + cookieRewrite?: CookieRewriteOptions | false +} + +export type DevProxyConfig = { + server?: DevProxyServerConfig + routes: readonly DevProxyRoute[] + cors?: DevProxyCorsConfig +} + +export type DevProxyCliOptions = { + config?: string + envFile?: string + host?: string + port?: string + help?: boolean +} + +export type DevProxyConfigLoadOptions = { + envFile?: string | false +} + +export type ResolvedDevProxyServerOptions = { + host: string + port: number +} + +export type CreateDevProxyAppOptions = Pick & { + fetchImpl?: typeof globalThis.fetch + logger?: Pick +} diff --git a/packages/dev-proxy/tsconfig.json b/packages/dev-proxy/tsconfig.json new file mode 100644 index 0000000000..813a9bd8a3 --- /dev/null +++ b/packages/dev-proxy/tsconfig.json @@ -0,0 +1,17 @@ +{ + "extends": "@dify/tsconfig/node.json", + "compilerOptions": { + "types": [ + "node", + "vitest/globals" + ] + }, + "include": [ + "src/**/*.ts", + "vite.config.ts" + ], + "exclude": [ + "node_modules", + "dist" + ] +} diff --git a/packages/dev-proxy/vite.config.ts b/packages/dev-proxy/vite.config.ts new file mode 100644 index 0000000000..d060ae036e --- /dev/null +++ b/packages/dev-proxy/vite.config.ts @@ -0,0 +1,27 @@ +import { defineConfig } from 'vite-plus' + +export default defineConfig({ + pack: { + clean: true, + deps: { + neverBundle: [ + '@hono/node-server', + 'c12', + 'hono', + ], + }, + entry: [ + 'src/index.ts', + 'src/cli.ts', + ], + format: 
['esm'], + outDir: 'dist', + platform: 'node', + sourcemap: true, + target: 'node22', + treeshake: true, + }, + test: { + environment: 'node', + }, +}) diff --git a/packages/dify-ui/.storybook/storybook.css b/packages/dify-ui/.storybook/storybook.css index e9796fd046..ca76cd2968 100644 --- a/packages/dify-ui/.storybook/storybook.css +++ b/packages/dify-ui/.storybook/storybook.css @@ -1,6 +1,9 @@ @import 'tailwindcss'; -@config '../tailwind.config.ts'; +@plugin '../src/plugins/icons.ts'; + +@source '../src'; +@source '../.storybook'; @import '../src/styles/styles.css'; diff --git a/packages/dify-ui/AGENTS.md b/packages/dify-ui/AGENTS.md index 4a7fe2f22a..9524394214 100644 --- a/packages/dify-ui/AGENTS.md +++ b/packages/dify-ui/AGENTS.md @@ -1,6 +1,6 @@ # @langgenius/dify-ui -Shared design tokens, the `cn()` utility, a Tailwind CSS preset, and headless primitive components consumed by `web/`. +Shared design tokens, the `cn()` utility, CSS-first Tailwind styles, and headless primitive components consumed by `web/`. ## Component Authoring Rules @@ -51,9 +51,33 @@ The Figma design system uses `--radius/*` tokens whose scale is **offset by one ### Rules -- **Do not** add custom `borderRadius` values to `tailwind-preset.ts`. We use Tailwind v4 defaults and arbitrary values (`rounded-[Npx]`) for sizes without a standard equivalent. +- **Do not** add custom `borderRadius` theme values. We use Tailwind v4 defaults and arbitrary values (`rounded-[Npx]`) for sizes without a standard equivalent. - **Do not** use `radius-*` as CSS class names. The old `@utility radius-*` definitions have been removed. - When the Figma MCP returns `rounded-[var(--radius/sm, 6px)]`, convert it to the standard Tailwind class from the table above (e.g. `rounded-md`). - For values without a standard Tailwind equivalent (10px, 20px, 28px), use arbitrary values like `rounded-[10px]`. 
+## Search / Picker Primitive Selection: Autocomplete vs Combobox vs Select + +Pick by whether the user is entering free-form text, choosing a remembered value, or selecting from a closed list. + +Base UI decision rules: + +- [Autocomplete docs]: use `Combobox` instead of `Autocomplete` if the selection should be remembered and the input value cannot be custom. +- [Combobox docs]: do not use `Combobox` for simple search widgets that require unrestricted text entry; use `Autocomplete` instead. + +Apply this split in Dify UI: + +- `Autocomplete` — free-form text input with optional suggestions or completions. The input value may be custom and does not necessarily become a selected option. Use for search boxes, command-style suggestions, tag suggestions, and async text completion. +- `Combobox` — searchable picker whose value is one or more selected items from a collection. The chosen value is remembered by the root, and free-form text is not the final value. Use for model pickers, user pickers, dataset/document pickers, and multi-select chips. +- `Select` — closed-list picker without text entry. Use when the option set is small or already scannable and filtering is unnecessary. + +Composition rules: + +- Keep Base UI primitive semantics visible in the public API. Export compound parts such as `ComboboxInputGroup`, `ComboboxInput`, `ComboboxContent`, `ComboboxList`, `ComboboxItem`, and `ComboboxItemIndicator` instead of wrapping them into one business component. +- For `Combobox` multiple selection, follow the official chips pattern: `ComboboxInputGroup` contains `ComboboxChips`, `ComboboxValue` renders `ComboboxChip` items, and `ComboboxInput` remains inside the chips row. Chips should wrap and let the input group grow vertically instead of forcing horizontal overflow. +- Content primitives must own their Base UI `Portal` and use `z-50` on `Positioner`, matching the overlay contract in `README.md`. Toast owns `z-60`. 
+- Use `w-(--anchor-width)` with viewport-aware max-width for `Autocomplete` and `Combobox` popups. Do not add `min-w-(--anchor-width)` when it would defeat available-width clamping. + +[Autocomplete docs]: https://base-ui.com/react/components/autocomplete.md#usage-guidelines +[Combobox docs]: https://base-ui.com/react/components/combobox.md#usage-guidelines [docs]: https://base-ui.com/react/components/tooltip#infotips diff --git a/packages/dify-ui/README.md b/packages/dify-ui/README.md index cd9485c400..010fb3e56d 100644 --- a/packages/dify-ui/README.md +++ b/packages/dify-ui/README.md @@ -1,6 +1,6 @@ # @langgenius/dify-ui -Shared UI primitives, design tokens, Tailwind preset, and the `cn()` utility consumed by Dify's `web/` app. +Shared UI primitives, design tokens, CSS-first Tailwind styles, and the `cn()` utility consumed by Dify's `web/` app. The primitives are thin, opinionated wrappers around [Base UI] headless components, styled with `cva` + `cn` and Dify design tokens. @@ -28,6 +28,7 @@ Always import from a **subpath export** — there is no barrel: import { Button } from '@langgenius/dify-ui/button' import { cn } from '@langgenius/dify-ui/cn' import { Dialog, DialogContent, DialogTrigger } from '@langgenius/dify-ui/dialog' +import { Drawer, DrawerPopup, DrawerTrigger } from '@langgenius/dify-ui/drawer' import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import '@langgenius/dify-ui/styles.css' // once, in the app root ``` @@ -36,22 +37,36 @@ Importing from `@langgenius/dify-ui` (no subpath) is intentionally not supported ## Primitives -| Category | Subpath | Notes | -| -------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------- | -| Overlay | `./alert-dialog`, `./context-menu`, `./dialog`, `./dropdown-menu`, `./popover`, `./select`, `./toast`, `./tooltip` | Portalled. See [Overlay & portal contract] below. 
| -| Form | `./number-field`, `./slider`, `./switch` | Controlled / uncontrolled per Base UI defaults. | -| Layout | `./scroll-area` | Custom-styled scrollbar over the host viewport. | -| Media | `./avatar`, `./button` | Button exposes `cva` variants. | +| Category | Subpath | Notes | +| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------- | +| Overlay | `./alert-dialog`, `./autocomplete`, `./combobox`, `./context-menu`, `./dialog`, `./drawer`, `./dropdown-menu`, `./popover`, `./select`, `./toast`, `./tooltip` | Portalled. See [Overlay & portal contract] below. | +| Form | `./autocomplete`, `./combobox`, `./number-field`, `./slider`, `./switch` | Controlled / uncontrolled per Base UI defaults. | +| Layout | `./scroll-area` | Custom-styled scrollbar over the host viewport. | +| Media | `./avatar`, `./button` | Button exposes `cva` variants. | Utilities: - `./cn` — `clsx` + `tailwind-merge` wrapper. Use this for conditional class composition. -- `./tailwind-preset` — Tailwind v4 preset with Dify tokens. Apps extend it from their own `tailwind.config.ts`. -- `./styles.css` — the one CSS entry that ships the design tokens, theme variables, and base reset. Import it once from the app root. +- `./styles.css` — the one CSS entry that ships the design tokens, theme variables, and project utilities/components. Import it once from the app root. + +## Tailwind CSS v4 integration + +This package uses Tailwind CSS v4's CSS-first configuration model. 
Consumers should import Tailwind from their own root stylesheet, then import this package's CSS entry: + +```css +@import 'tailwindcss'; +@import '@langgenius/dify-ui/styles.css'; +``` + +If a consumer uses Dify UI source files through the workspace, add an explicit source so Tailwind can detect utility classes: + +```css +@source '../packages/dify-ui/src'; +``` ## Overlay & portal contract -All overlay primitives (`dialog`, `alert-dialog`, `popover`, `dropdown-menu`, `context-menu`, `select`, `tooltip`, `toast`) render their content inside a [Base UI Portal] attached to `document.body`. This is the Base UI default — see the upstream [Portals][Base UI Portal] docs for the underlying behavior. Consumers **do not** need to wrap anything in a portal manually. +Overlay primitives render their floating surfaces inside a [Base UI Portal] attached to `document.body`. This is the Base UI default — see the upstream [Portals][Base UI Portal] docs for the underlying behavior. Convenience content components such as `DialogContent`, `PopoverContent`, and `SelectContent` own their portal internally; primitives with explicit portal anatomy such as `Drawer` expose the matching `DrawerPortal` part so consumers can compose the full Base UI structure. ### Root isolation requirement @@ -69,21 +84,28 @@ Equivalent: any root element with `isolation: isolate` in CSS. Without it, overl Every overlay primitive uses a single, shared z-index. Do **not** override it at call sites. -| Layer | z-index | Where | -| ----------------------------------------------------------------------------------- | -------- | -------------------------------------------------------------------------- | -| Overlays (Dialog, AlertDialog, Popover, DropdownMenu, ContextMenu, Select, Tooltip) | `z-1002` | Positioner / Backdrop | -| Toast viewport | `z-1003` | One layer above overlays so notifications are never hidden under a dialog. 
| +| Layer | z-index | Where | +| ------------------------------------------------------------------------------------------------------------------- | ------- | -------------------------------------------------------------------------- | +| Overlays (Dialog, AlertDialog, Autocomplete, Combobox, Drawer, Popover, DropdownMenu, ContextMenu, Select, Tooltip) | `z-50` | Positioner / Backdrop | +| Toast viewport | `z-60` | One layer above overlays so notifications are never hidden under a dialog. | -Rationale: during Dify's migration from legacy `portal-to-follow-elem` / `base/modal` / `base/dialog` overlays to this package, new and old overlays coexist in the DOM. `z-1002` sits above any common legacy layer, eliminating per-call-site z-index hacks. Among themselves, new primitives share the same z-index and **rely on DOM order** for stacking — the portal mounted later wins. +Rationale: Dify UI owns the normal application overlay layer. Overlay primitives share `z-50` and **rely on DOM order** for stacking — the portal mounted later wins. Toast owns `z-60` so notifications remain visible above dialogs, popovers, and other portalled surfaces without falling back to `z-9999`. -See `[web/docs/overlay-migration.md](../../web/docs/overlay-migration.md)` for the Dify-web migration history and the remaining legacy allowlist. Once the legacy overlays are gone, the values in this table can drop back to `z-50` / `z-51`. +See `[web/docs/overlay.md](../../web/docs/overlay.md)` for the web app overlay best practices. ### Rules -- Never add `z-1003` / `z-9999` / etc. overrides on primitives from this package. If something is getting clipped, the **parent** overlay (typically a legacy one) is the problem and should be migrated. -- Never portal an overlay manually on top of our primitives — use `DialogTrigger`, `PopoverTrigger`, etc. Base UI handles focus management, scroll-locking, and dismissal. +- Never add ad hoc `z-*` overrides on primitives from this package. 
If something is getting clipped, fix the parent overlay structure instead of raising the child primitive. +- Never create an extra manual portal on top of our primitives — use the exported content / portal parts such as `DialogContent`, `PopoverContent`, and `DrawerPortal`. Base UI handles focus management, scroll-locking, and dismissal. - When a primitive needs additional presentation chrome (e.g. a custom backdrop), add it **inside** the exported component, not at call sites. +### Tooltip, infotip, and popover semantics + +- Use `Tooltip` only for short, non-interactive visual labels. The trigger must already have visible text or an `aria-label`; the tooltip is not the accessible name and must not contain links, buttons, forms, or structured prose. +- Use `Popover` for explanatory content, long text, rich layout, or anything users may need to reach on touch or with assistive technology. In `web/`, the `Infotip` wrapper is the preferred pattern for a `?` help glyph backed by `Popover`. +- Pick a `placement` and let the primitive own spacing. Avoid per-call-site offsets unless the component API explicitly needs a measured layout exception. +- When passing a Base UI trigger `render` prop, render a real `