From 271019006e1755c1ea2135c7fc6377b7518433be Mon Sep 17 00:00:00 2001 From: yyh <92089059+lyzno1@users.noreply.github.com> Date: Sat, 9 May 2026 09:29:20 +0800 Subject: [PATCH 01/13] fix: prevent workflow preview resize observer loop (#35936) --- web/app/components/workflow/index.tsx | 24 +++++++- .../operator/__tests__/index.spec.tsx | 10 ++-- .../components/workflow/operator/index.tsx | 24 +++++++- .../workflow/panel/__tests__/index.spec.tsx | 58 +++++++++++++++---- web/app/components/workflow/panel/index.tsx | 51 +++++++++------- .../workflow/store/workflow/layout-slice.ts | 30 ++++++---- 6 files changed, 148 insertions(+), 49 deletions(-) diff --git a/web/app/components/workflow/index.tsx b/web/app/components/workflow/index.tsx index 0707ba8b3b..d946ad4a97 100644 --- a/web/app/components/workflow/index.tsx +++ b/web/app/components/workflow/index.tsx @@ -213,6 +213,8 @@ export const Workflow: FC = memo(({ const bottomPanelHeight = useStore(s => s.bottomPanelHeight) const setWorkflowCanvasWidth = useStore(s => s.setWorkflowCanvasWidth) const setWorkflowCanvasHeight = useStore(s => s.setWorkflowCanvasHeight) + const workflowCanvasSizeRef = useRef<{ width?: number, height?: number }>({}) + const workflowCanvasResizeFrameRef = useRef(undefined) const controlHeight = useMemo(() => { if (!workflowCanvasHeight) return '100%' @@ -222,15 +224,33 @@ export const Workflow: FC = memo(({ // update workflow Canvas width and height useEffect(() => { if (workflowContainerRef.current) { + const updateWorkflowCanvasSize = (width: number, height: number) => { + if (workflowCanvasSizeRef.current.width === width && workflowCanvasSizeRef.current.height === height) + return + + workflowCanvasSizeRef.current = { width, height } + if (workflowCanvasResizeFrameRef.current) + cancelAnimationFrame(workflowCanvasResizeFrameRef.current) + + workflowCanvasResizeFrameRef.current = requestAnimationFrame(() => { + workflowCanvasResizeFrameRef.current = undefined + setWorkflowCanvasWidth(width) 
+ setWorkflowCanvasHeight(height) + }) + } + const resizeContainerObserver = new ResizeObserver((entries) => { for (const entry of entries) { const { inlineSize, blockSize } = entry.borderBoxSize[0]! - setWorkflowCanvasWidth(inlineSize) - setWorkflowCanvasHeight(blockSize) + updateWorkflowCanvasSize(inlineSize, blockSize) } }) resizeContainerObserver.observe(workflowContainerRef.current) return () => { + if (workflowCanvasResizeFrameRef.current) { + cancelAnimationFrame(workflowCanvasResizeFrameRef.current) + workflowCanvasResizeFrameRef.current = undefined + } resizeContainerObserver.disconnect() } } diff --git a/web/app/components/workflow/operator/__tests__/index.spec.tsx b/web/app/components/workflow/operator/__tests__/index.spec.tsx index 455f3aa0b5..49f077341d 100644 --- a/web/app/components/workflow/operator/__tests__/index.spec.tsx +++ b/web/app/components/workflow/operator/__tests__/index.spec.tsx @@ -1,4 +1,4 @@ -import { act, screen } from '@testing-library/react' +import { act, screen, waitFor } from '@testing-library/react' import { createNode } from '../../__tests__/fixtures' import { renderWorkflowFlowComponent } from '../../__tests__/workflow-test-env' import { BlockEnum } from '../../types' @@ -110,7 +110,7 @@ describe('Operator', () => { expect(container.querySelector('div[style*="width: auto"]')).toBeInTheDocument() }) - it('should sync the observed panel size back into the workflow store and disconnect on unmount', () => { + it('should sync the observed panel size back into the workflow store and disconnect on unmount', async () => { const { store, unmount } = renderOperator({ workflowCanvasWidth: 900, rightPanelWidth: 260, @@ -126,8 +126,10 @@ describe('Operator', () => { ], {} as ResizeObserver) }) - expect(store.getState().bottomPanelWidth).toBe(512) - expect(store.getState().bottomPanelHeight).toBe(188) + await waitFor(() => { + expect(store.getState().bottomPanelWidth).toBe(512) + expect(store.getState().bottomPanelHeight).toBe(188) + }) 
unmount() diff --git a/web/app/components/workflow/operator/index.tsx b/web/app/components/workflow/operator/index.tsx index 052953cecf..5797983e44 100644 --- a/web/app/components/workflow/operator/index.tsx +++ b/web/app/components/workflow/operator/index.tsx @@ -15,6 +15,8 @@ type OperatorProps = { const Operator = ({ handleUndo, handleRedo }: OperatorProps) => { const bottomPanelRef = useRef(null) + const bottomPanelSizeRef = useRef<{ width?: number, height?: number }>({}) + const bottomPanelResizeFrameRef = useRef(undefined) const [showMiniMap, setShowMiniMap] = useState(true) const showUserCursors = useStore(s => s.showUserCursors) const setShowUserCursors = useStore(s => s.setShowUserCursors) @@ -55,15 +57,33 @@ const Operator = ({ handleUndo, handleRedo }: OperatorProps) => { // update bottom panel height useEffect(() => { if (bottomPanelRef.current) { + const updateBottomPanelSize = (width: number, height: number) => { + if (bottomPanelSizeRef.current.width === width && bottomPanelSizeRef.current.height === height) + return + + bottomPanelSizeRef.current = { width, height } + if (bottomPanelResizeFrameRef.current) + cancelAnimationFrame(bottomPanelResizeFrameRef.current) + + bottomPanelResizeFrameRef.current = requestAnimationFrame(() => { + bottomPanelResizeFrameRef.current = undefined + setBottomPanelWidth(width) + setBottomPanelHeight(height) + }) + } + const resizeContainerObserver = new ResizeObserver((entries) => { for (const entry of entries) { const { inlineSize, blockSize } = entry.borderBoxSize[0]! 
- setBottomPanelWidth(inlineSize) - setBottomPanelHeight(blockSize) + updateBottomPanelSize(inlineSize, blockSize) } }) resizeContainerObserver.observe(bottomPanelRef.current) return () => { + if (bottomPanelResizeFrameRef.current) { + cancelAnimationFrame(bottomPanelResizeFrameRef.current) + bottomPanelResizeFrameRef.current = undefined + } resizeContainerObserver.disconnect() } } diff --git a/web/app/components/workflow/panel/__tests__/index.spec.tsx b/web/app/components/workflow/panel/__tests__/index.spec.tsx index 5da08dd832..4a813f392d 100644 --- a/web/app/components/workflow/panel/__tests__/index.spec.tsx +++ b/web/app/components/workflow/panel/__tests__/index.spec.tsx @@ -1,4 +1,4 @@ -import { render, screen } from '@testing-library/react' +import { act, render, screen, waitFor } from '@testing-library/react' import * as React from 'react' import Panel from '../index' @@ -232,17 +232,55 @@ describe('Panel', () => { expect(mockPanelStoreState.setPreviewPanelWidth).not.toHaveBeenCalled() }) - it('should derive observer widths from border-box, content-rect, and fallback values and disconnect on unmount', () => { - mockResizeModes = ['borderBox', 'contentRect', 'fallback'] - + it('should derive observer widths from border-box, content-rect, and fallback values and disconnect on unmount', async () => { const { unmount } = render() - expect(mockPanelStoreState.setRightPanelWidth).toHaveBeenCalledWith(720) - expect(mockPanelStoreState.setRightPanelWidth).toHaveBeenCalledWith(530) - expect(mockPanelStoreState.setRightPanelWidth).toHaveBeenCalledWith(640) - expect(mockPanelStoreState.setOtherPanelWidth).toHaveBeenCalledWith(720) - expect(mockPanelStoreState.setOtherPanelWidth).toHaveBeenCalledWith(530) - expect(mockPanelStoreState.setOtherPanelWidth).toHaveBeenCalledWith(640) + await waitFor(() => { + expect(mockPanelStoreState.setRightPanelWidth).toHaveBeenCalledWith(640) + expect(mockPanelStoreState.setOtherPanelWidth).toHaveBeenCalledWith(640) + }) + + 
vi.mocked(mockPanelStoreState.setRightPanelWidth).mockClear() + vi.mocked(mockPanelStoreState.setOtherPanelWidth).mockClear() + + act(() => { + mockResizeObservers.forEach((observer) => { + observer.callback([createResizeEntry('borderBox')], observer as unknown as ResizeObserver) + }) + }) + + await waitFor(() => { + expect(mockPanelStoreState.setRightPanelWidth).toHaveBeenCalledWith(720) + expect(mockPanelStoreState.setOtherPanelWidth).toHaveBeenCalledWith(720) + }) + + vi.mocked(mockPanelStoreState.setRightPanelWidth).mockClear() + vi.mocked(mockPanelStoreState.setOtherPanelWidth).mockClear() + + act(() => { + mockResizeObservers.forEach((observer) => { + observer.callback([createResizeEntry('contentRect')], observer as unknown as ResizeObserver) + }) + }) + + await waitFor(() => { + expect(mockPanelStoreState.setRightPanelWidth).toHaveBeenCalledWith(530) + expect(mockPanelStoreState.setOtherPanelWidth).toHaveBeenCalledWith(530) + }) + + vi.mocked(mockPanelStoreState.setRightPanelWidth).mockClear() + vi.mocked(mockPanelStoreState.setOtherPanelWidth).mockClear() + + act(() => { + mockResizeObservers.forEach((observer) => { + observer.callback([createResizeEntry('fallback')], observer as unknown as ResizeObserver) + }) + }) + + await waitFor(() => { + expect(mockPanelStoreState.setRightPanelWidth).toHaveBeenCalledWith(640) + expect(mockPanelStoreState.setOtherPanelWidth).toHaveBeenCalledWith(640) + }) unmount() diff --git a/web/app/components/workflow/panel/index.tsx b/web/app/components/workflow/panel/index.tsx index 89e8419b5f..3b54583d1c 100644 --- a/web/app/components/workflow/panel/index.tsx +++ b/web/app/components/workflow/panel/index.tsx @@ -1,7 +1,7 @@ import type { FC } from 'react' import type { VersionHistoryPanelProps } from '@/app/components/workflow/panel/version-history-panel' import { cn } from '@langgenius/dify-ui/cn' -import { memo, useCallback, useEffect, useRef } from 'react' +import { memo, useEffect, useRef } from 'react' import { useStore as 
useReactflow } from 'reactflow' import { useShallow } from 'zustand/react/shallow' import dynamic from '@/next/dynamic' @@ -34,35 +34,50 @@ const getEntryWidth = (entry: ResizeObserverEntry, element: HTMLElement): number return element.getBoundingClientRect().width } -const useResizeObserver = ( - callback: (width: number) => void, - dependencies: React.DependencyList = [], -) => { +const useResizeObserver = (callback: (width: number) => void) => { const elementRef = useRef(null) - - const stableCallback = useCallback(callback, [callback]) + const widthRef = useRef(undefined) + const animationFrameRef = useRef(undefined) useEffect(() => { const element = elementRef.current if (!element) return + widthRef.current = undefined + + const updateWidth = (width: number) => { + if (widthRef.current === width) + return + + widthRef.current = width + if (animationFrameRef.current) + cancelAnimationFrame(animationFrameRef.current) + + animationFrameRef.current = requestAnimationFrame(() => { + animationFrameRef.current = undefined + callback(width) + }) + } + const resizeObserver = new ResizeObserver((entries) => { - for (const entry of entries) { - const width = getEntryWidth(entry, element) - stableCallback(width) - } + for (const entry of entries) + updateWidth(getEntryWidth(entry, element)) }) resizeObserver.observe(element) const initialWidth = element.getBoundingClientRect().width - stableCallback(initialWidth) + updateWidth(initialWidth) return () => { + if (animationFrameRef.current) { + cancelAnimationFrame(animationFrameRef.current) + animationFrameRef.current = undefined + } resizeObserver.disconnect() } - }, [stableCallback, ...dependencies]) + }, [callback]) return elementRef } @@ -113,15 +128,9 @@ const Panel: FC = ({ const setRightPanelWidth = useStore(s => s.setRightPanelWidth) const setOtherPanelWidth = useStore(s => s.setOtherPanelWidth) - const rightPanelRef = useResizeObserver( - setRightPanelWidth, - [setRightPanelWidth, selectedNode, showEnvPanel, 
showWorkflowVersionHistoryPanel], - ) + const rightPanelRef = useResizeObserver(setRightPanelWidth) - const otherPanelRef = useResizeObserver( - setOtherPanelWidth, - [setOtherPanelWidth, showEnvPanel, showWorkflowVersionHistoryPanel], - ) + const otherPanelRef = useResizeObserver(setOtherPanelWidth) return (
= set => ({ workflowCanvasWidth: undefined, workflowCanvasHeight: undefined, - setWorkflowCanvasWidth: width => set(() => ({ workflowCanvasWidth: width })), - setWorkflowCanvasHeight: height => set(() => ({ workflowCanvasHeight: height })), + setWorkflowCanvasWidth: width => set(state => + state.workflowCanvasWidth === width ? state : ({ workflowCanvasWidth: width })), + setWorkflowCanvasHeight: height => set(state => + state.workflowCanvasHeight === height ? state : ({ workflowCanvasHeight: height })), rightPanelWidth: undefined, - setRightPanelWidth: width => set(() => ({ rightPanelWidth: width })), + setRightPanelWidth: width => set(state => + state.rightPanelWidth === width ? state : ({ rightPanelWidth: width })), nodePanelWidth: localStorage.getItem('workflow-node-panel-width') ? Number.parseFloat(localStorage.getItem('workflow-node-panel-width')!) : 400, - setNodePanelWidth: width => set(() => ({ nodePanelWidth: width })), + setNodePanelWidth: width => set(state => + state.nodePanelWidth === width ? state : ({ nodePanelWidth: width })), previewPanelWidth: localStorage.getItem('debug-and-preview-panel-width') ? Number.parseFloat(localStorage.getItem('debug-and-preview-panel-width')!) : 400, - setPreviewPanelWidth: width => set(() => ({ previewPanelWidth: width })), + setPreviewPanelWidth: width => set(state => + state.previewPanelWidth === width ? state : ({ previewPanelWidth: width })), otherPanelWidth: 400, - setOtherPanelWidth: width => set(() => ({ otherPanelWidth: width })), + setOtherPanelWidth: width => set(state => + state.otherPanelWidth === width ? state : ({ otherPanelWidth: width })), bottomPanelWidth: 480, - setBottomPanelWidth: width => set(() => ({ bottomPanelWidth: width })), + setBottomPanelWidth: width => set(state => + state.bottomPanelWidth === width ? 
state : ({ bottomPanelWidth: width })), bottomPanelHeight: 324, - setBottomPanelHeight: height => set(() => ({ bottomPanelHeight: height })), + setBottomPanelHeight: height => set(state => + state.bottomPanelHeight === height ? state : ({ bottomPanelHeight: height })), variableInspectPanelHeight: localStorage.getItem('workflow-variable-inpsect-panel-height') ? Number.parseFloat(localStorage.getItem('workflow-variable-inpsect-panel-height')!) : 320, - setVariableInspectPanelHeight: height => set(() => ({ variableInspectPanelHeight: height })), + setVariableInspectPanelHeight: height => set(state => + state.variableInspectPanelHeight === height ? state : ({ variableInspectPanelHeight: height })), maximizeCanvas: localStorage.getItem('workflow-canvas-maximize') === 'true', - setMaximizeCanvas: maximize => set(() => ({ maximizeCanvas: maximize })), + setMaximizeCanvas: maximize => set(state => + state.maximizeCanvas === maximize ? state : ({ maximizeCanvas: maximize })), }) From c74cbb68da429f7d3a98b5db86e21acaa77e5ebf Mon Sep 17 00:00:00 2001 From: wangxiaolei Date: Sat, 9 May 2026 10:36:54 +0800 Subject: [PATCH 02/13] fix: change write to db order (#35948) --- api/services/file_service.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/api/services/file_service.py b/api/services/file_service.py index f60afe2f19..b683a2f3d4 100644 --- a/api/services/file_service.py +++ b/api/services/file_service.py @@ -107,15 +107,14 @@ class FileService: hash=hashlib.sha3_256(content).hexdigest(), source_url=source_url, ) - # The `UploadFile` ID is generated within its constructor, so flushing to retrieve the ID is unnecessary. - # We can directly generate the `source_url` here before committing. 
- if not upload_file.source_url: - upload_file.source_url = file_helpers.get_signed_file_url(upload_file_id=upload_file.id) with self._session_maker(expire_on_commit=False) as session: session.add(upload_file) session.commit() + if not upload_file.source_url: + upload_file.source_url = file_helpers.get_signed_file_url(upload_file_id=upload_file.id) + return upload_file @staticmethod From 38a419d0737612845c57dfb36ef8b157a9a6eaab Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Sat, 9 May 2026 12:01:47 +0900 Subject: [PATCH 03/13] ci: auto gen api doc and download link (#35919) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: WH-2099 --- .github/workflows/autofix.yml | 6 + Makefile | 4 +- api/controllers/common/schema.py | 42 +- api/controllers/console/admin.py | 11 +- api/controllers/console/app/app_import.py | 3 +- api/controllers/console/app/generator.py | 24 +- api/dev/generate_fastopenapi_specs.py | 95 + api/dev/generate_swagger_markdown_docs.py | 161 + api/dev/generate_swagger_specs.py | 265 +- api/openapi/markdown/console-swagger.md | 14766 ++++++++++++++++ api/openapi/markdown/service-swagger.md | 2754 +++ api/openapi/markdown/web-swagger.md | 1224 ++ .../test_generate_swagger_markdown_docs.py | 103 + .../commands/test_generate_swagger_specs.py | 39 + .../controllers/common/test_schema.py | 24 + 15 files changed, 19481 insertions(+), 40 deletions(-) create mode 100644 api/dev/generate_fastopenapi_specs.py create mode 100644 api/dev/generate_swagger_markdown_docs.py create mode 100644 api/openapi/markdown/console-swagger.md create mode 100644 api/openapi/markdown/service-swagger.md create mode 100644 api/openapi/markdown/web-swagger.md create mode 100644 api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index 76fbd18f47..9c2c6e2ca9 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ 
-116,6 +116,12 @@ jobs: if: github.event_name != 'merge_group' uses: ./.github/actions/setup-web + - name: Generate API docs + if: github.event_name != 'merge_group' && steps.api-changes.outputs.any_changed == 'true' + run: | + cd api + uv run dev/generate_swagger_markdown_docs.py --swagger-dir openapi --markdown-dir openapi/markdown + - name: ESLint autofix if: github.event_name != 'merge_group' && steps.web-changes.outputs.any_changed == 'true' run: | diff --git a/Makefile b/Makefile index d8c9df5208..ae7589bbd6 100644 --- a/Makefile +++ b/Makefile @@ -71,13 +71,13 @@ type-check: @echo "📝 Running type checks (basedpyright + pyrefly + mypy)..." @./dev/basedpyright-check $(PATH_TO_CHECK) @./dev/pyrefly-check-local - @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --check-untyped-defs --disable-error-code=import-untyped . + @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --exclude 'dev/generate_swagger_specs.py' --check-untyped-defs --disable-error-code=import-untyped . @echo "✅ Type checks complete" type-check-core: @echo "📝 Running core type checks (basedpyright + mypy)..." @./dev/basedpyright-check $(PATH_TO_CHECK) - @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --check-untyped-defs --disable-error-code=import-untyped . + @uv --directory api run mypy --exclude-gitignore --exclude 'tests/' --exclude 'migrations/' --exclude 'dev/generate_swagger_specs.py' --exclude 'dev/generate_fastopenapi_specs.py' --check-untyped-defs --disable-error-code=import-untyped . 
@echo "✅ Core type checks complete" test: diff --git a/api/controllers/common/schema.py b/api/controllers/common/schema.py index 8d112c203b..0c5e23c29c 100644 --- a/api/controllers/common/schema.py +++ b/api/controllers/common/schema.py @@ -1,4 +1,10 @@ -"""Helpers for registering Pydantic models with Flask-RESTX namespaces.""" +"""Helpers for registering Pydantic models with Flask-RESTX namespaces. + +Flask-RESTX treats `SchemaModel` bodies as opaque JSON schemas; it does not +promote Pydantic's nested `$defs` into top-level Swagger `definitions`. +These helpers keep that translation centralized so models registered through +`register_schema_models` emit resolvable Swagger 2.0 references. +""" from enum import StrEnum @@ -8,10 +14,32 @@ from pydantic import BaseModel, TypeAdapter DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" -def register_schema_model(namespace: Namespace, model: type[BaseModel]) -> None: - """Register a single BaseModel with a namespace for Swagger documentation.""" +def _register_json_schema(namespace: Namespace, name: str, schema: dict) -> None: + """Register a JSON schema and promote any nested Pydantic `$defs`.""" - namespace.schema_model(model.__name__, model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) + nested_definitions = schema.get("$defs") + schema_to_register = dict(schema) + if isinstance(nested_definitions, dict): + schema_to_register.pop("$defs") + + namespace.schema_model(name, schema_to_register) + + if not isinstance(nested_definitions, dict): + return + + for nested_name, nested_schema in nested_definitions.items(): + if isinstance(nested_schema, dict): + _register_json_schema(namespace, nested_name, nested_schema) + + +def register_schema_model(namespace: Namespace, model: type[BaseModel]) -> None: + """Register a BaseModel and its nested schema definitions for Swagger documentation.""" + + _register_json_schema( + namespace, + model.__name__, + 
model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), + ) def register_schema_models(namespace: Namespace, *models: type[BaseModel]) -> None: @@ -34,8 +62,10 @@ def get_or_create_model(model_name: str, field_def): def register_enum_models(namespace: Namespace, *models: type[StrEnum]) -> None: """Register multiple StrEnum with a namespace.""" for model in models: - namespace.schema_model( - model.__name__, TypeAdapter(model).json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) + _register_json_schema( + namespace, + model.__name__, + TypeAdapter(model).json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), ) diff --git a/api/controllers/console/admin.py b/api/controllers/console/admin.py index dce394be97..a32c3420bb 100644 --- a/api/controllers/console/admin.py +++ b/api/controllers/console/admin.py @@ -12,6 +12,7 @@ from werkzeug.exceptions import BadRequest, NotFound, Unauthorized from configs import dify_config from constants.languages import supported_language +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.wraps import only_edition_cloud from core.db.session_factory import session_factory @@ -301,15 +302,7 @@ class BatchAddNotificationAccountsPayload(BaseModel): user_email: list[str] = Field(..., description="List of account email addresses") -console_ns.schema_model( - UpsertNotificationPayload.__name__, - UpsertNotificationPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) - -console_ns.schema_model( - BatchAddNotificationAccountsPayload.__name__, - BatchAddNotificationAccountsPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0), -) +register_schema_models(console_ns, UpsertNotificationPayload, BatchAddNotificationAccountsPayload) @console_ns.route("/admin/upsert_notification") diff --git a/api/controllers/console/app/app_import.py b/api/controllers/console/app/app_import.py index e91dc9cfe5..b653016319 
100644 --- a/api/controllers/console/app/app_import.py +++ b/api/controllers/console/app/app_import.py @@ -2,7 +2,7 @@ from flask_restx import Resource from pydantic import BaseModel, Field from sqlalchemy.orm import Session -from controllers.common.schema import register_schema_models +from controllers.common.schema import register_enum_models, register_schema_models from controllers.console.app.wraps import get_app_model from controllers.console.wraps import ( account_initialization_required, @@ -33,6 +33,7 @@ class AppImportPayload(BaseModel): app_id: str | None = Field(None) +register_enum_models(console_ns, ImportStatus) register_schema_models(console_ns, AppImportPayload, Import, CheckDependenciesResult) diff --git a/api/controllers/console/app/generator.py b/api/controllers/console/app/generator.py index c720a5e074..d4f501d34c 100644 --- a/api/controllers/console/app/generator.py +++ b/api/controllers/console/app/generator.py @@ -3,6 +3,7 @@ from collections.abc import Sequence from flask_restx import Resource from pydantic import BaseModel, Field +from controllers.common.schema import register_enum_models, register_schema_models from controllers.console import console_ns from controllers.console.app.error import ( CompletionRequestError, @@ -19,13 +20,12 @@ from core.helper.code_executor.python3.python3_code_provider import Python3CodeP from core.llm_generator.entities import RuleCodeGeneratePayload, RuleGeneratePayload, RuleStructuredOutputPayload from core.llm_generator.llm_generator import LLMGenerator from extensions.ext_database import db +from graphon.model_runtime.entities.llm_entities import LLMMode from graphon.model_runtime.errors.invoke import InvokeError from libs.login import current_account_with_tenant, login_required from models import App from services.workflow_service import WorkflowService -DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" - class InstructionGeneratePayload(BaseModel): flow_id: str = Field(..., 
description="Workflow/Flow ID") @@ -41,16 +41,16 @@ class InstructionTemplatePayload(BaseModel): type: str = Field(..., description="Instruction template type") -def reg(cls: type[BaseModel]): - console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) - - -reg(RuleGeneratePayload) -reg(RuleCodeGeneratePayload) -reg(RuleStructuredOutputPayload) -reg(InstructionGeneratePayload) -reg(InstructionTemplatePayload) -reg(ModelConfig) +register_enum_models(console_ns, LLMMode) +register_schema_models( + console_ns, + RuleGeneratePayload, + RuleCodeGeneratePayload, + RuleStructuredOutputPayload, + InstructionGeneratePayload, + InstructionTemplatePayload, + ModelConfig, +) @console_ns.route("/rule-generate") diff --git a/api/dev/generate_fastopenapi_specs.py b/api/dev/generate_fastopenapi_specs.py new file mode 100644 index 0000000000..5a94d32b93 --- /dev/null +++ b/api/dev/generate_fastopenapi_specs.py @@ -0,0 +1,95 @@ +"""Generate FastOpenAPI OpenAPI 3.0 specs without booting the full backend.""" + +from __future__ import annotations + +import argparse +import json +import logging +import sys +from dataclasses import dataclass +from pathlib import Path + +API_ROOT = Path(__file__).resolve().parents[1] +if str(API_ROOT) not in sys.path: + sys.path.insert(0, str(API_ROOT)) + +from dev.generate_swagger_specs import apply_runtime_defaults, drop_null_values, sort_openapi_arrays + +logger = logging.getLogger(__name__) + + +@dataclass(frozen=True) +class FastOpenApiSpecTarget: + route: str + filename: str + + +FASTOPENAPI_SPEC_TARGETS: tuple[FastOpenApiSpecTarget, ...] 
= ( + FastOpenApiSpecTarget(route="/fastopenapi/openapi.json", filename="fastopenapi-console-openapi.json"), +) + + +def create_fastopenapi_spec_app(): + """Build a minimal Flask app that only mounts FastOpenAPI docs routes.""" + + apply_runtime_defaults() + + from app_factory import create_flask_app_with_configs + from extensions import ext_fastopenapi + + app = create_flask_app_with_configs() + ext_fastopenapi.init_app(app) + return app + + +def generate_fastopenapi_specs(output_dir: Path) -> list[Path]: + """Write FastOpenAPI specs to `output_dir` and return the written paths.""" + + output_dir.mkdir(parents=True, exist_ok=True) + + app = create_fastopenapi_spec_app() + client = app.test_client() + + written_paths: list[Path] = [] + for target in FASTOPENAPI_SPEC_TARGETS: + response = client.get(target.route) + if response.status_code != 200: + raise RuntimeError(f"failed to fetch {target.route}: {response.status_code}") + + payload = response.get_json() + if not isinstance(payload, dict): + raise RuntimeError(f"unexpected response payload for {target.route}") + payload = drop_null_values(payload) + payload = sort_openapi_arrays(payload) + + output_path = output_dir / target.filename + output_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8") + written_paths.append(output_path) + + return written_paths + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "-o", + "--output-dir", + type=Path, + default=Path("openapi"), + help="Directory where the OpenAPI JSON files will be written.", + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + written_paths = generate_fastopenapi_specs(args.output_dir) + + for path in written_paths: + logger.debug(path) + + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/api/dev/generate_swagger_markdown_docs.py b/api/dev/generate_swagger_markdown_docs.py new file mode 
100644 index 0000000000..0900d08331 --- /dev/null +++ b/api/dev/generate_swagger_markdown_docs.py @@ -0,0 +1,161 @@ +"""Generate OpenAPI JSON specs and split Markdown API docs. + +The Markdown step uses `swagger-markdown`, the same converter family as the +Swagger Markdown UI, so CI and local regeneration catch converter-incompatible +OpenAPI output early. +""" + +from __future__ import annotations + +import argparse +import logging +import subprocess +import sys +import tempfile +from pathlib import Path + +API_ROOT = Path(__file__).resolve().parents[1] +if str(API_ROOT) not in sys.path: + sys.path.insert(0, str(API_ROOT)) + +from dev.generate_fastopenapi_specs import FASTOPENAPI_SPEC_TARGETS, generate_fastopenapi_specs +from dev.generate_swagger_specs import SPEC_TARGETS, generate_specs + +logger = logging.getLogger(__name__) + +SWAGGER_MARKDOWN_PACKAGE = "swagger-markdown@3.0.0" +CONSOLE_SWAGGER_FILENAME = "console-swagger.json" +STALE_COMBINED_MARKDOWN_FILENAME = "api-reference.md" + + +def _convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + subprocess.run( + [ + "npx", + "--yes", + SWAGGER_MARKDOWN_PACKAGE, + "-i", + str(spec_path), + "-o", + str(markdown_path), + ], + check=True, + ) + + +def _demote_markdown_headings(markdown: str, *, levels: int = 1) -> str: + """Nest generated Markdown under another Markdown section.""" + + heading_prefix = "#" * levels + lines = [] + for line in markdown.splitlines(): + if line.startswith("#"): + lines.append(f"{heading_prefix}{line}") + else: + lines.append(line) + return "\n".join(lines).strip() + + +def _append_fastopenapi_markdown(console_markdown_path: Path, fastopenapi_markdown_path: Path) -> None: + """Append FastOpenAPI console docs to the existing console API Markdown.""" + + console_markdown = console_markdown_path.read_text(encoding="utf-8").rstrip() + fastopenapi_markdown = _demote_markdown_headings( + fastopenapi_markdown_path.read_text(encoding="utf-8"), + levels=2, + ) + 
console_markdown_path.write_text( + "\n\n".join( + [ + console_markdown, + "## FastOpenAPI Preview (OpenAPI 3.0)", + fastopenapi_markdown, + ] + ) + + "\n", + encoding="utf-8", + ) + + +def generate_markdown_docs( + swagger_dir: Path, + markdown_dir: Path, + *, + keep_swagger_json: bool = False, +) -> list[Path]: + """Generate intermediate specs, convert them to split Markdown API docs, and return Markdown paths.""" + + swagger_paths = generate_specs(swagger_dir) + fastopenapi_paths = generate_fastopenapi_specs(swagger_dir) + spec_paths = [*swagger_paths, *fastopenapi_paths] + swagger_paths_by_name = {path.name: path for path in swagger_paths} + fastopenapi_paths_by_name = {path.name: path for path in fastopenapi_paths} + + markdown_dir.mkdir(parents=True, exist_ok=True) + + written_paths: list[Path] = [] + try: + with tempfile.TemporaryDirectory(prefix="dify-api-docs-") as temp_dir: + temp_markdown_dir = Path(temp_dir) + + for target in SPEC_TARGETS: + swagger_path = swagger_paths_by_name[target.filename] + markdown_path = markdown_dir / f"{swagger_path.stem}.md" + _convert_spec_to_markdown(swagger_path, markdown_path) + written_paths.append(markdown_path) + + for target in FASTOPENAPI_SPEC_TARGETS: # type: ignore + fastopenapi_path = fastopenapi_paths_by_name[target.filename] + markdown_path = temp_markdown_dir / f"{fastopenapi_path.stem}.md" + _convert_spec_to_markdown(fastopenapi_path, markdown_path) + + console_markdown_path = markdown_dir / f"{Path(CONSOLE_SWAGGER_FILENAME).stem}.md" + _append_fastopenapi_markdown(console_markdown_path, markdown_path) + + (markdown_dir / STALE_COMBINED_MARKDOWN_FILENAME).unlink(missing_ok=True) + finally: + if not keep_swagger_json: + for path in spec_paths: + path.unlink(missing_ok=True) + + return written_paths + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--swagger-dir", + type=Path, + default=Path("openapi"), + help="Directory where 
intermediate JSON spec files will be written.", + ) + parser.add_argument( + "--markdown-dir", + type=Path, + default=Path("openapi/markdown"), + help="Directory where split Markdown API docs will be written.", + ) + parser.add_argument( + "--keep-swagger-json", + action="store_true", + help="Keep intermediate JSON spec files after Markdown generation.", + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + written_paths = generate_markdown_docs( + args.swagger_dir, + args.markdown_dir, + keep_swagger_json=args.keep_swagger_json, + ) + + for path in written_paths: + logger.debug(path) + + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/api/dev/generate_swagger_specs.py b/api/dev/generate_swagger_specs.py index 7e9688bfb4..9122f3ab24 100644 --- a/api/dev/generate_swagger_specs.py +++ b/api/dev/generate_swagger_specs.py @@ -9,12 +9,15 @@ which is unnecessary when the goal is only to serialize the Flask-RESTX from __future__ import annotations import argparse +import hashlib import json import logging import os import sys +from collections.abc import MutableMapping from dataclasses import dataclass from pathlib import Path +from typing import Protocol, TypeGuard from flask import Flask from flask_restx.swagger import Swagger @@ -30,19 +33,110 @@ if str(API_ROOT) not in sys.path: class SpecTarget: route: str filename: str + namespace: str + + +class RestxApi(Protocol): + models: MutableMapping[str, object] + + def model(self, name: str, model: dict[object, object]) -> object: ... SPEC_TARGETS: tuple[SpecTarget, ...] 
= ( - SpecTarget(route="/console/api/swagger.json", filename="console-swagger.json"), - SpecTarget(route="/api/swagger.json", filename="web-swagger.json"), - SpecTarget(route="/v1/swagger.json", filename="service-swagger.json"), + SpecTarget(route="/console/api/swagger.json", filename="console-swagger.json", namespace="console"), + SpecTarget(route="/api/swagger.json", filename="web-swagger.json", namespace="web"), + SpecTarget(route="/v1/swagger.json", filename="service-swagger.json", namespace="service"), ) _ORIGINAL_REGISTER_MODEL = Swagger.register_model _ORIGINAL_REGISTER_FIELD = Swagger.register_field -def _apply_runtime_defaults() -> None: +def _is_inline_field_map(value: object) -> TypeGuard[dict[object, object]]: + """Return whether a nested field map is an anonymous inline mapping.""" + + from flask_restx.model import Model, OrderedModel + + return isinstance(value, dict) and not isinstance(value, (Model, OrderedModel)) + + +def _jsonable_schema_value(value: object) -> object: + """Return a deterministic JSON-serializable representation for schema fingerprints.""" + + if value is None or isinstance(value, str | int | float | bool): + return value + if isinstance(value, list | tuple): + return [_jsonable_schema_value(item) for item in value] + if isinstance(value, dict): + return {str(key): _jsonable_schema_value(item) for key, item in value.items()} + value_type = type(value) + return f"<{value_type.__module__}.{value_type.__qualname__}>" + + +def _field_signature(field: object) -> object: + """Build a stable signature for a Flask-RESTX field object.""" + + from flask_restx import fields + from flask_restx.model import instance + + field_instance = instance(field) + signature: dict[str, object] = { + "class": f"{field_instance.__class__.__module__}.{field_instance.__class__.__qualname__}" + } + + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested): + signature["nested"] = 
_inline_model_signature(nested) + else: + signature["nested"] = getattr( + nested, + "name", + f"<{type(nested).__module__}.{type(nested).__qualname__}>", + ) + elif hasattr(field_instance, "container"): + signature["container"] = _field_signature(field_instance.container) + else: + schema = getattr(field_instance, "__schema__", None) + if isinstance(schema, dict): + signature["schema"] = _jsonable_schema_value(schema) + + for attr_name in ( + "attribute", + "default", + "description", + "example", + "max", + "min", + "nullable", + "readonly", + "required", + "title", + ): + if hasattr(field_instance, attr_name): + signature[attr_name] = _jsonable_schema_value(getattr(field_instance, attr_name)) + + return signature + + +def _inline_model_signature(nested_fields: dict[object, object]) -> object: + """Build a stable signature for an anonymous inline model.""" + + return [ + (str(field_name), _field_signature(field)) + for field_name, field in sorted(nested_fields.items(), key=lambda item: str(item[0])) + ] + + +def _inline_model_name(nested_fields: dict[object, object]) -> str: + """Return a stable Swagger model name for an anonymous inline field map.""" + + signature = json.dumps(_inline_model_signature(nested_fields), sort_keys=True, separators=(",", ":")) + digest = hashlib.sha1(signature.encode("utf-8")).hexdigest()[:12] + return f"_AnonymousInlineModel_{digest}" + + +def apply_runtime_defaults() -> None: """Force the small config surface required for Swagger generation.""" os.environ.setdefault("SECRET_KEY", "spec-export") @@ -74,25 +168,26 @@ def _patch_swagger_for_inline_nested_dicts() -> None: anonymous_models = getattr(self, "_anonymous_inline_models", None) if anonymous_models is None: anonymous_models = {} - self._anonymous_inline_models = anonymous_models + self.__dict__["_anonymous_inline_models"] = anonymous_models anonymous_name = anonymous_models.get(id(nested_fields)) if anonymous_name is None: - anonymous_name = 
f"_AnonymousInlineModel{len(anonymous_models) + 1}" + anonymous_name = _inline_model_name(nested_fields) anonymous_models[id(nested_fields)] = anonymous_name - self.api.model(anonymous_name, nested_fields) + if anonymous_name not in self.api.models: + self.api.model(anonymous_name, nested_fields) return self.api.models[anonymous_name] def register_model_with_inline_dict_support(self: Swagger, model: object) -> dict[str, str]: - if isinstance(model, dict): + if _is_inline_field_map(model): model = get_or_create_inline_model(self, model) return _ORIGINAL_REGISTER_MODEL(self, model) def register_field_with_inline_dict_support(self: Swagger, field: object) -> None: nested = getattr(field, "nested", None) - if isinstance(nested, dict): + if _is_inline_field_map(nested): field.model = get_or_create_inline_model(self, nested) # type: ignore _ORIGINAL_REGISTER_FIELD(self, field) @@ -105,22 +200,169 @@ def _patch_swagger_for_inline_nested_dicts() -> None: def create_spec_app() -> Flask: """Build a minimal Flask app that only mounts the Swagger-producing blueprints.""" - _apply_runtime_defaults() + apply_runtime_defaults() _patch_swagger_for_inline_nested_dicts() app = Flask(__name__) from controllers.console import bp as console_bp + from controllers.console import console_ns from controllers.service_api import bp as service_api_bp + from controllers.service_api import service_api_ns from controllers.web import bp as web_bp + from controllers.web import web_ns app.register_blueprint(console_bp) app.register_blueprint(web_bp) app.register_blueprint(service_api_bp) + for namespace in (console_ns, web_ns, service_api_ns): + for api in namespace.apis: + _materialize_inline_model_definitions(api) + return app +def _registered_models(namespace: str) -> dict[str, object]: + """Return the Flask-RESTX models registered for a Swagger namespace.""" + + if namespace == "console": + from controllers.console import console_ns + + models = dict(console_ns.models) + for api in 
console_ns.apis: + models.update(api.models) + return models + if namespace == "web": + from controllers.web import web_ns + + models = dict(web_ns.models) + for api in web_ns.apis: + models.update(api.models) + return models + if namespace == "service": + from controllers.service_api import service_api_ns + + models = dict(service_api_ns.models) + for api in service_api_ns.apis: + models.update(api.models) + return models + + raise ValueError(f"unknown Swagger namespace: {namespace}") + + +def _materialize_inline_model_definitions(api: RestxApi) -> None: + """Convert inline `fields.Nested({...})` maps into named API models.""" + + from flask_restx import fields + from flask_restx.model import Model, OrderedModel, instance + + inline_models: dict[int, dict[object, object]] = {} + inline_model_names: dict[int, str] = {} + + def collect_field(field: object) -> None: + field_instance = instance(field) + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested) and id(nested) not in inline_models: + inline_models[id(nested)] = nested + for nested_field in nested.values(): + collect_field(nested_field) + + container = getattr(field_instance, "container", None) + if container is not None: + collect_field(container) + + for model in list(api.models.values()): + if isinstance(model, (Model, OrderedModel)): + for field in model.values(): + collect_field(field) + + for nested_fields in sorted(inline_models.values(), key=_inline_model_name): + anonymous_name = _inline_model_name(nested_fields) + inline_model_names[id(nested_fields)] = anonymous_name + if anonymous_name not in api.models: + api.model(anonymous_name, nested_fields) + + def model_name_for(nested_fields: dict[object, object]) -> str: + anonymous_name = inline_model_names.get(id(nested_fields)) + if anonymous_name is None: + anonymous_name = _inline_model_name(nested_fields) + inline_model_names[id(nested_fields)] = anonymous_name + if 
anonymous_name not in api.models: + api.model(anonymous_name, nested_fields) + return anonymous_name + + def materialize_field(field: object) -> None: + field_instance = instance(field) + if isinstance(field_instance, fields.Nested): + nested = getattr(field_instance, "nested", None) + if _is_inline_field_map(nested): + field_instance.model = api.models[model_name_for(nested)] # type: ignore[attr-defined] + + container = getattr(field_instance, "container", None) + if container is not None: + materialize_field(container) + + index = 0 + while index < len(api.models): + model = list(api.models.values())[index] + index += 1 + if isinstance(model, (Model, OrderedModel)): + for field in model.values(): + materialize_field(field) + + +def drop_null_values(value: object) -> object: + """Remove JSON null values that make the Markdown converter crash.""" + + if isinstance(value, dict): + return {key: drop_null_values(item) for key, item in value.items() if item is not None} + if isinstance(value, list): + return [drop_null_values(item) for item in value] + return value + + +def sort_openapi_arrays(value: object, *, parent_key: str | None = None) -> object: + """Sort order-insensitive Swagger arrays so generated Markdown is stable.""" + + if isinstance(value, dict): + return {key: sort_openapi_arrays(item, parent_key=key) for key, item in value.items()} + if not isinstance(value, list): + return value + + sorted_items = [sort_openapi_arrays(item, parent_key=parent_key) for item in value] + if parent_key == "parameters": + return sorted( + sorted_items, + key=lambda item: ( + item.get("in", "") if isinstance(item, dict) else "", + item.get("name", "") if isinstance(item, dict) else "", + json.dumps(item, sort_keys=True, default=str), + ), + ) + if parent_key in {"enum", "required", "schemes", "tags"}: + string_items = [item for item in sorted_items if isinstance(item, str)] + if len(string_items) == len(sorted_items): + return sorted(string_items) + return sorted_items + + 
+def _merge_registered_definitions(payload: dict[str, object], namespace: str) -> dict[str, object]: + """Include registered but route-indirect models in the exported Swagger definitions.""" + + definitions = payload.setdefault("definitions", {}) + if not isinstance(definitions, dict): + raise RuntimeError("unexpected Swagger definitions payload") + + for name, model in _registered_models(namespace).items(): + schema = getattr(model, "__schema__", None) + if isinstance(schema, dict): + definitions.setdefault(name, schema) + + return payload + + def generate_specs(output_dir: Path) -> list[Path]: """Write all Swagger specs to `output_dir` and return the written paths.""" @@ -138,6 +380,9 @@ def generate_specs(output_dir: Path) -> list[Path]: payload = response.get_json() if not isinstance(payload, dict): raise RuntimeError(f"unexpected response payload for {target.route}") + payload = _merge_registered_definitions(payload, target.namespace) + payload = drop_null_values(payload) + payload = sort_openapi_arrays(payload) output_path = output_dir / target.filename output_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8") diff --git a/api/openapi/markdown/console-swagger.md b/api/openapi/markdown/console-swagger.md new file mode 100644 index 0000000000..a69cecd83c --- /dev/null +++ b/api/openapi/markdown/console-swagger.md @@ -0,0 +1,14766 @@ +# Console API +Console management APIs for app configuration, monitoring, and administration + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## console +Console management API operations + +### /account/avatar + +#### GET +##### Description + +Get account avatar url + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[AccountAvatarQuery](#accountavatarquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountAvatarPayload](#accountavatarpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/change-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailSendPayload](#changeemailsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/change-email/check-email-unique + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CheckEmailUniquePayload](#checkemailuniquepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/change-email/reset + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailResetPayload](#changeemailresetpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/change-email/validity + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChangeEmailValidityPayload](#changeemailvaliditypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete + +#### POST +##### Parameters 
+ +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountDeletePayload](#accountdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete/feedback + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountDeletionFeedbackPayload](#accountdeletionfeedbackpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/delete/verify + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/education + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationStatusResponse](#educationstatusresponse) | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EducationActivatePayload](#educationactivatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/education/autocomplete + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EducationAutocompleteQuery](#educationautocompletequery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationAutocompleteResponse](#educationautocompleteresponse) | + +### /account/education/verify + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EducationVerifyResponse](#educationverifyresponse) | + +### /account/init + +#### POST +##### Parameters + +| Name | 
Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountInitPayload](#accountinitpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /account/integrates + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountIntegrateListResponse](#accountintegratelistresponse) | + +### /account/interface-language + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountInterfaceLanguagePayload](#accountinterfacelanguagepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/interface-theme + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountInterfaceThemePayload](#accountinterfacethemepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountNamePayload](#accountnamepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/password + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountPasswordPayload](#accountpasswordpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | 
Success | [Account](#account) | + +### /account/profile + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /account/timezone + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AccountTimezonePayload](#accounttimezonepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [Account](#account) | + +### /activate + +#### POST +##### Description + +Activate account with invitation token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ActivatePayload](#activatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Account activated successfully | [ActivationResponse](#activationresponse) | +| 400 | Already activated or invalid token | | + +### /activate/check + +#### GET +##### Description + +Check if activation token is valid + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ActivateCheckQuery](#activatecheckquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ActivationCheckResponse](#activationcheckresponse) | + +### /admin/batch_add_notification_accounts + +#### POST +##### Description + +Register target accounts for a notification by email address. JSON body: {"notification_id": "...", "user_email": ["a@example.com", ...]}. File upload: multipart/form-data with a 'file' field (CSV or TXT, one email per line) plus a 'notification_id' field. Emails that do not match any account are silently skipped. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Accounts added successfully | + +### /admin/delete-explore-banner/{banner_id} + +#### DELETE +##### Description + +Delete an explore banner + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| banner_id | path | Banner ID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Banner deleted successfully | + +### /admin/insert-explore-apps + +#### POST +##### Description + +Insert or update an app in the explore list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InsertExploreAppPayload](#insertexploreapppayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | App updated successfully | +| 201 | App inserted successfully | +| 404 | App not found | + +### /admin/insert-explore-apps/{app_id} + +#### DELETE +##### Description + +Remove an app from the explore list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID to remove | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | App removed successfully | + +### /admin/insert-explore-banner + +#### POST +##### Description + +Insert an explore banner + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InsertExploreBannerPayload](#insertexplorebannerpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Banner inserted successfully | + +### /admin/upsert_notification + +#### POST +##### Description + +Create or update an in-product notification. 
Supply notification_id to update an existing one; omit it to create a new one. Pass at least one language variant in contents (zh / en / jp). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpsertNotificationPayload](#upsertnotificationpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Notification upserted successfully | + +### /all-workspaces + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkspaceListQuery](#workspacelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-based-extension + +#### GET +##### Description + +Get all API-based extensions for current tenant + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [APIBasedExtensionListResponse](#apibasedextensionlistresponse) | + +#### POST +##### Description + +Create a new API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [APIBasedExtensionPayload](#apibasedextensionpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Extension created successfully | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +### /api-based-extension/{id} + +#### DELETE +##### Description + +Delete API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Extension deleted successfully | + +#### GET +##### Description + +Get 
API-based extension by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +#### POST +##### Description + +Update API-based extension + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [APIBasedExtensionPayload](#apibasedextensionpayload) | +| id | path | Extension ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Extension updated successfully | [APIBasedExtensionResponse](#apibasedextensionresponse) | + +### /api-key-auth/data-source + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-key-auth/data-source/binding + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiKeyAuthBindingPayload](#apikeyauthbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /api-key-auth/data-source/{binding_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /app/prompt-templates + +#### GET +##### Description + +Get advanced prompt templates based on app mode and model configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[AdvancedPromptTemplateQuery](#advancedprompttemplatequery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Prompt templates retrieved successfully | [ object ] | +| 400 | Invalid request parameters | | + +### /apps + +#### GET +##### Summary + +Get app list + +##### Description + +Get list of applications with pagination and filtering + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppListQuery](#applistquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AppPagination](#apppagination) | + +#### POST +##### Summary + +Create app + +##### Description + +Create a new application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CreateAppPayload](#createapppayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | App created successfully | [AppDetail](#appdetail) | +| 400 | Invalid request parameters | | +| 403 | Insufficient permissions | | + +### /apps/imports + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppImportPayload](#appimportpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Import completed | [Import](#import) | +| 202 | Import pending confirmation | [Import](#import) | +| 400 | Import failed | [Import](#import) | + +### /apps/imports/{app_id}/check-dependencies + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | 
Schema | +| ---- | ----------- | ------ | +| 200 | Dependencies checked | [CheckDependenciesResult](#checkdependenciesresult) | + +### /apps/imports/{import_id}/confirm + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| import_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Import confirmed | [Import](#import) | +| 400 | Import failed | [Import](#import) | + +### /apps/workflows/online-users + +#### POST +##### Description + +Get workflow online users + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowOnlineUsersPayload](#workflowonlineuserspayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id} + +#### DELETE +##### Summary + +Delete app + +##### Description + +Delete application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | App deleted successfully | +| 403 | Insufficient permissions | + +#### GET +##### Summary + +Get app detail + +##### Description + +Get application details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AppDetailWithSite](#appdetailwithsite) | + +#### PUT +##### Summary + +Update app + +##### Description + +Update application details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpdateAppPayload](#updateapppayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | App updated successfully | [AppDetailWithSite](#appdetailwithsite) | +| 400 | Invalid request parameters | | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/advanced-chat/workflow-runs + +#### GET +##### Summary + +Get advanced chat app workflow run list + +##### Description + +Get advanced chat workflow run list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunListQuery](#workflowrunlistquery) | +| app_id | path | Application ID | Yes | string | +| last_id | query | Last run ID for pagination | No | string | +| limit | query | Number of items per page (1-100) | No | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. 
Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs retrieved successfully | [AdvancedChatWorkflowRunPagination](#advancedchatworkflowrunpagination) | + +### /apps/{app_id}/advanced-chat/workflow-runs/count + +#### GET +##### Summary + +Get advanced chat workflow runs count statistics + +##### Description + +Get advanced chat workflow runs count statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunCountQuery](#workflowruncountquery) | +| app_id | path | Application ID | Yes | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| time_range | query | Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), 30m (30 minutes), 30s (30 seconds). Filters by created_at field. | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. 
Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs count retrieved successfully | [WorkflowRunCount](#workflowruncount) | + +### /apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/preview + +#### POST +##### Summary + +Preview human input form content and placeholders + +##### Description + +Get human input form preview for advanced chat workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormPreviewPayload](#humaninputformpreviewpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/advanced-chat/workflows/draft/human-input/nodes/{node_id}/form/run + +#### POST +##### Summary + +Submit human input form preview + +##### Description + +Submit human input form preview for advanced chat workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/advanced-chat/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Description + +Run draft workflow iteration node for advanced chat + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IterationNodeRunPayload](#iterationnoderunpayload) | +| app_id | path | Application 
ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Iteration node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/advanced-chat/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Description + +Run draft workflow loop node for advanced chat + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoopNodeRunPayload](#loopnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Loop node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/advanced-chat/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Description + +Run draft workflow for advanced chat application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AdvancedChatWorkflowRunPayload](#advancedchatworkflowrunpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow run started successfully | +| 400 | Invalid request parameters | +| 403 | Permission denied | + +### /apps/{app_id}/agent/logs + +#### GET +##### Summary + +Get agent logs + +##### Description + +Get agent execution logs for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AgentLogQuery](#agentlogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | 
Description | Schema | +| ---- | ----------- | ------ | +| 200 | Agent logs retrieved successfully | [ object ] | +| 400 | Invalid request parameters | | + +### /apps/{app_id}/annotation-reply/{action} + +#### POST +##### Description + +Enable or disable annotation reply for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationReplyPayload](#annotationreplypayload) | +| action | path | Action to perform (enable/disable) | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-reply/{action}/status/{job_id} + +#### GET +##### Description + +Get status of annotation reply action job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action type | Yes | string | +| app_id | path | Application ID | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-setting + +#### GET +##### Description + +Get annotation settings for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Annotation settings retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotation-settings/{annotation_setting_id} + +#### POST +##### Description + +Update annotation settings for an app + +##### Parameters + +| Name | Located in | Description | Required | 
Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationSettingUpdatePayload](#annotationsettingupdatepayload) | +| annotation_setting_id | path | Annotation setting ID | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Settings updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotations + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get annotations for an app with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationListQuery](#annotationlistquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Annotations retrieved successfully | +| 403 | Insufficient permissions | + +#### POST +##### Description + +Create a new annotation for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CreateAnnotationPayload](#createannotationpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Annotation created successfully | [Annotation](#annotation) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/batch-import + +#### POST +##### Description + +Batch import annotations from CSV file with rate limiting and security checks + +##### Parameters + +| Name | Located in | Description | Required | Schema 
| +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Batch import started successfully | +| 400 | No file uploaded or too many files | +| 403 | Insufficient permissions | +| 413 | File too large | +| 429 | Too many requests or concurrent imports | + +### /apps/{app_id}/annotations/batch-import-status/{job_id} + +#### GET +##### Description + +Get status of batch import job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/annotations/count + +#### GET +##### Description + +Get count of message annotations for the app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation count retrieved successfully | [AnnotationCountResponse](#annotationcountresponse) | + +### /apps/{app_id}/annotations/export + +#### GET +##### Description + +Export all annotations for an app with CSV injection protection + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotations exported successfully | [AnnotationExportList](#annotationexportlist) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/{annotation_id} + +#### DELETE 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | | Yes | string | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Description + +Update or delete an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [UpdateAnnotationPayload](#updateannotationpayload) | +| annotation_id | path | Annotation ID | Yes | string | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation updated successfully | [Annotation](#annotation) | +| 204 | Annotation deleted successfully | | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/annotations/{annotation_id}/hit-histories + +#### GET +##### Description + +Get hit histories for an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | Annotation ID | Yes | string | +| app_id | path | Application ID | Yes | string | +| limit | query | Page size | No | integer | +| page | query | Page number | No | integer | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Hit histories retrieved successfully | [AnnotationHitHistoryList](#annotationhithistorylist) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/api-enable + +#### POST +##### Description + +Enable or disable app API + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppApiStatusPayload](#appapistatuspayload) | +| app_id | path | Application ID | Yes | string | + +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API status updated successfully | [AppDetail](#appdetail) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/audio-to-text + +#### POST +##### Description + +Transcribe audio to text for chat messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Audio transcription successful | [AudioTranscriptResponse](#audiotranscriptresponse) | +| 400 | Bad request - No audio uploaded or unsupported type | | +| 413 | Audio file too large | | + +### /apps/{app_id}/chat-conversations + +#### GET +##### Description + +Get chat conversations with pagination, filtering and summary + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatConversationQuery](#chatconversationquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationWithSummaryPagination](#conversationwithsummarypagination) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/chat-conversations/{conversation_id} + +#### DELETE +##### Description + +Delete a chat conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 403 | Insufficient permissions | +| 404 | Conversation not found | + +#### GET +##### Description + +Get chat conversation details 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationDetail](#conversationdetail) | +| 403 | Insufficient permissions | | +| 404 | Conversation not found | | + +### /apps/{app_id}/chat-messages + +#### GET +##### Description + +Get chat messages for a conversation with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatMessagesQuery](#chatmessagesquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [MessageInfiniteScrollPaginationResponse](#messageinfinitescrollpaginationresponse) | +| 404 | Conversation not found | | + +### /apps/{app_id}/chat-messages/{message_id}/suggested-questions + +#### GET +##### Description + +Get suggested questions for a message + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Suggested questions retrieved successfully | [SuggestedQuestionsResponse](#suggestedquestionsresponse) | +| 404 | Message or conversation not found | | + +### /apps/{app_id}/chat-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running chat message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application 
ID | Yes | string | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | + +### /apps/{app_id}/completion-conversations + +#### GET +##### Description + +Get completion conversations with pagination and filtering + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionConversationQuery](#completionconversationquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationPagination](#conversationpagination) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/completion-conversations/{conversation_id} + +#### DELETE +##### Description + +Delete a completion conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 403 | Insufficient permissions | +| 404 | Conversation not found | + +#### GET +##### Description + +Get completion conversation details with messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| conversation_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ConversationMessageDetail](#conversationmessagedetail) | +| 403 | Insufficient permissions | | +| 404 | Conversation not found | | + +### /apps/{app_id}/completion-messages + +#### POST +##### 
Description + +Generate completion message for debugging + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionMessagePayload](#completionmessagepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Completion generated successfully | +| 400 | Invalid request parameters | +| 404 | App not found | + +### /apps/{app_id}/completion-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running completion message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | + +### /apps/{app_id}/conversation-variables + +#### GET +##### Description + +Get conversation variables for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariablesQuery](#conversationvariablesquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Conversation variables retrieved successfully | [PaginatedConversationVariableResponse](#paginatedconversationvariableresponse) | + +### /apps/{app_id}/convert-to-workflow + +#### POST +##### Summary + +Convert basic mode of chatbot app to workflow mode + +##### Description + +Convert a basic mode chatbot app to workflow mode +Convert an expert mode chatbot app to workflow mode +Convert a completion app to a workflow app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | 
----------- | -------- | ------ | +| payload | body | | Yes | [ConvertToWorkflowPayload](#converttoworkflowpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Application converted to workflow successfully | +| 400 | Application cannot be converted | +| 403 | Permission denied | + +### /apps/{app_id}/copy + +#### POST +##### Summary + +Copy app + +##### Description + +Create a copy of an existing application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CopyAppPayload](#copyapppayload) | +| app_id | path | Application ID to copy | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | App copied successfully | [AppDetailWithSite](#appdetailwithsite) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/export + +#### GET +##### Summary + +Export app + +##### Description + +Export application configuration as DSL + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppExportQuery](#appexportquery) | +| app_id | path | Application ID to export | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | App exported successfully | [AppExportResponse](#appexportresponse) | +| 403 | Insufficient permissions | | + +### /apps/{app_id}/feedbacks + +#### POST +##### Description + +Create or update message feedback (like/dislike) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | 
----------- | +| 200 | Feedback updated successfully | +| 403 | Insufficient permissions | +| 404 | Message not found | + +### /apps/{app_id}/feedbacks/export + +#### GET +##### Description + +Export user feedback data for Google Sheets + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FeedbackExportQuery](#feedbackexportquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback data exported successfully | +| 400 | Invalid parameters | +| 500 | Internal server error | + +### /apps/{app_id}/icon + +#### POST +##### Description + +Update application icon + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppIconPayload](#appiconpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Icon updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/messages/{message_id} + +#### GET +##### Description + +Get message details by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Message retrieved successfully | [MessageDetailResponse](#messagedetailresponse) | +| 404 | Message not found | | + +### /apps/{app_id}/model-config + +#### POST +##### Summary + +Modify app model config + +##### Description + +Update application model configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
payload | body | | Yes | [ModelConfigRequest](#modelconfigrequest) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Model configuration updated successfully | +| 400 | Invalid configuration | +| 404 | App not found | + +### /apps/{app_id}/name + +#### POST +##### Description + +Check if app name is available + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppNamePayload](#appnamepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Name availability checked | [AppDetail](#appdetail) | + +### /apps/{app_id}/publish-to-creators-platform + +#### POST +##### Summary + +Publish app to Creators Platform + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/server + +#### GET +##### Description + +Get MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server configuration retrieved successfully | [AppMCPServerResponse](#appmcpserverresponse) | + +#### POST +##### Description + +Create MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPServerCreatePayload](#mcpservercreatepayload) | +| app_id | path | Application ID | 
Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | MCP server configuration created successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | + +#### PUT +##### Description + +Update MCP server configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPServerUpdatePayload](#mcpserverupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server configuration updated successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | +| 404 | Server not found | | + +### /apps/{app_id}/site + +#### POST +##### Description + +Update application site configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppSiteUpdatePayload](#appsiteupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Site configuration updated successfully | [AppSiteResponse](#appsiteresponse) | +| 403 | Insufficient permissions | | +| 404 | App not found | | + +### /apps/{app_id}/site-enable + +#### POST +##### Description + +Enable or disable app site + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppSiteStatusPayload](#appsitestatuspayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Site status updated successfully | [AppDetail](#appdetail) | +| 403 | Insufficient 
permissions | | + +### /apps/{app_id}/site/access-token-reset + +#### POST +##### Description + +Reset access token for application site + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Access token reset successfully | [AppSiteResponse](#appsiteresponse) | +| 403 | Insufficient permissions (admin/owner required) | | +| 404 | App or site not found | | + +### /apps/{app_id}/statistics/average-response-time + +#### GET +##### Description + +Get average response time statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Average response time statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/average-session-interactions + +#### GET +##### Description + +Get average session interaction statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Average session interaction statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-conversations + +#### GET +##### Description + +Get daily conversation statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily conversation statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-end-users + +#### GET +##### Description + +Get daily terminal/end-user statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily terminal statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/daily-messages + +#### GET +##### Description + +Get daily message statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily message statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/token-costs + +#### GET +##### Description + +Get daily token cost statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Daily token cost statistics 
retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/tokens-per-second + +#### GET +##### Description + +Get tokens per second statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tokens per second statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/statistics/user-satisfaction-rate + +#### GET +##### Description + +Get user satisfaction rate statistics for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [StatisticTimeRangeQuery](#statistictimerangequery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | User satisfaction rate statistics retrieved successfully | [ object ] | + +### /apps/{app_id}/text-to-audio + +#### POST +##### Description + +Convert text to speech for chat messages + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToSpeechPayload](#texttospeechpayload) | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Text to speech conversion successful | +| 400 | Bad request - Invalid parameters | + +### /apps/{app_id}/text-to-audio/voices + +#### GET +##### Description + +Get available TTS voices for a specific language + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | 
| Yes | [TextToSpeechVoiceQuery](#texttospeechvoicequery) | +| app_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | TTS voices retrieved successfully | [ object ] | +| 400 | Invalid language parameter | | + +### /apps/{app_id}/trace + +#### GET +##### Summary + +Get app trace + +##### Description + +Get app tracing configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trace configuration retrieved successfully | + +#### POST +##### Description + +Update app tracing configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AppTracePayload](#apptracepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trace configuration updated successfully | +| 403 | Insufficient permissions | + +### /apps/{app_id}/trace-config + +#### DELETE +##### Summary + +Delete an existing trace app configuration + +##### Description + +Delete an existing tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceProviderQuery](#traceproviderquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tracing configuration deleted successfully | +| 400 | Invalid request parameters or configuration not found | + +#### GET +##### Description + +Get tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema 
| +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceProviderQuery](#traceproviderquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tracing configuration retrieved successfully | object | +| 400 | Invalid request parameters | | + +#### PATCH +##### Summary + +Update an existing trace app configuration + +##### Description + +Update an existing tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceConfigPayload](#traceconfigpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Tracing configuration updated successfully | object | +| 400 | Invalid request parameters or configuration not found | | + +#### POST +##### Summary + +Create a new trace app configuration + +##### Description + +Create a new tracing configuration for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TraceConfigPayload](#traceconfigpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Tracing configuration created successfully | object | +| 400 | Invalid request parameters or configuration already exists | | + +### /apps/{app_id}/trigger-enable + +#### POST +##### Summary + +Update app trigger (enable/disable) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [ParserEnable](#parserenable) | + +##### Responses + +| 
Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WorkflowTriggerResponse](#workflowtriggerresponse) | + +### /apps/{app_id}/triggers + +#### GET +##### Summary + +Get app triggers list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WorkflowTriggerListResponse](#workflowtriggerlistresponse) | + +### /apps/{app_id}/workflow-app-logs + +#### GET +##### Summary + +Get workflow app logs + +##### Description + +Get workflow application execution logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowAppLogQuery](#workflowapplogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow app logs retrieved successfully | [WorkflowAppLogPaginationResponse](#workflowapplogpaginationresponse) | + +### /apps/{app_id}/workflow-archived-logs + +#### GET +##### Summary + +Get workflow archived logs + +##### Description + +Get workflow archived execution logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowAppLogQuery](#workflowapplogquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow archived logs retrieved successfully | [WorkflowArchivedLogPaginationResponse](#workflowarchivedlogpaginationresponse) | + +### /apps/{app_id}/workflow-runs + +#### GET +##### Summary + +Get workflow run list + +##### Description + +Get workflow run list + +##### Parameters + +| 
Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunListQuery](#workflowrunlistquery) | +| app_id | path | Application ID | Yes | string | +| last_id | query | Last run ID for pagination | No | string | +| limit | query | Number of items per page (1-100) | No | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs retrieved successfully | [WorkflowRunPagination](#workflowrunpagination) | + +### /apps/{app_id}/workflow-runs/count + +#### GET +##### Summary + +Get workflow runs count statistics + +##### Description + +Get workflow runs count statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunCountQuery](#workflowruncountquery) | +| app_id | path | Application ID | Yes | string | +| status | query | Filter by status (optional): running, succeeded, failed, stopped, partial-succeeded | No | string | +| time_range | query | Filter by time range (optional): e.g., 7d (7 days), 4h (4 hours), 30m (30 minutes), 30s (30 seconds). Filters by created_at field. | No | string | +| triggered_from | query | Filter by trigger source (optional): debugging or app-run. 
Default: debugging | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow runs count retrieved successfully | [WorkflowRunCount](#workflowruncount) | + +### /apps/{app_id}/workflow-runs/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Description + +Stop running workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| task_id | path | Task ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 403 | Permission denied | +| 404 | Task not found | + +### /apps/{app_id}/workflow-runs/{run_id} + +#### GET +##### Summary + +Get workflow run detail + +##### Description + +Get workflow run detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow run detail retrieved successfully | [WorkflowRunDetail](#workflowrundetail) | +| 404 | Workflow run not found | | + +### /apps/{app_id}/workflow-runs/{run_id}/export + +#### GET +##### Description + +Generate a download URL for an archived workflow run. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Export URL generated | [WorkflowRunExport](#workflowrunexport) | + +### /apps/{app_id}/workflow-runs/{run_id}/node-executions + +#### GET +##### Summary + +Get workflow run node execution list + +##### Description + +Get workflow run node execution list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node executions retrieved successfully | [WorkflowRunNodeExecutionList](#workflowrunnodeexecutionlist) | +| 404 | Workflow run not found | | + +### /apps/{app_id}/workflow/comments + +#### GET +##### Summary + +Get all comments for a workflow + +##### Description + +Get all comments for a workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comments retrieved successfully | [WorkflowCommentBasic](#workflowcommentbasic) | + +#### POST +##### Summary + +Create a new workflow comment + +##### Description + +Create a new workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentCreatePayload](#workflowcommentcreatepayload) | +| app_id | path | Application ID | Yes | string | + +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Comment created successfully | [WorkflowCommentCreate](#workflowcommentcreate) | + +### /apps/{app_id}/workflow/comments/mention-users + +#### GET +##### Summary + +Get all users in current tenant for mentions + +##### Description + +Get all users in current tenant for mentions + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Mentionable users retrieved successfully | [WorkflowCommentMentionUsersPayload](#workflowcommentmentionuserspayload) | + +### /apps/{app_id}/workflow/comments/{comment_id} + +#### DELETE +##### Summary + +Delete a workflow comment + +##### Description + +Delete a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Comment deleted successfully | + +#### GET +##### Summary + +Get a specific workflow comment + +##### Description + +Get a specific workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment retrieved successfully | [WorkflowCommentDetail](#workflowcommentdetail) | + +#### PUT +##### Summary + +Update a workflow comment + +##### Description + +Update a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentUpdatePayload](#workflowcommentupdatepayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment updated successfully | [WorkflowCommentUpdate](#workflowcommentupdate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/replies + +#### POST +##### Summary + +Add a reply to a workflow comment + +##### Description + +Add a reply to a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowCommentReplyPayload](#workflowcommentreplypayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Reply created successfully | [WorkflowCommentReplyCreate](#workflowcommentreplycreate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/replies/{reply_id} + +#### DELETE +##### Summary + +Delete a comment reply + +##### Description + +Delete a comment reply + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | +| reply_id | path | Reply ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Reply deleted successfully | + +#### PUT +##### Summary + +Update a comment reply + +##### Description + +Update a comment reply + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[WorkflowCommentReplyPayload](#workflowcommentreplypayload) | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | +| reply_id | path | Reply ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Reply updated successfully | [WorkflowCommentReplyUpdate](#workflowcommentreplyupdate) | + +### /apps/{app_id}/workflow/comments/{comment_id}/resolve + +#### POST +##### Summary + +Resolve a workflow comment + +##### Description + +Resolve a workflow comment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| comment_id | path | Comment ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Comment resolved successfully | [WorkflowCommentResolve](#workflowcommentresolve) | + +### /apps/{app_id}/workflow/statistics/average-app-interactions + +#### GET +##### Description + +Get workflow average app interaction statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Average app interaction statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/daily-conversations + +#### GET +##### Description + +Get workflow daily runs statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | 
----------- | +| 200 | Daily runs statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/daily-terminals + +#### GET +##### Description + +Get workflow daily terminals statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Daily terminals statistics retrieved successfully | + +### /apps/{app_id}/workflow/statistics/token-costs + +#### GET +##### Description + +Get workflow daily token cost statistics + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowStatisticQuery](#workflowstatisticquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Daily token cost statistics retrieved successfully | + +### /apps/{app_id}/workflows + +#### GET +##### Summary + +Get published workflows + +##### Description + +Get all published workflows for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowListQuery](#workflowlistquery) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Published workflows retrieved successfully | [WorkflowPagination](#workflowpagination) | + +### /apps/{app_id}/workflows/default-workflow-block-configs + +#### GET +##### Summary + +Get default block config + +##### Description + +Get default block configurations for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- 
| ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Default block configurations retrieved successfully | + +### /apps/{app_id}/workflows/default-workflow-block-configs/{block_type} + +#### GET +##### Summary + +Get default block config + +##### Description + +Get default block configuration by type + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DefaultBlockConfigQuery](#defaultblockconfigquery) | +| app_id | path | Application ID | Yes | string | +| block_type | path | Block type | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Default block configuration retrieved successfully | +| 404 | Block type not found | + +### /apps/{app_id}/workflows/draft + +#### GET +##### Summary + +Get draft workflow + +##### Description + +Get draft workflow for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Draft workflow retrieved successfully | [Workflow](#workflow) | +| 404 | Draft workflow not found | | + +#### POST +##### Summary + +Sync draft workflow + +##### Description + +Sync draft workflow configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SyncDraftWorkflowPayload](#syncdraftworkflowpayload) | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Draft workflow synced successfully | [SyncDraftWorkflowResponse](#syncdraftworkflowresponse) | 
+| 400 | Invalid workflow configuration | | +| 403 | Permission denied | | + +### /apps/{app_id}/workflows/draft/conversation-variables + +#### GET +##### Description + +Get conversation variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Conversation variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | +| 404 | Draft workflow not found | | + +#### POST +##### Description + +Update conversation variables for workflow draft + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariableUpdatePayload](#conversationvariableupdatepayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation variables updated successfully | + +### /apps/{app_id}/workflows/draft/environment-variables + +#### GET +##### Summary + +Get environment variables + +##### Description + +Get environment variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Environment variables retrieved successfully | +| 404 | Draft workflow not found | + +#### POST +##### Description + +Update environment variables for workflow draft + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EnvironmentVariableUpdatePayload](#environmentvariableupdatepayload) | +| app_id | path | 
Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Environment variables updated successfully | + +### /apps/{app_id}/workflows/draft/features + +#### POST +##### Description + +Update draft workflow features + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowFeaturesPayload](#workflowfeaturespayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow features updated successfully | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/delivery-test + +#### POST +##### Summary + +Test human input delivery + +##### Description + +Test human input delivery for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputDeliveryTestPayload](#humaninputdeliverytestpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/preview + +#### POST +##### Summary + +Preview human input form content and placeholders + +##### Description + +Get human input form preview for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormPreviewPayload](#humaninputformpreviewpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/human-input/nodes/{node_id}/form/run + +#### POST 
+##### Summary + +Submit human input form preview + +##### Description + +Submit human input form preview for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Description + +Run draft workflow iteration node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IterationNodeRunPayload](#iterationnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow iteration node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Description + +Run draft workflow loop node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoopNodeRunPayload](#loopnoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow loop node run started successfully | +| 403 | Permission denied | +| 404 | Node not found | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/last-run + +#### GET +##### Description + +Get last run result for 
draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node last run retrieved successfully | [WorkflowRunNodeExecution](#workflowrunnodeexecution) | +| 403 | Permission denied | | +| 404 | Node last run not found | | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow node + +##### Description + +Run draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowNodeRunPayload](#draftworkflownoderunpayload) | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node run started successfully | [WorkflowRunNodeExecution](#workflowrunnodeexecution) | +| 403 | Permission denied | | +| 404 | Node not found | | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/trigger/run + +#### POST +##### Summary + +Poll for trigger events and execute single node when event arrives + +##### Description + +Poll for trigger events and execute single node when event arrives + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Trigger event received and node executed successfully | +| 403 | Permission denied | +| 500 | Internal server error | + +### /apps/{app_id}/workflows/draft/nodes/{node_id}/variables + +#### DELETE 
+##### Description + +Delete all variables for a specific node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| node_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Node variables deleted successfully | + +#### GET +##### Description + +Get variables for a specific node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| node_id | path | Node ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Node variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | + +### /apps/{app_id}/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Description + +Run draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DraftWorkflowRunPayload](#draftworkflowrunpayload) | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Draft workflow run started successfully | +| 403 | Permission denied | + +### /apps/{app_id}/workflows/draft/system-variables + +#### GET +##### Description + +Get system variables for workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | System variables retrieved successfully | [WorkflowDraftVariableList](#workflowdraftvariablelist) | + +### /apps/{app_id}/workflows/draft/trigger/run + +#### 
POST
+##### Summary
+
+Poll for trigger events and execute full workflow when event arrives
+
+##### Description
+
+Poll for trigger events and execute full workflow when event arrives
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| payload | body | | Yes | [DraftWorkflowTriggerRunRequest](#draftworkflowtriggerrunrequest) |
+| app_id | path | Application ID | Yes | string |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Trigger event received and workflow executed successfully |
+| 403 | Permission denied |
+| 500 | Internal server error |
+
+### /apps/{app_id}/workflows/draft/trigger/run-all
+
+#### POST
+##### Summary
+
+Full workflow debug when the start node is a trigger
+
+##### Description
+
+Full workflow debug when the start node is a trigger
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| payload | body | | Yes | [DraftWorkflowTriggerRunAllPayload](#draftworkflowtriggerrunallpayload) |
+| app_id | path | Application ID | Yes | string |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Workflow executed successfully |
+| 403 | Permission denied |
+| 500 | Internal server error |
+
+### /apps/{app_id}/workflows/draft/variables
+
+#### DELETE
+##### Description
+
+Delete all draft workflow variables
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| app_id | path | | Yes | string |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 204 | Workflow variables deleted successfully |
+
+#### GET
+##### Summary
+
+Get draft workflow variables
+
+##### Description
+
+Get draft workflow variables
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| 
payload | body | | Yes | [WorkflowDraftVariableListQuery](#workflowdraftvariablelistquery) | +| app_id | path | Application ID | Yes | string | +| limit | query | Number of items per page (1-100) | No | string | +| page | query | Page number (1-100000) | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow variables retrieved successfully | [WorkflowDraftVariableListWithoutValue](#workflowdraftvariablelistwithoutvalue) | + +### /apps/{app_id}/workflows/draft/variables/{variable_id} + +#### DELETE +##### Description + +Delete a workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Variable deleted successfully | +| 404 | Variable not found | + +#### GET +##### Description + +Get a specific workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable retrieved successfully | [WorkflowDraftVariable](#workflowdraftvariable) | +| 404 | Variable not found | | + +#### PATCH +##### Description + +Update a workflow variable + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowDraftVariableUpdatePayload](#workflowdraftvariableupdatepayload) | +| app_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable updated successfully | 
[WorkflowDraftVariable](#workflowdraftvariable) | +| 404 | Variable not found | | + +### /apps/{app_id}/workflows/draft/variables/{variable_id}/reset + +#### PUT +##### Description + +Reset a workflow variable to its default value + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable reset successfully | [WorkflowDraftVariable](#workflowdraftvariable) | +| 204 | Variable reset (no content) | | +| 404 | Variable not found | | + +### /apps/{app_id}/workflows/publish + +#### GET +##### Summary + +Get published workflow + +##### Description + +Get published workflow for an application + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Published workflow retrieved successfully | [Workflow](#workflow) | +| 404 | Published workflow not found | | + +#### POST +##### Summary + +Publish workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [PublishWorkflowPayload](#publishworkflowpayload) | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /apps/{app_id}/workflows/triggers/webhook + +#### GET +##### Summary + +Get webhook trigger for a node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [Parser](#parser) | + +##### 
Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [WebhookTriggerResponse](#webhooktriggerresponse) | + +### /apps/{app_id}/workflows/{workflow_id} + +#### DELETE +##### Summary + +Delete workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Summary + +Update workflow attributes + +##### Description + +Update workflow by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowUpdatePayload](#workflowupdatepayload) | +| app_id | path | Application ID | Yes | string | +| workflow_id | path | Workflow ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow updated successfully | [Workflow](#workflow) | +| 403 | Permission denied | | +| 404 | Workflow not found | | + +### /apps/{app_id}/workflows/{workflow_id}/restore + +#### POST +##### Description + +Restore a published workflow version into the draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | Application ID | Yes | string | +| workflow_id | path | Published workflow ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow restored successfully | +| 400 | Source workflow must be published | +| 404 | Workflow not found | + +### /apps/{resource_id}/api-keys + +#### GET +##### Summary + +Get all API keys for an app + +##### Description + +Get all API keys for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Summary + +Create a new API key for an app + +##### Description + +Create a new API key for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /apps/{resource_id}/api-keys/{api_key_id} + +#### DELETE +##### Summary + +Delete an API key for an app + +##### Description + +Delete an API key for an app + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | +| resource_id | path | App ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + +### /apps/{server_id}/server/refresh + +#### GET +##### Description + +Refresh MCP server configuration and regenerate server code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| server_id | path | Server ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | MCP server refreshed successfully | [AppMCPServerResponse](#appmcpserverresponse) | +| 403 | Insufficient permissions | | +| 404 | Server not found | | + +### /auth/plugin/datasource/default-list + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/auth/plugin/datasource/list + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialPayload](#datasourcecredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/custom-client + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCustomClientPayload](#datasourcecustomclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/default + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceDefaultPayload](#datasourcedefaultpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/delete + +#### POST 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialDeletePayload](#datasourcecredentialdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceCredentialUpdatePayload](#datasourcecredentialupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /auth/plugin/datasource/{provider_id}/update-name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceUpdateNamePayload](#datasourceupdatenamepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /billing/invoices + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /billing/partners/{partner_key}/tenants + +#### PUT +##### Description + +Sync partner tenants bindings + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [PartnerTenantsPayload](#partnertenantspayload) | +| partner_key | path | Partner key | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tenants synced to partner successfully | +| 400 | Invalid partner information | + +### /billing/subscription + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /code-based-extension + +#### GET +##### Description + +Get code-based extension data by module name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| module | query | Extension module name | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [CodeBasedExtensionResponse](#codebasedextensionresponse) | + +### /compliance/download + +#### GET +##### Description + +Get compliance document download link + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ComplianceDownloadQuery](#compliancedownloadquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /data-source/integrates + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /data-source/integrates/{binding_id}/{action} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| binding_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets + +#### GET +##### Description + +Get list of datasets + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
ids | query | Filter by dataset IDs (list) | No | string | +| include_all | query | Include all datasets (default: false) | No | string | +| keyword | query | Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | +| tag_ids | query | Filter by tag IDs (list) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasets retrieved successfully | + +#### POST +##### Description + +Create a new dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetCreatePayload](#datasetcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Dataset created successfully | +| 400 | Invalid request parameters | + +### /datasets/api-base-info + +#### GET +##### Description + +Get dataset API base information + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | API base info retrieved successfully | + +### /datasets/api-keys + +#### GET +##### Description + +Get dataset API keys + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /datasets/api-keys/{api_key_id} + +#### DELETE +##### Description + +Delete dataset API key + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + +### 
/datasets/batch_import_status/{job_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| job_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| job_id | path | | Yes | string | +| payload | body | | Yes | [BatchImportPayload](#batchimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external + +#### POST +##### Description + +Create external knowledge dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalDatasetCreatePayload](#externaldatasetcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | External dataset created successfully | [DatasetDetail](#datasetdetail) | +| 400 | Invalid parameters | | +| 403 | Permission denied | | + +### /datasets/external-knowledge-api + +#### GET +##### Description + +Get external knowledge API templates + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| keyword | query | Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External API templates retrieved successfully | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[ExternalKnowledgeApiPayload](#externalknowledgeapipayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external-knowledge-api/{external_knowledge_api_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get external knowledge API template details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | External knowledge API ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External API template retrieved successfully | +| 404 | Template not found | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalKnowledgeApiPayload](#externalknowledgeapipayload) | +| external_knowledge_api_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/external-knowledge-api/{external_knowledge_api_id}/use-check + +#### GET +##### Description + +Check if external knowledge API is being used + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| external_knowledge_api_id | path | External knowledge API ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Usage check completed successfully | + +### /datasets/indexing-estimate + +#### POST +##### Description + +Estimate dataset indexing cost + +##### Parameters + +| Name | Located in | Description | Required 
| Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [IndexingEstimatePayload](#indexingestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing estimate calculated successfully | + +### /datasets/init + +#### POST +##### Description + +Initialize dataset with documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [KnowledgeConfig](#knowledgeconfig) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Dataset initialized successfully | [DatasetAndDocumentResponse](#datasetanddocumentresponse) | +| 400 | Invalid request parameters | | + +### /datasets/metadata/built-in + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/notion-indexing-estimate + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [NotionEstimatePayload](#notionestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/process-rule + +#### GET +##### Description + +Get dataset document processing rules + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| document_id | query | Document ID (optional) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Process rules retrieved successfully | + +### /datasets/retrieval-setting + +#### GET +##### Description + +Get dataset retrieval settings + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Retrieval settings 
retrieved successfully | + +### /datasets/retrieval-setting/{vector_type} + +#### GET +##### Description + +Get mock dataset retrieval settings by vector type + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| vector_type | path | Vector store type | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Mock retrieval settings retrieved successfully | + +### /datasets/{dataset_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get dataset details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Dataset retrieved successfully | [DatasetDetail](#datasetdetail) | +| 403 | Permission denied | | +| 404 | Dataset not found | | + +#### PATCH +##### Description + +Update dataset details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetUpdatePayload](#datasetupdatepayload) | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Dataset updated successfully | [DatasetDetail](#datasetdetail) | +| 403 | Permission denied | | +| 404 | Dataset not found | | + +### /datasets/{dataset_id}/api-keys/{status} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | 
path | | Yes | string | +| status | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/auto-disable-logs + +#### GET +##### Description + +Get dataset auto disable logs + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Auto disable logs retrieved successfully | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/batch/{batch}/indexing-estimate + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/batch/{batch}/indexing-status + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get documents in a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| fetch | query | Fetch full details (default: false) | No | string | +| keyword | query | 
Search keyword | No | string | +| limit | query | Number of items per page (default: 20) | No | string | +| page | query | Page number (default: 1) | No | string | +| sort | query | Sort order (default: -created_at) | No | string | +| status | query | Filter documents by display status | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents retrieved successfully | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [KnowledgeConfig](#knowledgeconfig) | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Documents created successfully | [DatasetAndDocumentResponse](#datasetanddocumentresponse) | + +### /datasets/{dataset_id}/documents/download-zip + +#### POST +##### Summary + +Stream a ZIP archive containing the requested uploaded documents + +##### Description + +Download selected dataset documents as a single ZIP archive (upload-file only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [DocumentBatchDownloadZipPayload](#documentbatchdownloadzippayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/generate-summary + +#### POST +##### Summary + +Generate summary index for specified documents + +##### Description + +Generate summary index for documents +This endpoint checks if the dataset configuration supports summary generation +(indexing_technique must be 'high_quality' and summary_index_setting.enable must be true), +then asynchronously generates summary indexes for the provided documents. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [GenerateSummaryPayload](#generatesummarypayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Summary generation started successfully | +| 400 | Invalid request or dataset configuration | +| 403 | Permission denied | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/metadata + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [MetadataOperationData](#metadataoperationdata) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/status/{action}/batch + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Description + +Get document details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| metadata | query | Metadata inclusion (all/only/without) 
| No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/download + +#### GET +##### Description + +Get a signed download URL for a dataset document's original uploaded file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/indexing-estimate + +#### GET +##### Description + +Estimate document indexing cost + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing estimate calculated successfully | +| 400 | Document already finished | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/indexing-status + +#### GET +##### Description + +Get document indexing status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/metadata + +#### PUT +##### Description + +Update document metadata + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body 
| | Yes | [DocumentMetadataUpdatePayload](#documentmetadataupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document metadata updated successfully | +| 403 | Permission denied | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/notion/sync + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/pipeline-execution-log + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/processing/pause + +#### PATCH +##### Summary + +pause document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/processing/resume + +#### PATCH +##### Summary + +recover document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/datasets/{dataset_id}/documents/{document_id}/processing/{action} + +#### PATCH +##### Description + +Update document processing status (pause/resume) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform (pause/resume) | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Processing status updated successfully | +| 400 | Invalid action | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/rename + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [DocumentRenamePayload](#documentrenamepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Document renamed successfully | [DocumentResponse](#documentresponse) | + +### /datasets/{dataset_id}/documents/{document_id}/segment + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [SegmentCreatePayload](#segmentcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segment/{action} + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | 
Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/batch_import + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| payload | body | | Yes | [BatchImportPayload](#batchimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + 
+| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [SegmentUpdatePayload](#segmentupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [ChildChunkCreatePayload](#childchunkcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path 
| | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | | Yes | string | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | +| payload | body | | Yes | [ChildChunkUpdatePayload](#childchunkupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/documents/{document_id}/summary-status + +#### GET +##### Summary + +Get summary index generation status for a document + +##### Description + +Get summary index generation status for a document +Returns: +- total_segments: Total number of segments in the document +- summary_status: Dictionary with status counts + - completed: Number of summaries completed + - generating: Number of summaries being generated + - error: Number of summaries with errors + - not_started: Number of segments without summary records +- summaries: List of summary records with status and content preview + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Summary status retrieved successfully | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/website-sync + +#### GET +##### Summary + +sync website document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | + +##### Responses + +| Code | 
Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/error-docs + +#### GET +##### Description + +Get dataset error documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Error documents retrieved successfully | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/external-hit-testing + +#### POST +##### Description + +Test external knowledge retrieval for dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ExternalHitTestingPayload](#externalhittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | External hit testing completed successfully | +| 400 | Invalid parameters | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/hit-testing + +#### POST +##### Description + +Test dataset knowledge retrieval + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Hit testing completed successfully | [HitTestingResponse](#hittestingresponse) | +| 400 | Invalid parameters | | +| 404 | Dataset not found | | + +### /datasets/{dataset_id}/indexing-status + +#### GET +##### Description + +Get dataset indexing status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved successfully | + +### /datasets/{dataset_id}/metadata + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [MetadataArgs](#metadataargs) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/metadata/built-in/{action} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | | Yes | string | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/metadata/{metadata_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| metadata_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| metadata_id | path | | Yes | string | +| payload | body | | Yes | [MetadataUpdatePayload](#metadataupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/notion/sync + +#### GET +##### Parameters + +| Name | Located in | 
Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/permission-part-users + +#### GET +##### Description + +Get dataset permission user list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Permission users retrieved successfully | +| 403 | Permission denied | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/queries + +#### GET +##### Description + +Get dataset query history + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Query history retrieved successfully | [DatasetQueryDetail](#datasetquerydetail) | + +### /datasets/{dataset_id}/related-apps + +#### GET +##### Description + +Get applications related to dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Related apps retrieved successfully | [RelatedAppList](#relatedapplist) | + +### /datasets/{dataset_id}/retry + +#### POST +##### Summary + +retry document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| payload | body | | Yes | [DocumentRetryPayload](#documentretrypayload) | + +##### Responses + +| Code 
| Description | +| ---- | ----------- | +| 200 | Success | + +### /datasets/{dataset_id}/use-check + +#### GET +##### Description + +Check if dataset is in use + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset use status retrieved successfully | + +### /datasets/{resource_id}/api-keys + +#### GET +##### Summary + +Get all API keys for a dataset + +##### Description + +Get all API keys for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | API keys retrieved successfully | [ApiKeyList](#apikeylist) | + +#### POST +##### Summary + +Create a new API key for a dataset + +##### Description + +Create a new API key for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | API key created successfully | [ApiKeyItem](#apikeyitem) | +| 400 | Maximum keys exceeded | | + +### /datasets/{resource_id}/api-keys/{api_key_id} + +#### DELETE +##### Summary + +Delete an API key for a dataset + +##### Description + +Delete an API key for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| api_key_id | path | API key ID | Yes | string | +| resource_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | API key deleted successfully | + 
+### /email-code-login + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailPayload](#emailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-code-login/validity + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginPayload](#emailcodeloginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register/send-email + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /email-register/validity + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /explore/apps + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RecommendedAppsQuery](#recommendedappsquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [RecommendedAppListResponse](#recommendedapplistresponse) | + +### /explore/apps/{app_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /features + +#### GET +##### Summary + +Get feature configuration for current tenant + +##### Description + +Get feature configuration for current tenant + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | 
------ | +| 200 | Success | [FeatureResponse](#featureresponse) | + +### /files/support-type + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /files/upload + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [UploadConfig](#uploadconfig) | + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | + +### /files/{file_id}/preview + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| file_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /forgot-password + +#### POST +##### Description + +Send password reset email + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordSendPayload](#forgotpasswordsendpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Email sent successfully | [ForgotPasswordEmailResponse](#forgotpasswordemailresponse) | +| 400 | Invalid email or rate limit exceeded | | + +### /forgot-password/resets + +#### POST +##### Description + +Reset password with verification token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordResetPayload](#forgotpasswordresetpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Password reset successfully | [ForgotPasswordResetResponse](#forgotpasswordresetresponse) | +| 400 | Invalid token or password mismatch | | + +### /forgot-password/validity + +#### POST +##### 
 Description + +Verify password reset code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordCheckPayload](#forgotpasswordcheckpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Code verified successfully | [ForgotPasswordCheckResponse](#forgotpasswordcheckresponse) | +| 400 | Invalid code or token | | + +### /form/human_input/{form_token} + +#### GET +##### Summary + +Get human input form definition by form token + +##### Description + +GET /console/api/form/human_input/{form_token} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Submit human input form by form token + +##### Description + +POST /console/api/form/human_input/{form_token} + +Request body: +{ + "inputs": { + "content": "User input content" + }, + "action": "Approve" +} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /info + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [TenantInfoResponse](#tenantinforesponse) | + +### /installed-apps + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [InstalledAppListResponse](#installedapplistresponse) | + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | 
Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/audio-to-text + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/chat-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ChatMessagePayload](#chatmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/chat-messages/{task_id}/stop + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/completion-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [CompletionMessageExplorePayload](#completionmessageexplorepayload) | + +##### Responses 
+ +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/completion-messages/{task_id}/stop + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ConversationListQuery](#conversationlistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/name + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [ConversationRenamePayload](#conversationrenamepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/pin + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | 
path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/conversations/{c_id}/unpin + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | | Yes | string | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [MessageListQuery](#messagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/feedbacks + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/more-like-this + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MoreLikeThisQuery](#morelikethisquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/messages/{message_id}/suggested-questions + +#### GET +##### Parameters + +| Name | Located in | Description | 
Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/meta + +#### GET +##### Summary + +Get app meta + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/saved-messages + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [SavedMessageListQuery](#savedmessagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [SavedMessageCreatePayload](#savedmessagecreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/saved-messages/{message_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
installed_app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/text-to-audio + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/workflows/run + +#### POST +##### Summary + +Run workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /installed-apps/{installed_app_id}/workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| installed_app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /instruction-generate + +#### POST +##### Description + +Generate instruction for workflow nodes or general use + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InstructionGeneratePayload](#instructiongeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Instruction generated successfully | +| 400 | Invalid request parameters or flow/workflow not found | +| 402 | Provider quota 
exceeded | + +### /instruction-generate/template + +#### POST +##### Description + +Get instruction generation template + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [InstructionTemplatePayload](#instructiontemplatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Template retrieved successfully | +| 400 | Invalid request parameters | + +### /login + +#### POST +##### Summary + +Authenticate user and login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoginPayload](#loginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /logout + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /mcp/oauth/callback + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /notification + +#### GET +##### Description + +Return the active in-product notification for the current user in their interface language (falls back to English if unavailable). The notification is NOT marked as seen here; call POST /notification/dismiss when the user explicitly closes the modal. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success — inspect should_show to decide whether to render the modal | +| 401 | Unauthorized | + +### /notification/dismiss + +#### POST +##### Description + +Mark a notification as dismissed for the current user. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 401 | Unauthorized | + +### /notion/pages/{page_id}/{page_type}/preview + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| page_id | path | | Yes | string | +| page_type | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| page_id | path | | Yes | string | +| page_type | path | | Yes | string | +| payload | body | | Yes | [NotionEstimatePayload](#notionestimatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /notion/pre-import/pages + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/authorize/{provider} + +#### GET +##### Description + +Handle OAuth callback and complete login process + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | OAuth provider name (github/google) | Yes | string | +| code | query | Authorization code from OAuth provider | No | string | +| state | query | Optional state parameter (used for invite token) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to console with access token | +| 400 | OAuth process failed | + +### /oauth/data-source/binding/{provider} + +#### GET +##### Description + +Bind OAuth data source with authorization code + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | +| code | query | Authorization code 
from OAuth provider | No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Data source binding success | [OAuthDataSourceBindingResponse](#oauthdatasourcebindingresponse) | +| 400 | Invalid provider or code | | + +### /oauth/data-source/callback/{provider} + +#### GET +##### Description + +Handle OAuth callback from data source provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | +| code | query | Authorization code from OAuth provider | No | string | +| error | query | Error message from OAuth provider | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to console with result | +| 400 | Invalid provider | + +### /oauth/data-source/{provider} + +#### GET +##### Description + +Get OAuth authorization URL for data source provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | Data source provider name (notion) | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Authorization URL or internal setup success | [OAuthDataSourceResponse](#oauthdatasourceresponse) | +| 400 | Invalid provider | | +| 403 | Admin privileges required | | + +### /oauth/data-source/{provider}/{binding_id}/sync + +#### GET +##### Description + +Sync data from OAuth data source + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| binding_id | path | Data source binding ID | Yes | string | +| provider | path | Data source provider name (notion) | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Data source sync success 
| [OAuthDataSourceSyncResponse](#oauthdatasourcesyncresponse) | +| 400 | Invalid provider or sync failed | | + +### /oauth/login/{provider} + +#### GET +##### Description + +Initiate OAuth login process + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | OAuth provider name (github/google) | Yes | string | +| invite_token | query | Optional invitation token | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 302 | Redirect to OAuth authorization URL | +| 400 | Invalid provider | + +### /oauth/plugin/{provider_id}/datasource/callback + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider_id}/datasource/get-authorization-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/tool/authorization-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/tool/callback + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/plugin/{provider}/trigger/callback + +#### 
GET +##### Summary + +Handle OAuth callback for trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/account + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/authorize + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /oauth/provider/token + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/customized/templates/{template_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/dataset + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RagPipelineDatasetImportPayload](#ragpipelinedatasetimportpayload) | 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/empty-dataset + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/templates + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipeline/templates/{template_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| template_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/datasource-plugins + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RagPipelineImportPayload](#ragpipelineimportpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports/{import_id}/confirm + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| import_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/imports/{pipeline_id}/check-dependencies + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/recommended-plugins + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/rag/pipelines/transform/datasets/{dataset_id} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/customized/publish + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [Payload](#payload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/exports + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs + +#### GET +##### Summary + +Get workflow run list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs/{run_id} + +#### GET +##### Summary + +Get workflow run detail + +##### Parameters + +| Name | Located in | Description | Required | 
Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflow-runs/{run_id}/node-executions + +#### GET +##### Summary + +Get workflow run node execution list + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows + +#### GET +##### Summary + +Get published workflows + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs + +#### GET +##### Summary + +Get default block config + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/default-workflow-block-configs/{block_type} + +#### GET +##### Summary + +Get default block config + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| block_type | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft + +#### GET +##### Summary + +Get draft rag pipeline's 
workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Sync draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Run rag pipeline datasource + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceNodeRunPayload](#datasourcenoderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/datasource/variables-inspect + +#### POST +##### Summary + +Set datasource variables + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceVariablesPayload](#datasourcevariablespayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/environment-variables + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/rag/pipelines/{pipeline_id}/workflows/draft/iteration/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow iteration node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunPayload](#noderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/loop/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow loop node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunPayload](#noderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/last-run + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/run + +#### POST +##### Summary + +Run draft workflow node + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [NodeRunRequiredPayload](#noderunrequiredpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/nodes/{node_id}/variables + +#### DELETE 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/pre-processing/parameters + +#### GET +##### Summary + +Get first step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/processing/parameters + +#### GET +##### Summary + +Get second step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/run + +#### POST +##### Summary + +Run draft workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DraftWorkflowRunPayload](#draftworkflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/system-variables + +#### GET +##### Parameters + +| 
Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/draft/variables/{variable_id}/reset + +#### PUT +##### Parameters + 
+| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| variable_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/publish + +#### GET +##### Summary + +Get published pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Publish workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/preview + +#### POST +##### Summary + +Run datasource content preview + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [Parser](#parser) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Run rag pipeline datasource + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| node_id | path | | Yes | string | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [DatasourceNodeRunPayload](#datasourcenoderunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/pre-processing/parameters + +#### GET +##### Summary + +Get first step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/processing/parameters + +#### GET +##### Summary + +Get second step parameters of rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/published/run + +#### POST +##### Summary + +Run published workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| payload | body | | Yes | [PublishedWorkflowRunPayload](#publishedworkflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/{workflow_id} + +#### DELETE +##### Summary + +Delete a published workflow version that is not currently active on the pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Summary + +Update workflow attributes + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| 
pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rag/pipelines/{pipeline_id}/workflows/{workflow_id}/restore + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| pipeline_id | path | | Yes | string | +| workflow_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /refresh-token + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /remote-files/upload + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /remote-files/{url} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| url | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /reset-password + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailPayload](#emailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /rule-code-generate + +#### POST +##### Description + +Generate code rules using LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleCodeGeneratePayload](#rulecodegeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Code rules generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /rule-generate + +#### POST +##### Description + +Generate rule configuration using 
LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleGeneratePayload](#rulegeneratepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Rule configuration generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /rule-structured-output-generate + +#### POST +##### Description + +Generate structured output rules using LLM + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [RuleStructuredOutputPayload](#rulestructuredoutputpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Structured output generated successfully | +| 400 | Invalid request parameters | +| 402 | Provider quota exceeded | + +### /spec/schema-definitions + +#### GET +##### Summary + +Get system JSON Schema definitions specification + +##### Description + +Used for frontend component type mapping + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /system-features + +#### GET +##### Summary + +Get system-wide feature configuration + +##### Description + +Get system-wide feature configuration +NOTE: This endpoint is unauthenticated by design, as it provides system features +data required for dashboard initialization. + +Authentication would create a circular dependency (users can't log in without the dashboard loading). + +Only non-sensitive configuration data should be returned by this endpoint. 
+ +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [SystemFeatureResponse](#systemfeatureresponse) | + +### /tag-bindings + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingPayload](#tagbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tag-bindings/remove + +#### POST +##### Description + +Remove one or more tag bindings from a target. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingRemovePayload](#tagbindingremovepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tags + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| keyword | query | Search keyword for tag name. | No | string | +| type | query | Tag type filter. Can be "knowledge" or "app". 
| No | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ [TagResponse](#tagresponse) ] | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBasePayload](#tagbasepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /tags/{tag_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| tag_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| tag_id | path | | Yes | string | +| payload | body | | Yes | [TagBasePayload](#tagbasepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /test/retrieval + +#### POST +##### Description + +Bedrock retrieval test (internal use only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [BedrockRetrievalPayload](#bedrockretrievalpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Bedrock retrieval test completed | + +### /trial-apps/{app_id} + +#### GET +##### Summary + +Get app detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/audio-to-text + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/chat-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [ChatRequest](#chatrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/completion-messages + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [CompletionRequest](#completionrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/datasets + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/messages/{message_id}/suggested-questions + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| message_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/trial-apps/{app_id}/site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Returns the site configuration for the application including theme, icons, and text. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/text-to-audio + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [TextToSpeechRequest](#texttospeechrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows + +#### GET +##### Summary + +Get workflow detail + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows/run + +#### POST +##### Summary + +Run workflow + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| payload | body | | Yes | [WorkflowRunRequest](#workflowrunrequest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /trial-apps/{app_id}/workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| app_id | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /website/crawl + +#### POST +##### Description + +Crawl website content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WebsiteCrawlPayload](#websitecrawlpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Website crawl initiated successfully | +| 400 | Invalid crawl parameters | + +### /website/crawl/status/{job_id} + +#### GET +##### Description + +Get website crawl status + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WebsiteCrawlStatusQuery](#websitecrawlstatusquery) | +| job_id | path | Crawl job ID | Yes | string | +| provider | query | Crawl provider (firecrawl/watercrawl/jinareader) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Crawl status retrieved successfully | +| 400 | Invalid provider | +| 404 | Crawl job not found | + +### /workflow/{workflow_run_id}/events + +#### GET +##### Summary + +Get workflow execution events stream after resume + +##### Description + +GET /console/api/workflow/{workflow_run_id}/events + +Returns Server-Sent Events stream. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workflow/{workflow_run_id}/pause-details + +#### GET +##### Summary + +Get workflow pause details + +##### Description + +GET /console/api/workflow/{workflow_run_id}/pause-details + +Returns information about why and where the workflow is paused. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current + +#### POST +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [TenantInfoResponse](#tenantinforesponse) | + +### /workspaces/current/agent-provider/{provider_name} + +#### GET +##### Description + +Get specific agent provider details + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_name | path | Agent provider name | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | object | + +### /workspaces/current/agent-providers + +#### GET +##### Description + +Get list of available agent providers + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [ object ] | + +### /workspaces/current/dataset-operators + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountWithRoleList](#accountwithrolelist) | + +### /workspaces/current/default-model + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGetDefault](#parsergetdefault) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[ParserPostDefault](#parserpostdefault) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/endpoints + +#### POST +##### Description + +Create a new plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointCreatePayload](#endpointcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint created successfully | [EndpointCreateResponse](#endpointcreateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/create + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for creating a plugin endpoint. Use POST /workspaces/current/endpoints instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointCreatePayload](#endpointcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint created successfully | [EndpointCreateResponse](#endpointcreateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/delete + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for deleting a plugin endpoint. Use DELETE /workspaces/current/endpoints/{id} instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint deleted successfully | [EndpointDeleteResponse](#endpointdeleteresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/disable + +#### POST +##### Description + +Disable a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint disabled successfully | [EndpointDisableResponse](#endpointdisableresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/enable + +#### POST +##### Description + +Enable a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointIdPayload](#endpointidpayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint enabled successfully | [EndpointEnableResponse](#endpointenableresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/list + +#### GET +##### Description + +List plugin endpoints with pagination + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointListQuery](#endpointlistquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [EndpointListResponse](#endpointlistresponse) | + +### 
/workspaces/current/endpoints/list/plugin + +#### GET +##### Description + +List endpoints for a specific plugin + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointListForPluginQuery](#endpointlistforpluginquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [PluginEndpointListResponse](#pluginendpointlistresponse) | + +### /workspaces/current/endpoints/update + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating a plugin endpoint. Use PATCH /workspaces/current/endpoints/{id} instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LegacyEndpointUpdatePayload](#legacyendpointupdatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint updated successfully | [EndpointUpdateResponse](#endpointupdateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/endpoints/{id} + +#### DELETE +##### Description + +Delete a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| id | path | Endpoint ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint deleted successfully | [EndpointDeleteResponse](#endpointdeleteresponse) | +| 403 | Admin privileges required | | + +#### PATCH +##### Description + +Update a plugin endpoint + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EndpointUpdatePayload](#endpointupdatepayload) | +| id | path | Endpoint ID | Yes | string | + +##### Responses + 
+| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Endpoint updated successfully | [EndpointUpdateResponse](#endpointupdateresponse) | +| 403 | Admin privileges required | | + +### /workspaces/current/members + +#### GET +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Success | [AccountWithRoleList](#accountwithrolelist) | + +### /workspaces/current/members/invite-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MemberInvitePayload](#memberinvitepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/owner-transfer-check + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [OwnerTransferCheckPayload](#ownertransfercheckpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/send-owner-transfer-confirm-email + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [OwnerTransferEmailPayload](#ownertransferemailpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id} + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id}/owner-transfer + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | +| payload | body | | Yes | [OwnerTransferPayload](#ownertransferpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/members/{member_id}/update-role + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| member_id | path | | Yes | string | +| payload | body | | Yes | [MemberRoleUpdatePayload](#memberroleupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserModelList](#parsermodellist) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/checkout-url + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialDelete](#parsercredentialdelete) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | 
[ParserCredentialId](#parsercredentialid) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialCreate](#parsercredentialcreate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialUpdate](#parsercredentialupdate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialSwitch](#parsercredentialswitch) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/credentials/validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCredentialValidate](#parsercredentialvalidate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | 
[ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserPostModels](#parserpostmodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteCredential](#parserdeletecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserGetCredentials](#parsergetcredentials) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserCreateCredential](#parsercreatecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | 
-------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserUpdateCredential](#parserupdatecredential) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserSwitch](#parserswitch) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/credentials/validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserValidate](#parservalidate) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/disable + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/enable + +#### PATCH +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserDeleteModels](#parserdeletemodels) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/load-balancing-configs/credentials-validate + +#### 
POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [LoadBalancingCredentialPayload](#loadbalancingcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/load-balancing-configs/{config_id}/credentials-validate + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| config_id | path | | Yes | string | +| provider | path | | Yes | string | +| payload | body | | Yes | [LoadBalancingCredentialPayload](#loadbalancingcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/models/parameter-rules + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserParameter](#parserparameter) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/model-providers/{provider}/preferred-provider-type + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ParserPreferredProviderType](#parserpreferredprovidertype) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/models/model-types/{model_type} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| model_type | path | | Yes | string | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/permission + +#### GET +##### Summary + +Get workspace permission settings + +##### Description + +Returns permission flags that control workspace features like member invitations and owner transfer. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/asset + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserAsset](#parserasset) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/debugging-key + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/fetch-manifest + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifierQuery](#parserpluginidentifierquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserIcon](#parsericon) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubInstall](#parsergithubinstall) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/marketplace + +#### POST 
+##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifiers](#parserpluginidentifiers) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/install/pkg + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifiers](#parserpluginidentifiers) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserList](#parserlist) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list/installations/ids + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserLatest](#parserlatest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/list/latest-versions + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserLatest](#parserlatest) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/marketplace/pkg + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPluginIdentifierQuery](#parserpluginidentifierquery) | + +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/parameters/dynamic-options + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserDynamicOptions](#parserdynamicoptions) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/parameters/dynamic-options-with-credentials + +#### POST +##### Summary + +Fetch dynamic options using credentials directly (for edit mode) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserDynamicOptionsWithCredentials](#parserdynamicoptionswithcredentials) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/permission/change + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPermissionChange](#parserpermissionchange) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/permission/fetch + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/autoupgrade/exclude + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserExcludePlugin](#parserexcludeplugin) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/change + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | 
---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserPreferencesChange](#parserpreferenceschange) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/preferences/fetch + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/readme + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserReadme](#parserreadme) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserTasks](#parsertasks) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/delete_all + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id}/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/tasks/{task_id}/delete/{identifier} + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| 
---- | ---------- | ----------- | -------- | ------ | +| identifier | path | | Yes | string | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/uninstall + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserUninstall](#parseruninstall) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upgrade/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubUpgrade](#parsergithubupgrade) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upgrade/marketplace + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserMarketplaceUpgrade](#parsermarketplaceupgrade) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/bundle + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/github + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ParserGithubUpload](#parsergithubupload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/plugin/upload/pkg + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-labels + +#### GET +##### 
Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/add + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderAddPayload](#apitoolprovideraddpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderDeletePayload](#apitoolproviderdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/get + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/remote + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/schema + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolSchemaPayload](#apitoolschemapayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/test/pre + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolTestPayload](#apitooltestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/api/tools + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + 
+### /workspaces/current/tool-provider/api/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ApiToolProviderUpdatePayload](#apitoolproviderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/add + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolAddPayload](#builtintooladdpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credential/info + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credential/schema/{credential_type} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| credential_type | path | | Yes | string | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/credentials + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/default-credential + +#### POST +##### 
Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinProviderDefaultCredentialPayload](#builtinproviderdefaultcredentialpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolCredentialDeletePayload](#builtintoolcredentialdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/info + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/oauth/client-schema + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/oauth/custom-client + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | 
Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [ToolOAuthCustomClientPayload](#tooloauthcustomclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/tools + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/builtin/{provider}/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [BuiltinToolUpdatePayload](#builtintoolupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp + +#### DELETE +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderDeletePayload](#mcpproviderdeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Parameters + +| Name | 
Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderCreatePayload](#mcpprovidercreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### PUT +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPProviderUpdatePayload](#mcpproviderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/auth + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MCPAuthPayload](#mcpauthpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/tools/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/mcp/update/{provider_id} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/create + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolCreatePayload](#workflowtoolcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success 
| + +### /workspaces/current/tool-provider/workflow/delete + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolDeletePayload](#workflowtooldeletepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/get + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/tools + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-provider/workflow/update + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowToolUpdatePayload](#workflowtoolupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tool-providers + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/api + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/builtin + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/mcp + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/tools/workflow + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/icon + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + 
+##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/info + +#### GET +##### Summary + +Get info for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/oauth/client + +#### DELETE +##### Summary + +Remove custom OAuth client configuration + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### GET +##### Summary + +Get OAuth client configuration for a provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Configure custom OAuth client for a provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [TriggerOAuthClientPayload](#triggeroauthclientpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/build/{subscription_builder_id} + +#### POST +##### Summary + +Build a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | 
string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/create + +#### POST +##### Summary + +Add a new subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderCreatePayload](#triggersubscriptionbuildercreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/logs/{subscription_builder_id} + +#### GET +##### Summary + +Get the request logs for a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/update/{subscription_builder_id} + +#### POST +##### Summary + +Update a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### 
/workspaces/current/trigger-provider/{provider}/subscriptions/builder/verify-and-update/{subscription_builder_id} + +#### POST +##### Summary + +Verify and update a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderVerifyPayload](#triggersubscriptionbuilderverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/builder/{subscription_builder_id} + +#### GET +##### Summary + +Get a subscription instance for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_builder_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/list + +#### GET +##### Summary + +List all trigger subscriptions for the current tenant's provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/oauth/authorize + +#### GET +##### Summary + +Initiate OAuth authorization flow for a trigger provider + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | 
Success | + +### /workspaces/current/trigger-provider/{provider}/subscriptions/verify/{subscription_id} + +#### POST +##### Summary + +Verify credentials for an existing subscription (edit mode only) + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| provider | path | | Yes | string | +| subscription_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderVerifyPayload](#triggersubscriptionbuilderverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{subscription_id}/subscriptions/delete + +#### POST +##### Summary + +Delete a subscription instance + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| subscription_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/trigger-provider/{subscription_id}/subscriptions/update + +#### POST +##### Summary + +Update a subscription instance + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| subscription_id | path | | Yes | string | +| payload | body | | Yes | [TriggerSubscriptionBuilderUpdatePayload](#triggersubscriptionbuilderupdatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/current/triggers + +#### GET +##### Summary + +List all trigger providers for the current tenant + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/custom-config + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[WorkspaceCustomConfigPayload](#workspacecustomconfigpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/custom-config/webapp-logo/upload + +#### POST +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/info + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkspaceInfoPayload](#workspaceinfopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/switch + +#### POST +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SwitchWorkspacePayload](#switchworkspacepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /workspaces/{tenant_id}/model-providers/{provider}/{icon_type}/{lang} + +#### GET +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| icon_type | path | | Yes | string | +| lang | path | | Yes | string | +| provider | path | | Yes | string | +| tenant_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +## default +Default namespace + +### /explore/banners + +#### GET +##### Summary + +Get banner list + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +### Models + +#### APIBasedExtensionListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| APIBasedExtensionListResponse | array | | | + +#### APIBasedExtensionPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| api_endpoint | string | API endpoint URL | Yes | +| 
api_key | string | API key for authentication | Yes | +| name | string | Extension name | Yes | + +#### APIBasedExtensionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| api_endpoint | string | | Yes | +| api_key | string | | Yes | +| created_at | | | No | +| id | string | | Yes | +| name | string | | Yes | + +#### Account + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | | | No | +| created_at | | | No | +| email | string | | Yes | +| id | string | | Yes | +| interface_language | | | No | +| interface_theme | | | No | +| is_password_set | boolean | | Yes | +| last_login_at | | | No | +| last_login_ip | | | No | +| name | string | | Yes | +| timezone | | | No | + +#### AccountAvatarPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | string | | Yes | + +#### AccountAvatarQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | string | Avatar file ID | Yes | + +#### AccountDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| token | string | | Yes | + +#### AccountDeletionFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| feedback | string | | Yes | + +#### AccountInitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_language | string | | Yes | +| invitation_code | | | No | +| timezone | string | | Yes | + +#### AccountIntegrateListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AccountIntegrateResponse](#accountintegrateresponse) ] | | Yes | + +#### AccountIntegrateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| is_bound | 
boolean | | Yes | +| link | | | No | +| provider | string | | Yes | + +#### AccountInterfaceLanguagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_language | string | | Yes | + +#### AccountInterfaceThemePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interface_theme | string | *Enum:* `"dark"`, `"light"` | Yes | + +#### AccountNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### AccountPasswordPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password | | | No | +| repeat_new_password | string | | Yes | + +#### AccountTimezonePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| timezone | string | | Yes | + +#### AccountWithRole + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar | | | No | +| created_at | | | No | +| email | string | | Yes | +| id | string | | Yes | +| last_active_at | | | No | +| last_login_at | | | No | +| name | string | | Yes | +| role | string | | Yes | +| status | string | | Yes | + +#### AccountWithRoleList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| accounts | [ [AccountWithRole](#accountwithrole) ] | | Yes | + +#### ActivateCheckQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | | | No | +| token | string | | Yes | +| workspace_id | | | No | + +#### ActivatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | | | No | +| interface_language | string | | Yes | +| name | string | | Yes | +| timezone | string | | Yes | +| token | string | | Yes | +| workspace_id | | | No | + +#### ActivationCheckResponse + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| data | | Activation data if valid | No | +| is_valid | boolean | Whether token is valid | Yes | + +#### ActivationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### AdvancedChatWorkflowRunForList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | | No | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| elapsed_time | number | | No | +| exceptions_count | integer | | No | +| finished_at | object | | No | +| id | string | | No | +| message_id | string | | No | +| retry_index | integer | | No | +| status | string | | No | +| total_steps | integer | | No | +| total_tokens | integer | | No | +| version | string | | No | + +#### AdvancedChatWorkflowRunPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AdvancedChatWorkflowRunForList](#advancedchatworkflowrunforlist) ] | | No | +| has_more | boolean | | No | +| limit | integer | | No | + +#### AdvancedChatWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| files | | | No | +| inputs | | | No | +| parent_message_id | | | No | +| query | string | | No | + +#### AdvancedPromptTemplateQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_mode | string | Application mode | Yes | +| has_context | string | Whether has context | No | +| model_mode | string | Model mode | Yes | +| model_name | string | Model name | Yes | + +#### AgentLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| message_id | string | Message UUID | Yes | + +#### AgentThought + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| chain_id | | | No | +| created_at | | | No | +| files | [ string ] | | Yes | +| id | string | | Yes | +| message_chain_id | | | No | +| message_id | string | | Yes | +| observation | | | No | +| position | integer | | Yes | +| thought | | | No | +| tool | | | No | +| tool_input | | | No | +| tool_labels | [JSONValue](#jsonvalue) | | Yes | + +#### Annotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| created_at | | | No | +| hit_count | | | No | +| id | string | | Yes | +| question | | | No | + +#### AnnotationCountResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| count | integer | Number of annotations | Yes | + +#### AnnotationExportList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | + +#### AnnotationFilePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | Message ID | Yes | + +#### AnnotationHitHistory + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_content | | | No | +| annotation_question | | | No | +| created_at | | | No | +| id | string | | Yes | +| question | | | No | +| score | | | No | +| source | | | No | + +#### AnnotationHitHistoryList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AnnotationHitHistory](#annotationhithistory) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### AnnotationList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + 
+#### AnnotationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | string | Search keyword | No | +| limit | integer | Page size | No | +| page | integer | Page number | No | + +#### AnnotationReplyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | Embedding model name | Yes | +| embedding_provider_name | string | Embedding provider name | Yes | +| score_threshold | number | Score threshold for annotation matching | Yes | + +#### AnnotationReplyStatusQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | *Enum:* `"disable"`, `"enable"` | Yes | + +#### AnnotationSettingUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | Score threshold | Yes | + +#### ApiKeyAuthBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| category | string | | Yes | +| credentials | object | | Yes | +| provider | string | | Yes | + +#### ApiKeyItem + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| id | string | | Yes | +| last_used_at | | | No | +| token | string | | Yes | +| type | string | | Yes | + +#### ApiKeyList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ApiKeyItem](#apikeyitem) ] | | Yes | + +#### ApiProviderSchemaType + +Enum class for api provider schema type. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ApiProviderSchemaType | string | Enum class for api provider schema type. 
| | + +#### ApiToolProviderAddPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| custom_disclaimer | string | | No | +| icon | object | | Yes | +| labels | | | No | +| privacy_policy | | | No | +| provider | string | | Yes | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | + +#### ApiToolProviderDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider | string | | Yes | + +#### ApiToolProviderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| custom_disclaimer | string | | No | +| icon | object | | Yes | +| labels | | | No | +| original_provider | string | | Yes | +| privacy_policy | | | No | +| provider | string | | Yes | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | + +#### ApiToolSchemaPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| schema | string | | Yes | + +#### ApiToolTestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| parameters | object | | Yes | +| provider_name | | | No | +| schema | string | | Yes | +| schema_type | [ApiProviderSchemaType](#apiproviderschematype) | | Yes | +| tool_name | string | | Yes | + +#### AppApiStatusPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_api | boolean | Enable or disable API | Yes | + +#### AppDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| app_model_config | | | No | +| created_at | | | No | +| created_by | | | No | +| description | | | No | +| enable_api | boolean | | Yes | +| enable_site | boolean | | Yes | +| icon | | | No | +| 
icon_background | | | No | +| id | string | | Yes | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| tags | [ [Tag](#tag) ] | | No | +| tracing | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppDetailKernel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| id | string | | No | +| mode | string | | No | +| name | string | | No | + +#### AppDetailWithSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| api_base_url | | | No | +| app_model_config | | | No | +| created_at | | | No | +| created_by | | | No | +| deleted_tools | [ [DeletedTool](#deletedtool) ] | | No | +| description | | | No | +| enable_api | boolean | | Yes | +| enable_site | boolean | | Yes | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| max_active_requests | | | No | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| site | | | No | +| tags | [ [Tag](#tag) ] | | No | +| tracing | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppExportQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_secret | boolean | Include secrets in export | No | +| workflow_id | | Specific workflow ID to export | No | + +#### AppExportResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | string | | Yes | + +#### AppIconPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | Icon data | No | +| icon_background | | Icon background color | No | +| 
icon_type | | Icon type | No | + +#### AppImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| mode | string | Import mode | Yes | +| name | | | No | +| yaml_content | | | No | +| yaml_url | | | No | + +#### AppListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_created_by_me | | Filter by creator | No | +| limit | integer | Page size (1-100) | No | +| mode | string | App mode filter
*Enum:* `"advanced-chat"`, `"agent-chat"`, `"all"`, `"channel"`, `"chat"`, `"completion"`, `"workflow"` | No | +| name | | Filter by app name | No | +| page | integer | Page number (1-99999) | No | +| tag_ids | | Filter by tag IDs | No | + +#### AppMCPServerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | +| parameters | | | Yes | +| server_code | string | | Yes | +| status | [AppMCPServerStatus](#appmcpserverstatus) | | Yes | +| updated_at | | | No | + +#### AppMCPServerStatus + +AppMCPServer Status Enum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| AppMCPServerStatus | string | AppMCPServer Status Enum | | + +#### AppNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | Name to check | Yes | + +#### AppPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [AppPartial](#apppartial) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### AppPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | | | No | +| app_model_config | | | No | +| author_name | | | No | +| create_user_name | | | No | +| created_at | | | No | +| created_by | | | No | +| desc_or_prompt | | | No | +| has_draft_trigger | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| max_active_requests | | | No | +| mode_compatible_with_agent | string | | Yes | +| name | string | | Yes | +| tags | [ [Tag](#tag) ] | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | +| workflow | | | No | + +#### AppSiteResponse + +| Name | Type | Description | Required | 
+| ---- | ---- | ----------- | -------- | +| app_id | string | | Yes | +| code | | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | string | | Yes | +| default_language | string | | Yes | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| privacy_policy | | | No | +| prompt_public | boolean | | Yes | +| show_workflow_steps | boolean | | Yes | +| title | string | | Yes | +| use_icon_as_answer_icon | boolean | | Yes | + +#### AppSiteStatusPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_site | boolean | Enable or disable site | Yes | + +#### AppSiteUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chat_color_theme | | | No | +| chat_color_theme_inverted | | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | | | No | +| default_language | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| privacy_policy | | | No | +| prompt_public | | | No | +| show_workflow_steps | | | No | +| title | | | No | +| use_icon_as_answer_icon | | | No | + +#### AppTracePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | Enable or disable tracing | Yes | +| tracing_provider | | Tracing provider | No | + +#### AudioTranscriptResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| text | string | Transcribed text from audio | Yes | + +#### BatchAddNotificationAccountsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| notification_id | string | | Yes | +| user_email | [ string ] | List of account email addresses | Yes | + +#### BatchImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | 
----------- | -------- | +| upload_file_id | string | | Yes | + +#### BedrockRetrievalPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| knowledge_id | string | | Yes | +| query | string | | Yes | +| retrieval_setting | [BedrockRetrievalSetting](#bedrockretrievalsetting) | | Yes | + +#### BedrockRetrievalSetting + +Retrieval settings for Amazon Bedrock knowledge base queries. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | Minimum relevance score threshold | No | +| top_k | | Maximum number of results to retrieve | No | + +#### BuiltinProviderDefaultCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | + +#### BuiltinToolAddPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | +| type | [CredentialType](#credentialtype) | | Yes | + +#### BuiltinToolCredentialDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### BuiltinToolUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | | | No | +| name | | | No | + +#### ButtonStyle + +Button styles for user actions. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ButtonStyle | string | Button styles for user actions. 
| | + +#### ChangeEmailResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_email | string | | Yes | +| token | string | | Yes | + +#### ChangeEmailSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | +| phase | | | No | +| token | | | No | + +#### ChangeEmailValidityPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ChatConversationQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_status | string | Annotation status filter
*Enum:* `"all"`, `"annotated"`, `"not_annotated"` | No | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| keyword | | Search keyword | No | +| limit | integer | Page size (1-100) | No | +| page | integer | Page number | No | +| sort_by | string | Sort field and direction
*Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### ChatMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | Conversation ID | No | +| files | | Uploaded files | No | +| inputs | object | | Yes | +| model_config | object | | Yes | +| parent_message_id | | Parent message ID | No | +| query | string | User query | Yes | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | No | +| retriever_from | string | Retriever source | No | + +#### ChatMessagesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation ID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### ChatRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| files | | | No | +| inputs | object | | Yes | +| parent_message_id | | | No | +| query | string | | Yes | +| retriever_from | string | | No | + +#### CheckDependenciesResult + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| leaked_dependencies | [ [PluginDependency](#plugindependency) ] | | No | + +#### CheckEmailUniquePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | + +#### ChildChunkBatchUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunks | [ [ChildChunkUpdateArgs](#childchunkupdateargs) ] | | Yes | + +#### ChildChunkCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### ChildChunkUpdateArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | +| id | | | No | + +#### ChildChunkUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### CodeBasedExtensionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | | Extension data | Yes | +| module | string | Module name | Yes | + +#### CompletionConversationQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_status | 
string | Annotation status filter
*Enum:* `"all"`, `"annotated"`, `"not_annotated"` | No | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| keyword | | Search keyword | No | +| limit | integer | Page size (1-100) | No | +| page | integer | Page number | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### CompletionMessageExplorePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### CompletionMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | Uploaded files | No | +| inputs | object | | Yes | +| model_config | object | | Yes | +| query | string | Query text | No | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | No | +| retriever_from | string | Retriever source | No | + +#### CompletionRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### ComplianceDownloadQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_name | string | Compliance document name | Yes | + +#### Condition + +Condition detail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| comparison_operator | string | *Enum:* `"<"`, `"="`, `">"`, `"after"`, `"before"`, `"contains"`, `"empty"`, `"end with"`, `"in"`, `"is"`, `"is not"`, `"not contains"`, `"not empty"`, `"not in"`, `"start with"`, `"≠"`, `"≤"`, `"≥"` | Yes | +| name | string | | Yes | +| value | | | No | + +#### ConsoleDatasetListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ids | [ string ] | Filter by dataset IDs | No | +| include_all | boolean | Include all datasets | No | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| tag_ids | [ string ] | Filter by tag IDs | No | + +#### Conversation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotation | | | No | +| created_at | | | No | +| first_message | | | No | +| from_account_id | | | No | +| from_account_name | | | No | +| from_end_user_id | | | No | +| from_end_user_session_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| model_config | | | No | +| read_at | | | No | +| status | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationAnnotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| account | | | No | +| content | string | | Yes | +| created_at | | | No | +| id | string | | Yes | +| question | | | No | + +#### ConversationAnnotationHitHistory + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_create_account | | | No | +| created_at | | | No | +| id | string | | Yes | + +#### ConversationDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotated | boolean | | Yes | +| created_at | | | No | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| introduction | | | No | +| message_count | integer | | Yes | +| model_config | | | No | +| status | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | +| pinned | | | No | + +#### ConversationMessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| first_message | | | No | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| model_config | | | No | +| status | string | | Yes | + +#### ConversationPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [Conversation](#conversation) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### ConversationVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | 
string | | No | +| id | string | | No | +| name | string | | No | +| value | object | | No | +| value_type | string | | No | + +#### ConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | | | No | +| id | string | | Yes | +| name | string | | Yes | +| updated_at | | | No | +| value | | | No | +| value_type | string | | Yes | + +#### ConversationVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ object ] | Conversation variables for the draft workflow | Yes | + +#### ConversationVariablesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation ID to filter variables | Yes | + +#### ConversationWithSummary + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| admin_feedback_stats | | | No | +| annotated | boolean | | Yes | +| created_at | | | No | +| from_account_id | | | No | +| from_account_name | | | No | +| from_end_user_id | | | No | +| from_end_user_session_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| message_count | integer | | Yes | +| model_config | | | No | +| name | string | | Yes | +| read_at | | | No | +| status | string | | Yes | +| status_count | | | No | +| summary_or_query | string | | Yes | +| updated_at | | | No | +| user_feedback_stats | | | No | + +#### ConversationWithSummaryPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_next | boolean | | Yes | +| items | [ [ConversationWithSummary](#conversationwithsummary) ] | | Yes | +| page | integer | | Yes | +| per_page | integer | | Yes | +| total | integer | | Yes | + +#### ConvertToWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background 
| | | No | +| icon_type | | | No | +| name | | | No | + +#### CopyAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Description for the copied app | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| name | | Name for the copied app | No | + +#### CreateAnnotationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_reply | | Annotation reply data | No | +| answer | | Answer text | No | +| content | | Content text | No | +| message_id | | Message ID | No | +| question | | Question text | No | + +#### CreateAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | App description (max 400 chars) | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| mode | string | App mode
*Enum:* `"advanced-chat"`, `"agent-chat"`, `"chat"`, `"completion"`, `"workflow"` | Yes | +| name | string | App name | Yes | + +#### CredentialType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| CredentialType | string | | | + +#### DataSource + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| info_list | [InfoList](#infolist) | | Yes | + +#### DataSourceIntegrate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| disabled | boolean | | No | +| id | string | | No | +| is_bound | boolean | | No | +| link | string | | No | +| provider | string | | No | +| source_info | [DataSourceIntegrateWorkspace](#datasourceintegrateworkspace) | | No | + +#### DataSourceIntegrateIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emoji | string | | No | +| type | string | | No | +| url | string | | No | + +#### DataSourceIntegrateList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [DataSourceIntegrate](#datasourceintegrate) ] | | No | + +#### DataSourceIntegratePage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page_icon | [DataSourceIntegrateIcon](#datasourceintegrateicon) | | No | +| page_id | string | | No | +| page_name | string | | No | +| parent_id | string | | No | +| type | string | | No | + +#### DataSourceIntegrateWorkspace + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| pages | [ [DataSourceIntegratePage](#datasourceintegratepage) ] | | No | +| total | integer | | No | +| workspace_icon | string | | No | +| workspace_id | string | | No | +| workspace_name | string | | No | + +#### DatasetAndDocumentResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| batch | string | | Yes | +| dataset | 
[DatasetResponse](#datasetresponse) | | Yes | +| documents | [ [DocumentResponse](#documentresponse) ] | | Yes | + +#### DatasetBase + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| data_source_type | string | | No | +| description | string | | No | +| id | string | | No | +| indexing_technique | string | | No | +| name | string | | No | +| permission | string | | No | + +#### DatasetContent + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| content_type | string | | No | +| file_info | [DatasetFileInfo](#datasetfileinfo) | | No | + +#### DatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | +| provider | string | | No | + +#### DatasetDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_count | integer | | No | +| author_name | string | | No | +| built_in_field_enabled | boolean | | No | +| chunk_structure | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| data_source_type | string | | No | +| description | string | | No | +| doc_form | string | | No | +| doc_metadata | [ [DatasetDocMetadata](#datasetdocmetadata) ] | | No | +| document_count | integer | | No | +| embedding_available | boolean | | No | +| embedding_model | string | | No | +| embedding_model_provider | string | | No | +| enable_api | boolean | | No | +| external_knowledge_info | [ExternalKnowledgeInfo](#externalknowledgeinfo) | | No | +| external_retrieval_model | [ExternalRetrievalModel](#externalretrievalmodel) | | No | +| icon_info | [DatasetIconInfo](#dataseticoninfo) | | No | +| id | string | | No | +| 
indexing_technique | string | | No | +| is_multimodal | boolean | | No | +| is_published | boolean | | No | +| name | string | | No | +| permission | string | | No | +| pipeline_id | string | | No | +| provider | string | | No | +| retrieval_model_dict | [DatasetRetrievalModel](#datasetretrievalmodel) | | No | +| runtime_mode | string | | No | +| summary_index_setting | [_AnonymousInlineModel_b1954337d565](#_anonymousinlinemodel_b1954337d565) | | No | +| tags | [ [Tag](#tag) ] | | No | +| total_available_documents | integer | | No | +| total_documents | integer | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| word_count | integer | | No | + +#### DatasetDocMetadata + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| name | string | | No | +| type | string | | No | + +#### DatasetFileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| extension | string | | No | +| id | string | | No | +| mime_type | string | | No | +| name | string | | No | +| size | integer | | No | +| source_url | string | | No | + +#### DatasetIconInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | string | | No | + +#### DatasetKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | No | + +#### DatasetPermissionEnum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DatasetPermissionEnum | string | | | + +#### DatasetQueryDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| created_by_role | string | | No | +| id | string | | No | +| queries | [DatasetContent](#datasetcontent) | | No | +| source | string 
| | No | +| source_app_id | string | | No | + +#### DatasetRerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | string | | No | +| reranking_provider_name | string | | No | + +#### DatasetResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| data_source_type | | | No | +| description | | | No | +| id | string | | Yes | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | + +#### DatasetRetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_enable | boolean | | No | +| reranking_mode | string | | No | +| reranking_model | [DatasetRerankingModel](#datasetrerankingmodel) | | No | +| score_threshold | number | | No | +| score_threshold_enabled | boolean | | No | +| search_method | string | | No | +| top_k | integer | | No | +| weights | [DatasetWeightedScore](#datasetweightedscore) | | No | + +#### DatasetUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| external_retrieval_model | | | No | +| icon_info | | | No | +| indexing_technique | | | No | +| is_multimodal | | | No | +| name | | | No | +| partial_member_list | | | No | +| permission | | | No | +| retrieval_model | | | No | +| summary_index_setting | | | No | + +#### DatasetVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | No | +| embedding_provider_name | string | | No | +| vector_weight | number | | No | + +#### DatasetWeightedScore + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | 
[DatasetKeywordSetting](#datasetkeywordsetting) | | No | +| vector_setting | [DatasetVectorSetting](#datasetvectorsetting) | | No | +| weight_type | string | | No | + +#### DatasourceCredentialDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### DatasourceCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | + +#### DatasourceCredentialUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | | | No | +| name | | | No | + +#### DatasourceCustomClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| client_params | | | No | +| enable_oauth_custom_client | | | No | + +#### DatasourceDefaultPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | + +#### DatasourceNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | Yes | + +#### DatasourceUpdateNamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| name | string | | Yes | + +#### DatasourceVariablesPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info | object | | Yes | +| datasource_type | string | | Yes | +| start_node_id | string | | Yes | +| start_node_title | string | | Yes | + +#### DebugPermission + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DebugPermission | string | | | + +#### DefaultBlockConfigQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| q | | | No | 
+ +#### DeletedTool + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | Yes | +| tool_name | string | | Yes | +| type | string | | Yes | + +#### DocumentBatchDownloadZipPayload + +Request payload for bulk downloading documents as a zip archive. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string (uuid) ] | | Yes | + +#### DocumentMetadataOperation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_id | string | | Yes | +| metadata_list | [ [MetadataDetail](#metadatadetail) ] | | Yes | +| partial_update | boolean | | No | + +#### DocumentMetadataResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | +| value | | | No | + +#### DocumentMetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_metadata | | | No | +| doc_type | | | No | + +#### DocumentRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### DocumentResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| archived | | | No | +| created_at | | | No | +| created_by | | | No | +| created_from | | | No | +| data_source_detail_dict | | | No | +| data_source_info_dict | | | No | +| data_source_type | | | No | +| dataset_process_rule_id | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| display_status | | | No | +| doc_form | | | No | +| doc_metadata_details | [ [DocumentMetadataResponse](#documentmetadataresponse) ] | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | string | | Yes | +| indexing_status | | | No | +| name | string | | Yes | +| need_summary | | | No | +| position | | | No | +| 
summary_index_status | | | No | +| tokens | | | No | +| word_count | | | No | + +#### DocumentRetryPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string ] | | Yes | + +#### DocumentWithSegmentsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| archived | | | No | +| completed_segments | | | No | +| created_at | | | No | +| created_by | | | No | +| created_from | | | No | +| data_source_detail_dict | | | No | +| data_source_info_dict | | | No | +| data_source_type | | | No | +| dataset_process_rule_id | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| display_status | | | No | +| doc_form | | | No | +| doc_metadata_details | [ [DocumentMetadataResponse](#documentmetadataresponse) ] | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | string | | Yes | +| indexing_status | | | No | +| name | string | | Yes | +| need_summary | | | No | +| position | | | No | +| process_rule_dict | | | No | +| summary_index_status | | | No | +| tokens | | | No | +| total_segments | | | No | +| word_count | | | No | + +#### DraftWorkflowNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | + +#### DraftWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| start_node_id | string | | Yes | + +#### DraftWorkflowSyncPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | | | No | +| environment_variables | | | No | +| features | | | No | +| graph | object | | Yes | +| hash | | | No | +| rag_pipeline_variables | | | No | + +#### DraftWorkflowTriggerRunAllPayload + +| Name | Type | 
Description | Required | +| ---- | ---- | ----------- | -------- | +| node_ids | [ string ] | | Yes | + +#### DraftWorkflowTriggerRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | | Yes | + +#### DraftWorkflowTriggerRunRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | Node ID | Yes | + +#### EducationActivatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| institution | string | | Yes | +| role | string | | Yes | +| token | string | | Yes | + +#### EducationAutocompleteQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keywords | string | | Yes | +| limit | integer | | No | +| page | integer | | No | + +#### EducationAutocompleteResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| curr_page | | | No | +| data | [ string ] | | No | +| has_next | | | No | + +#### EducationStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_refresh | | | No | +| expire_at | | | No | +| is_student | | | No | +| result | | | No | + +#### EducationVerifyResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| token | | | No | + +#### EmailCodeLoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| language | | | No | +| token | string | | Yes | + +#### EmailPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### EmailRegisterResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### 
EmailRegisterSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Email address | Yes | +| language | | Language code | No | + +#### EmailRegisterValidityPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### EndpointCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| plugin_unique_identifier | string | | Yes | +| settings | object | | Yes | + +#### EndpointCreateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointDeleteResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointDisableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointEnableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EndpointIdPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoint_id | string | | Yes | + +#### EndpointListForPluginQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | | Yes | +| page_size | integer | | Yes | +| plugin_id | string | | Yes | + +#### EndpointListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | | Yes | +| page_size | integer | | Yes | + +#### EndpointListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoints | [ object ] | Endpoint information | Yes | + +#### 
EndpointUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| settings | object | | Yes | + +#### EndpointUpdateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| success | boolean | Operation success | Yes | + +#### EnvironmentVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| environment_variables | [ object ] | Environment variables for the draft workflow | Yes | + +#### ExecutionContentType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ExecutionContentType | string | | | + +#### ExternalApiTemplateListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | + +#### ExternalDatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| external_knowledge_api_id | string | | Yes | +| external_knowledge_id | string | | Yes | +| external_retrieval_model | | | No | +| name | string | | Yes | + +#### ExternalHitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| external_retrieval_model | | | No | +| metadata_filtering_conditions | | | No | +| query | string | | Yes | + +#### ExternalKnowledgeApiPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| settings | object | | Yes | + +#### ExternalKnowledgeInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| external_knowledge_api_endpoint | string | | No | +| external_knowledge_api_id | string | | No | +| external_knowledge_api_name | string | | No | +| external_knowledge_id | string | | No | + +#### 
ExternalRetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| score_threshold | number | | No | +| score_threshold_enabled | boolean | | No | +| top_k | integer | | No | + +#### FeatureResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | Feature configuration object | No | + +#### Feedback + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| from_account | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| rating | string | | Yes | + +#### FeedbackExportQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end_date | | End date (YYYY-MM-DD) | No | +| format | string | Export format
*Enum:* `"csv"`, `"json"` | No | +| from_source | | Filter by feedback source | No | +| has_comment | | Only include feedback with comments | No | +| rating | | Filter by rating | No | +| start_date | | Start date (YYYY-MM-DD) | No | + +#### FeedbackStat + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| dislike | integer | | Yes | +| like | integer | | Yes | + +#### FileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_ids | [ string ] | | Yes | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### ForgotPasswordCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordCheckResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Email address | Yes | +| is_valid | boolean | Whether code is valid | Yes | +| token | string | New reset token | Yes | + +#### ForgotPasswordEmailResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | | Error code if account not found | No | +| data | | Reset token | No | +| result | string | Operation result | Yes | + +#### ForgotPasswordResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordResetResponse + +| Name | Type | 
Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### ForgotPasswordSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### FormInput + +Form input definition. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| default | | | No | +| output_variable_name | string | | Yes | +| type | [FormInputType](#forminputtype) | | Yes | + +#### FormInputDefault + +Default configuration for form inputs. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| selector | [ string ] | | No | +| type | [PlaceholderType](#placeholdertype) | | Yes | +| value | string | | No | + +#### FormInputType + +Form input types. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| FormInputType | string | Form input types. | | + +#### GenerateSummaryPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_list | [ string ] | | Yes | + +#### Github + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| github_plugin_unique_identifier | string | | Yes | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### HitTestingChildChunk + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| id | | | No | +| position | | | No | +| score | | | No | + +#### HitTestingDocument + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source_type | | | No | +| doc_metadata | | | No | +| doc_type | | | No | +| id | | | No | +| name | | | No | + +#### HitTestingFile + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| extension | | | No | +| id | | | No | +| mime_type | | | No | +| name | | | No | 
+| size | | | No | +| source_url | | | No | + +#### HitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_ids | | | No | +| external_retrieval_model | | | No | +| query | string | | Yes | +| retrieval_model | | | No | + +#### HitTestingRecord + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| child_chunks | [ [HitTestingChildChunk](#hittestingchildchunk) ] | | No | +| files | [ [HitTestingFile](#hittestingfile) ] | | No | +| score | | | No | +| segment | | | No | +| summary | | | No | +| tsne_position | | | No | + +#### HitTestingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| query | string | | Yes | +| records | [ [HitTestingRecord](#hittestingrecord) ] | | No | + +#### HitTestingSegment + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| completed_at | | | No | +| content | | | No | +| created_at | | | No | +| created_by | | | No | +| disabled_at | | | No | +| disabled_by | | | No | +| document | | | No | +| document_id | | | No | +| enabled | | | No | +| error | | | No | +| hit_count | | | No | +| id | | | No | +| index_node_hash | | | No | +| index_node_id | | | No | +| indexing_at | | | No | +| keywords | [ string ] | | No | +| position | | | No | +| sign_content | | | No | +| status | | | No | +| stopped_at | | | No | +| tokens | | | No | +| word_count | | | No | + +#### HumanInputContent + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| form_definition | | | No | +| form_submission_data | | | No | +| submitted | boolean | | Yes | +| type | [ExecutionContentType](#executioncontenttype) | | No | +| workflow_run_id | string | | Yes | + +#### HumanInputDeliveryTestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| delivery_method_id | string | Delivery method ID | Yes | 
+| inputs | object | Values used to fill missing upstream variables referenced in form_content | No | + +#### HumanInputFormDefinition + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| actions | [ [UserAction](#useraction) ] | | No | +| display_in_ui | boolean | | No | +| expiration_time | integer | | Yes | +| form_content | string | | Yes | +| form_id | string | | Yes | +| form_token | | | No | +| inputs | [ [FormInput](#forminput) ] | | No | +| node_id | string | | Yes | +| node_title | string | | Yes | +| resolved_default_values | object | | No | + +#### HumanInputFormPreviewPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | object | Values used to fill missing upstream variables referenced in form_content | No | + +#### HumanInputFormSubmissionData + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action_id | string | | Yes | +| action_text | string | | Yes | +| node_id | string | | Yes | +| node_title | string | | Yes | +| rendered_content | string | | Yes | + +#### HumanInputFormSubmitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | Selected action ID | Yes | +| form_inputs | object | Values the user provides for the form's own fields | Yes | +| inputs | object | Values used to fill missing upstream variables referenced in form_content | Yes | + +#### IconType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| IconType | string | | | + +#### Import + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | | No | +| app_mode | | | No | +| current_dsl_version | string | | No | +| error | string | | No | +| id | string | | Yes | +| imported_dsl_version | string | | No | +| status | [ImportStatus](#importstatus) | | Yes | + +#### ImportStatus + +| Name | Type | Description | Required | +| 
---- | ---- | ----------- | -------- | +| ImportStatus | string | | | + +#### IncludeSecretQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_secret | string | | No | + +#### IndexingEstimatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| dataset_id | | | No | +| doc_form | string | | No | +| doc_language | string | | No | +| indexing_technique | string | | Yes | +| info_list | object | | Yes | +| process_rule | object | | Yes | + +#### InfoList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source_type | string | *Enum:* `"notion_import"`, `"upload_file"`, `"website_crawl"` | Yes | +| file_info_list | | | No | +| notion_info_list | | | No | +| website_info_list | | | No | + +#### Inner + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | | | No | +| model_type | [ModelType](#modeltype) | | Yes | +| provider | | | No | + +#### InsertExploreAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | string | | Yes | +| can_trial | boolean | | No | +| category | string | | Yes | +| copyright | | | No | +| custom_disclaimer | | | No | +| desc | | | No | +| language | string | | Yes | +| position | integer | | Yes | +| privacy_policy | | | No | +| trial_limit | integer | | No | + +#### InsertExploreBannerPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| category | string | | Yes | +| description | string | | Yes | +| img-src | string | | Yes | +| language | string | | No | +| link | string | | Yes | +| sort | integer | | Yes | +| title | string | | Yes | + +#### InstallPermission + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| InstallPermission | string | | | + +#### InstalledAppCreatePayload + +| Name | Type | Description | Required | +| ---- | 
---- | ----------- | -------- | +| app_id | string | | Yes | + +#### InstalledAppInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| mode | | | No | +| name | | | No | +| use_icon_as_answer_icon | | | No | + +#### InstalledAppListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| installed_apps | [ [InstalledAppResponse](#installedappresponse) ] | | Yes | + +#### InstalledAppResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app | [InstalledAppInfoResponse](#installedappinforesponse) | | Yes | +| app_owner_tenant_id | string | | Yes | +| editable | boolean | | Yes | +| id | string | | Yes | +| is_pinned | boolean | | Yes | +| last_used_at | | | No | +| uninstallable | boolean | | Yes | + +#### InstalledAppUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_pinned | | | No | + +#### InstalledAppsListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_id | | App ID to filter by | No | + +#### InstructionGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current | string | Current instruction text | No | +| flow_id | string | Workflow/Flow ID | Yes | +| ideal_output | string | Expected ideal output | No | +| instruction | string | Instruction for generation | Yes | +| language | string | Programming language (javascript/python) | No | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| node_id | string | Node ID for workflow context | No | + +#### InstructionTemplatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| type | string | Instruction template type | Yes | + +#### IterationNodeRunPayload + +| Name 
| Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### JSONValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| JSONValue | | | | + +#### KnowledgeConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data_source | | | No | +| doc_form | string | | No | +| doc_language | string | | No | +| duplicate | boolean | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| indexing_technique | string | *Enum:* `"economy"`, `"high_quality"` | Yes | +| is_multimodal | boolean | | No | +| name | | | No | +| original_document_id | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| summary_index_setting | | | No | + +#### LLMMode + +Enum class for large language model mode. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| LLMMode | string | Enum class for large language model mode. | | + +#### LangContentPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| body | string | | Yes | +| lang | string | Language tag: 'zh' \| 'en' \| 'jp' | Yes | +| subtitle | | | No | +| title | string | | Yes | +| title_pic_url | | | No | + +#### LegacyEndpointUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoint_id | string | | Yes | +| name | string | | Yes | +| settings | object | | Yes | + +#### LoadBalancingCredentialPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### LoadBalancingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| configs | | | No | +| enabled | | | No | + +#### LoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | 
+| email | string | | Yes | +| invite_token | | Invitation token | No | +| password | string | | Yes | +| remember_me | boolean | Remember me flag | No | + +#### LoopNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### MCPAuthPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authorization_code | | | No | +| provider_id | string | | Yes | + +#### MCPProviderCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authentication | | | No | +| configuration | | | No | +| headers | | | No | +| icon | string | | Yes | +| icon_background | string | | No | +| icon_type | string | | Yes | +| name | string | | Yes | +| server_identifier | string | | Yes | +| server_url | string | | Yes | + +#### MCPProviderDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | Yes | + +#### MCPProviderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| authentication | | | No | +| configuration | | | No | +| headers | | | No | +| icon | string | | Yes | +| icon_background | string | | No | +| icon_type | string | | Yes | +| name | string | | Yes | +| provider_id | string | | Yes | +| server_identifier | string | | Yes | +| server_url | string | | Yes | + +#### MCPServerCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Server description | No | +| parameters | object | Server parameters configuration | Yes | + +#### MCPServerUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Server description | No | +| id | string | Server ID | Yes | +| parameters | object | Server parameters configuration | Yes | +| status | | Server status | No | + +#### Marketplace + +| Name | Type 
| Description | Required | +| ---- | ---- | ----------- | -------- | +| marketplace_plugin_unique_identifier | string | | Yes | +| version | | | No | + +#### MemberInvitePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emails | [ string ] | | No | +| language | | | No | +| role | [TenantAccountRole](#tenantaccountrole) | | Yes | + +#### MemberRoleUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| role | string | | Yes | + +#### MessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_thoughts | [ [AgentThought](#agentthought) ] | | Yes | +| annotation | | | No | +| annotation_hit_history | | | No | +| answer_tokens | integer | | Yes | +| conversation_id | string | | Yes | +| created_at | | | No | +| error | | | No | +| feedbacks | [ [Feedback](#feedback) ] | | Yes | +| from_account_id | | | No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| inputs | object | | Yes | +| message | [JSONValue](#jsonvalue) | | Yes | +| message_files | [ [MessageFile](#messagefile) ] | | Yes | +| message_metadata_dict | [JSONValue](#jsonvalue) | | Yes | +| message_tokens | integer | | Yes | +| parent_message_id | | | No | +| provider_response_latency | number | | Yes | +| query | string | | Yes | +| re_sign_file_url_answer | string | | Yes | +| status | string | | Yes | +| workflow_run_id | | | No | + +#### MessageDetailResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_thoughts | [ [AgentThought](#agentthought) ] | | No | +| annotation | | | No | +| annotation_hit_history | | | No | +| answer_tokens | | | No | +| conversation_id | string | | Yes | +| created_at | | | No | +| error | | | No | +| extra_contents | [ [HumanInputContent](#humaninputcontent) ] | | No | +| feedbacks | [ [Feedback](#feedback) ] | | No | +| from_account_id | | | 
No | +| from_end_user_id | | | No | +| from_source | string | | Yes | +| id | string | | Yes | +| inputs | object | | Yes | +| message | | | No | +| message_files | [ [MessageFile](#messagefile) ] | | No | +| message_metadata_dict | | | No | +| message_tokens | | | No | +| parent_message_id | | | No | +| provider_response_latency | | | No | +| query | string | | Yes | +| re_sign_file_url_answer | string | | Yes | +| status | string | | Yes | +| workflow_run_id | | | No | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| message_id | string | Message ID | Yes | +| rating | | | No | + +#### MessageFile + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| belongs_to | | | No | +| filename | string | | Yes | +| id | string | | Yes | +| mime_type | | | No | +| size | | | No | +| transfer_method | string | | Yes | +| type | string | | Yes | +| upload_file_id | | | No | +| url | | | No | + +#### MessageInfiniteScrollPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [MessageDetailResponse](#messagedetailresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MetadataArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| type | string | *Enum:* `"number"`, `"string"`, `"time"` | Yes | + +#### MetadataDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| value | | | No | + +#### MetadataFilteringCondition + 
+Metadata Filtering Condition. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conditions | | | No | +| logical_operator | | | No | + +#### MetadataOperationData + +Metadata operation data + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| operation_data | [ [DocumentMetadataOperation](#documentmetadataoperation) ] | | Yes | + +#### MetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### ModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| completion_params | object | | No | +| mode | [LLMMode](#llmmode) | | Yes | +| name | string | | Yes | +| provider | string | | Yes | + +#### ModelConfigPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| model_dict | | | No | +| pre_prompt | | | No | +| updated_at | | | No | +| updated_by | | | No | + +#### ModelConfigRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_mode | | Agent mode configuration | No | +| configs | | Model configuration parameters | No | +| dataset_configs | | Dataset configurations | No | +| model | | Model name | No | +| more_like_this | | More like this configuration | No | +| opening_statement | | Opening statement | No | +| provider | | Model provider | No | +| retrieval_model | | Retrieval model configuration | No | +| speech_to_text | | Speech to text configuration | No | +| suggested_questions | | Suggested questions | No | +| text_to_speech | | Text to speech configuration | No | +| tools | | Available tools | No | + +#### ModelType + +Enum class for model type. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ModelType | string | Enum class for model type. 
| | + +#### MoreLikeThisQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| response_mode | string | *Enum:* `"blocking"`, `"streaming"` | Yes | + +#### NodeIdQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| node_id | string | | Yes | + +#### NodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | | | No | + +#### NodeRunRequiredPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| inputs | object | | Yes | + +#### NotionEstimatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| notion_info_list | [ object ] | | Yes | +| process_rule | object | | Yes | + +#### NotionIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| emoji | | | No | +| type | string | | Yes | +| url | | | No | + +#### NotionInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| pages | [ [NotionPage](#notionpage) ] | | Yes | +| workspace_id | string | | Yes | + +#### NotionIntegrateInfoList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| notion_info | [ [NotionIntegrateWorkspace](#notionintegrateworkspace) ] | | No | + +#### NotionIntegratePage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| is_bound | boolean | | No | +| page_icon | [DataSourceIntegrateIcon](#datasourceintegrateicon) | | No | +| page_id | string | | No | +| page_name | string | | No | +| parent_id | string | | No | +| type | string | | No | + +#### NotionIntegrateWorkspace + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| pages | [ [NotionIntegratePage](#notionintegratepage) ] | | No | +| workspace_icon 
| string | | No | +| workspace_id | string | | No | +| workspace_name | string | | No | + +#### NotionPage + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page_icon | | | No | +| page_id | string | | Yes | +| page_name | string | | Yes | +| type | string | | Yes | + +#### OAuthDataSourceBindingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### OAuthDataSourceResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | string | Authorization URL or 'internal' for internal setup | Yes | + +#### OAuthDataSourceSyncResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +#### OwnerTransferCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| token | string | | Yes | + +#### OwnerTransferEmailPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | | | No | + +#### OwnerTransferPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| token | string | | Yes | + +#### Package + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifier | string | | Yes | +| version | | | No | + +#### PaginatedConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ConversationVariableResponse](#conversationvariableresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### Parser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | 
Yes | + +#### ParserAsset + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_name | string | | Yes | +| plugin_unique_identifier | string | | Yes | + +#### ParserCreateCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | +| name | | | No | + +#### ParserCredentialCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| name | | | No | + +#### ParserCredentialDelete + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### ParserCredentialId + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | + +#### ParserCredentialSwitch + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | + +#### ParserCredentialUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| name | | | No | + +#### ParserCredentialValidate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | + +#### ParserDeleteCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserDeleteModels + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserDynamicOptions + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| credential_id | 
| | No | +| parameter | string | | Yes | +| plugin_id | string | | Yes | +| provider | string | | Yes | +| provider_type | string | *Enum:* `"tool"`, `"trigger"` | Yes | + +#### ParserDynamicOptionsWithCredentials + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| parameter | string | | Yes | +| plugin_id | string | | Yes | +| provider | string | | Yes | + +#### ParserEnable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable_trigger | boolean | | Yes | +| trigger_id | string | | Yes | + +#### ParserExcludePlugin + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_id | string | | Yes | + +#### ParserGetCredentials + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| config_from | | | No | +| credential_id | | | No | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserGetDefault + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserGithubInstall + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| package | string | | Yes | +| plugin_unique_identifier | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### ParserGithubUpgrade + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_plugin_unique_identifier | string | | Yes | +| original_plugin_unique_identifier | string | | Yes | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### ParserGithubUpload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| package | string | | Yes | +| repo | string | | Yes | +| version | string | | Yes | + +#### 
ParserIcon + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| filename | string | | Yes | +| tenant_id | string | | Yes | + +#### ParserLatest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_ids | [ string ] | | Yes | + +#### ParserList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | Page number | No | +| page_size | integer | Page size (1-256) | No | + +#### ParserMarketplaceUpgrade + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_plugin_unique_identifier | string | | Yes | +| original_plugin_unique_identifier | string | | Yes | + +#### ParserModelList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_type | | | No | + +#### ParserParameter + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model | string | | Yes | + +#### ParserPermissionChange + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| debug_permission | [DebugPermission](#debugpermission) | | Yes | +| install_permission | [InstallPermission](#installpermission) | | Yes | + +#### ParserPluginIdentifierQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifier | string | | Yes | + +#### ParserPluginIdentifiers + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_unique_identifiers | [ string ] | | Yes | + +#### ParserPostDefault + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_settings | [ [Inner](#inner) ] | | Yes | + +#### ParserPostModels + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| config_from | | | No | +| credential_id | | | No | +| load_balancing | | | No | +| model | string | | Yes | +| model_type | 
[ModelType](#modeltype) | | Yes | + +#### ParserPreferencesChange + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_upgrade | [PluginAutoUpgradeSettingsPayload](#pluginautoupgradesettingspayload) | | Yes | +| permission | [PluginPermissionSettingsPayload](#pluginpermissionsettingspayload) | | Yes | + +#### ParserPreferredProviderType + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| preferred_provider_type | string | *Enum:* `"custom"`, `"system"` | Yes | + +#### ParserReadme + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | string | | No | +| plugin_unique_identifier | string | | Yes | + +#### ParserSwitch + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### ParserTasks + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| page | integer | Page number | No | +| page_size | integer | Page size (1-256) | No | + +#### ParserUninstall + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| plugin_installation_id | string | | Yes | + +#### ParserUpdateCredential + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | string | | Yes | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | +| name | | | No | + +#### ParserValidate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | +| model | string | | Yes | +| model_type | [ModelType](#modeltype) | | Yes | + +#### PartnerTenantsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| click_id | string | Click Id from partner referral link | Yes | + +#### 
Payload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| icon_info | | | No | +| name | string | | Yes | + +#### PipelineVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_file_extension | [ string ] | | No | +| allow_file_upload_methods | [ string ] | | No | +| allowed_file_types | [ string ] | | No | +| belong_to_node_id | string | | No | +| default_value | object | | No | +| label | string | | No | +| max_length | integer | | No | +| options | [ string ] | | No | +| placeholder | string | | No | +| required | boolean | | No | +| tooltips | string | | No | +| type | string | | No | +| unit | string | | No | +| variable | string | | No | + +#### PlaceholderType + +Default value types for form inputs. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| PlaceholderType | string | Default value types for form inputs. | | + +#### PluginAutoUpgradeSettingsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| exclude_plugins | [ string ] | | No | +| include_plugins | [ string ] | | No | +| strategy_setting | [StrategySetting](#strategysetting) | | No | +| upgrade_mode | [UpgradeMode](#upgrademode) | | No | +| upgrade_time_of_day | integer | | No | + +#### PluginDependency + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_identifier | | | No | +| type | [Type](#type) | | Yes | +| value | | | Yes | + +#### PluginEndpointListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| endpoints | [ object ] | Endpoint information | Yes | + +#### PluginPermissionSettingsPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| debug_permission | [DebugPermission](#debugpermission) | | No | +| install_permission | 
[InstallPermission](#installpermission) | | No | + +#### PreProcessingRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | | Yes | +| id | string | | Yes | + +#### ProcessRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mode | string | *Enum:* `"automatic"`, `"custom"`, `"hierarchical"` | Yes | +| rules | | | No | + +#### PublishWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| marked_comment | | | No | +| marked_name | | | No | + +#### PublishedWorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_preview | boolean | | No | +| original_document_id | | | No | +| response_mode | string | *Enum:* `"blocking"`, `"streaming"` | No | +| start_node_id | string | | Yes | + +#### RagPipelineDatasetImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| yaml_content | string | | Yes | + +#### RagPipelineImport + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_dsl_version | string | | No | +| dataset_id | string | | No | +| error | string | | No | +| id | string | | No | +| imported_dsl_version | string | | No | +| pipeline_id | string | | No | +| status | string | | No | + +#### RagPipelineImportCheckDependencies + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| leaked_dependencies | [ [RagPipelineLeakedDependency](#ragpipelineleakeddependency) ] | | No | + +#### RagPipelineImportPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| mode | string | | Yes | +| name | | | No | +| 
pipeline_id | | | No | +| yaml_content | | | No | +| yaml_url | | | No | + +#### RagPipelineLeakedDependency + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| current_identifier | string | | No | +| type | string | | No | +| value | object | | No | + +#### RagPipelineRecommendedPluginQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| type | string | | No | + +#### RecommendedAppInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| id | string | | Yes | +| mode | | | No | +| name | | | No | + +#### RecommendedAppListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| categories | [ string ] | | Yes | +| recommended_apps | [ [RecommendedAppResponse](#recommendedappresponse) ] | | Yes | + +#### RecommendedAppResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app | | | No | +| app_id | string | | Yes | +| can_trial | | | No | +| categories | [ string ] | | No | +| copyright | | | No | +| custom_disclaimer | | | No | +| description | | | No | +| is_listed | | | No | +| position | | | No | +| privacy_policy | | | No | + +#### RecommendedAppsQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | | | No | + +#### RelatedAppList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [AppDetailKernel](#appdetailkernel) ] | | No | +| total | integer | | No | + +#### RerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | | | No | +| reranking_provider_name | | | No | + +#### ResultResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | | Yes | + +#### 
RetrievalMethod + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| RetrievalMethod | string | | | + +#### RetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| metadata_filtering_conditions | | | No | +| reranking_enable | boolean | | Yes | +| reranking_mode | | | No | +| reranking_model | | | No | +| score_threshold | | | No | +| score_threshold_enabled | boolean | | Yes | +| search_method | [RetrievalMethod](#retrievalmethod) | | Yes | +| top_k | integer | | Yes | +| weights | | | No | + +#### Rule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| parent_mode | | | No | +| pre_processing_rules | | | No | +| segmentation | | | No | +| subchunk_segmentation | | | No | + +#### RuleCodeGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code_language | string | Programming language for code generation | No | +| instruction | string | Rule generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| no_variable | boolean | Whether to exclude variables | No | + +#### RuleGeneratePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| instruction | string | Rule generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | +| no_variable | boolean | Whether to exclude variables | No | + +#### RuleStructuredOutputPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| instruction | string | Structured output generation instruction | Yes | +| model_config | [ModelConfig](#modelconfig) | Model configuration | Yes | + +#### SavedMessageCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | | Yes | + +#### SavedMessageListQuery + +| Name | Type | 
Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### SegmentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | string | | Yes | +| keywords | | | No | + +#### SegmentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | string | | No | +| hit_count_gte | | | No | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | +| status | [ string ] | | No | + +#### SegmentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | string | | Yes | +| keywords | | | No | +| regenerate_child_chunks | boolean | | No | +| summary | | | No | + +#### Segmentation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunk_overlap | integer | | No | +| max_tokens | integer | | Yes | +| separator | string | | No | + +#### SimpleAccount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | + +#### SimpleEndUser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| is_anonymous | boolean | | Yes | +| session_id | | | No | +| type | string | | Yes | + +#### SimpleMessageDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | string | | Yes | +| inputs | object | | Yes | +| message | string | | Yes | +| query | string | | Yes | + +#### SimpleModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| model_dict | | | No | +| pre_prompt | | | No | + +#### Site + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| app_base_url | | | No | +| chat_color_theme | | | No | +| chat_color_theme_inverted | | | No | +| code | | | No | +| copyright | | | No | +| created_at | | | No | +| created_by | | | No | +| custom_disclaimer | | | No | +| customize_domain | | | No | +| customize_token_strategy | | | No | +| default_language | | | No | +| description | | | No | +| icon | | | No | +| icon_background | | | No | +| icon_type | | | No | +| privacy_policy | | | No | +| prompt_public | | | No | +| show_workflow_steps | | | No | +| title | | | No | +| updated_at | | | No | +| updated_by | | | No | +| use_icon_as_answer_icon | | | No | + +#### StatisticTimeRangeQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end | | End date (YYYY-MM-DD HH:MM) | No | +| start | | Start date (YYYY-MM-DD HH:MM) | No | + +#### StatusCount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| failed | integer | | Yes | +| partial_success | integer | | Yes | +| paused | integer | | Yes | +| success | integer | | Yes | + +#### StrategySetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| StrategySetting | string | | | + +#### SubscriptionQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| interval | string | Billing interval
*Enum:* `"month"`, `"year"` | Yes | +| plan | string | Subscription plan
*Enum:* `"professional"`, `"team"` | Yes | + +#### SuggestedQuestionsResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ string ] | Suggested question | Yes | + +#### SwitchWorkspacePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tenant_id | string | | Yes | + +#### SyncDraftWorkflowPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ object ] | | No | +| environment_variables | [ object ] | | No | +| features | object | | Yes | +| graph | object | | Yes | +| hash | | | No | + +#### SyncDraftWorkflowResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| hash | string | | No | +| result | string | | No | +| updated_at | string | | No | + +#### SystemFeatureResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | System feature configuration object | No | + +#### Tag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | + +#### TagBasePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | Tag name | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | Tag IDs to bind | Yes | +| target_id | string | Target ID to bind tags to | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagBindingRemovePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | Tag IDs to remove | Yes | +| target_id | string | Target ID to unbind tag from | Yes | +| type | [TagType](#tagtype) | | Yes | + +#### TagListQueryParam + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| type | string | Tag type filter
*Enum:* `""`, `"app"`, `"knowledge"` | No | + +#### TagResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| binding_count | | | No | +| id | string | | Yes | +| name | string | | Yes | +| type | | | No | + +#### TagType + +Tag type + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| TagType | string | Tag type | | + +#### TenantAccountRole + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| TenantAccountRole | string | | | + +#### TenantInfoResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| custom_config | | | No | +| id | string | | Yes | +| in_trial | | | No | +| name | | | No | +| next_credit_reset_date | | | No | +| plan | | | No | +| role | | | No | +| status | | | No | +| trial_credits | | | No | +| trial_credits_used | | | No | +| trial_end_reason | | | No | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### TextToSpeechPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Whether to stream audio | No | +| text | string | Text to convert | Yes | +| voice | | Voice name | No | + +#### TextToSpeechRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | | No | +| streaming | | | No | +| text | | | No | +| voice | | | No | + +#### TextToSpeechVoiceQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| language | string | Language code | Yes | + +#### ToolOAuthCustomClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | 
-------- | +| client_params | | | No | +| enable_oauth_custom_client | | | No | + +#### ToolParameterForm + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| ToolParameterForm | string | | | + +#### TraceConfigPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tracing_config | object | Tracing configuration data | Yes | +| tracing_provider | string | Tracing provider name | Yes | + +#### TraceProviderQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tracing_provider | string | Tracing provider name | Yes | + +#### TrialAppDetailWithSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_mode | string | | No | +| api_base_url | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| deleted_tools | [ [TrialDeletedTool](#trialdeletedtool) ] | | No | +| description | string | | No | +| enable_api | boolean | | No | +| enable_site | boolean | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| id | string | | No | +| max_active_requests | integer | | No | +| mode | string | | No | +| model_config | [TrialAppModelConfig](#trialappmodelconfig) | | No | +| name | string | | No | +| site | [TrialSite](#trialsite) | | No | +| tags | [ [TrialTag](#trialtag) ] | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| use_icon_as_answer_icon | boolean | | No | +| workflow | [TrialWorkflowPartial](#trialworkflowpartial) | | No | + +#### TrialAppModelConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| agent_mode | object | | No | +| annotation_reply | object | | No | +| chat_prompt_config | object | | No | +| completion_prompt_config | object | | No | +| created_at | object | | No | +| created_by | string | | No | +| dataset_configs | object 
| | No | +| dataset_query_variable | string | | No | +| external_data_tools | object | | No | +| file_upload | object | | No | +| model | object | | No | +| more_like_this | object | | No | +| opening_statement | string | | No | +| pre_prompt | string | | No | +| prompt_type | string | | No | +| retriever_resource | object | | No | +| sensitive_word_avoidance | object | | No | +| speech_to_text | object | | No | +| suggested_questions | object | | No | +| suggested_questions_after_answer | object | | No | +| text_to_speech | object | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| user_input_form | object | | No | + +#### TrialConversationVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| id | string | | No | +| name | string | | No | +| value | object | | No | +| value_type | string | | No | + +#### TrialDeletedTool + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider_id | string | | No | +| tool_name | string | | No | +| type | string | | No | + +#### TrialPipelineVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| allow_file_extension | [ string ] | | No | +| allow_file_upload_methods | [ string ] | | No | +| allowed_file_types | [ string ] | | No | +| belong_to_node_id | string | | No | +| default_value | object | | No | +| label | string | | No | +| max_length | integer | | No | +| options | [ string ] | | No | +| placeholder | string | | No | +| required | boolean | | No | +| tooltips | string | | No | +| type | string | | No | +| unit | string | | No | +| variable | string | | No | + +#### TrialSite + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| access_token | string | | No | +| app_base_url | string | | No | +| chat_color_theme | string | | No | +| chat_color_theme_inverted | boolean | | No | +| code | string | | No | +| 
copyright | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| custom_disclaimer | string | | No | +| customize_domain | string | | No | +| customize_token_strategy | string | | No | +| default_language | string | | No | +| description | string | | No | +| icon | string | | No | +| icon_background | string | | No | +| icon_type | string | | No | +| icon_url | object | | No | +| privacy_policy | string | | No | +| prompt_public | boolean | | No | +| show_workflow_steps | boolean | | No | +| title | string | | No | +| updated_at | object | | No | +| updated_by | string | | No | +| use_icon_as_answer_icon | boolean | | No | + +#### TrialTag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| name | string | | No | +| type | string | | No | + +#### TrialWorkflow + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ [TrialConversationVariable](#trialconversationvariable) ] | | No | +| created_at | object | | No | +| created_by | [SimpleAccount](#simpleaccount) | | No | +| environment_variables | [ object ] | | No | +| features | object | | No | +| graph | object | | No | +| hash | string | | No | +| id | string | | No | +| marked_comment | string | | No | +| marked_name | string | | No | +| rag_pipeline_variables | [ [TrialPipelineVariable](#trialpipelinevariable) ] | | No | +| tool_published | boolean | | No | +| updated_at | object | | No | +| updated_by | [SimpleAccount](#simpleaccount) | | No | +| version | string | | No | + +#### TrialWorkflowPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by | string | | No | +| id | string | | No | +| updated_at | object | | No | +| updated_by | string | | No | + +#### TriggerOAuthClientPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| 
client_params | | | No | +| enabled | | | No | + +#### TriggerSubscriptionBuilderCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_type | string | | No | + +#### TriggerSubscriptionBuilderUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | | | No | +| name | | | No | +| parameters | | | No | +| properties | | | No | + +#### TriggerSubscriptionBuilderVerifyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credentials | object | | Yes | + +#### Type + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| Type | string | | | + +#### UpdateAnnotationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| annotation_reply | | | No | +| answer | | | No | +| content | | | No | +| question | | | No | + +#### UpdateAppPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | App description (max 400 chars) | No | +| icon | | Icon | No | +| icon_background | | Icon background color | No | +| icon_type | | Icon type | No | +| max_active_requests | | Maximum active requests | No | +| name | string | App name | Yes | +| use_icon_as_answer_icon | | Use icon as answer icon | No | + +#### UpgradeMode + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| UpgradeMode | string | | | + +#### UploadConfig + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_image_file_size_limit | | | No | +| audio_file_size_limit | integer | | Yes | +| batch_count_limit | integer | | Yes | +| file_size_limit | integer | | Yes | +| file_upload_limit | | | No | +| image_file_batch_limit | integer | | Yes | +| image_file_size_limit | integer | | Yes | +| single_chunk_attachment_limit | integer | | Yes | +| 
video_file_size_limit | integer | | Yes | +| workflow_file_upload_limit | integer | | Yes | + +#### UpsertNotificationPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| contents | [ [LangContentPayload](#langcontentpayload) ] | | Yes | +| end_time | | RFC3339, e.g. 2026-03-20T23:59:59Z | No | +| frequency | string | 'once' \| 'every_page_load' | No | +| notification_id | | Omit to create; supply UUID to update | No | +| start_time | | RFC3339, e.g. 2026-03-01T00:00:00Z | No | +| status | string | 'active' \| 'inactive' | No | + +#### UserAction + +User action configuration. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| button_style | [ButtonStyle](#buttonstyle) | | No | +| id | string | | Yes | +| title | string | | Yes | + +#### WebhookTriggerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| id | string | | Yes | +| node_id | string | | Yes | +| webhook_debug_url | string | | Yes | +| webhook_id | string | | Yes | +| webhook_url | string | | Yes | + +#### WebsiteCrawlPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| options | object | | Yes | +| provider | string | *Enum:* `"firecrawl"`, `"jinareader"`, `"watercrawl"` | Yes | +| url | string | | Yes | + +#### WebsiteCrawlStatusQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| provider | string | *Enum:* `"firecrawl"`, `"jinareader"`, `"watercrawl"` | Yes | + +#### WebsiteInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| job_id | string | | Yes | +| only_main_content | boolean | | No | +| provider | string | | Yes | +| urls | [ string ] | | Yes | + +#### WeightKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | Yes | + +#### WeightModel + +| 
Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | | | No | +| vector_setting | | | No | +| weight_type | | | No | + +#### WeightVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | Yes | +| embedding_provider_name | string | | Yes | +| vector_weight | number | | Yes | + +#### Workflow + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_variables | [ [ConversationVariable](#conversationvariable) ] | | No | +| created_at | object | | No | +| created_by | [SimpleAccount](#simpleaccount) | | No | +| environment_variables | [ object ] | | No | +| features | object | | No | +| graph | object | | No | +| hash | string | | No | +| id | string | | No | +| marked_comment | string | | No | +| marked_name | string | | No | +| rag_pipeline_variables | [ [PipelineVariable](#pipelinevariable) ] | | No | +| tool_published | boolean | | No | +| updated_at | object | | No | +| updated_by | [SimpleAccount](#simpleaccount) | | No | +| version | string | | No | + +#### WorkflowAppLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowAppLogPartialResponse](#workflowapplogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowAppLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| created_by_role | | | No | +| created_from | | | No | +| details | | | No | +| id | string | | Yes | +| workflow_run | | | No | + +#### WorkflowAppLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at__after | | Filter logs created after this 
timestamp | No | +| created_at__before | | Filter logs created before this timestamp | No | +| created_by_account | | Filter by account | No | +| created_by_end_user_session_id | | Filter by end user session ID | No | +| detail | boolean | Whether to return detailed logs | No | +| keyword | | Search keyword for filtering logs | No | +| limit | integer | Number of items per page (1-100) | No | +| page | integer | Page number (1-99999) | No | +| status | | Execution status filter (succeeded, failed, stopped, partial-succeeded) | No | + +#### WorkflowArchivedLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowArchivedLogPartialResponse](#workflowarchivedlogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowArchivedLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| id | string | | Yes | +| trigger_metadata | | | No | +| workflow_run | | | No | + +#### WorkflowCommentBasic + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | +| mention_count | integer | | No | +| participants | [ [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) ] | | No | +| position_x | number | | No | +| position_y | number | | No | +| reply_count | integer | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | +| resolved_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| updated_at | object | | 
No | + +#### WorkflowCommentCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| id | string | | No | + +#### WorkflowCommentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Comment content | Yes | +| mentioned_user_ids | [ string ] | Mentioned user IDs | No | +| position_x | number | Comment X position | Yes | +| position_y | number | Comment Y position | Yes | + +#### WorkflowCommentDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | +| mentions | [ [_AnonymousInlineModel_f7ff64cce858](#_anonymousinlinemodel_f7ff64cce858) ] | | No | +| position_x | number | | No | +| position_y | number | | No | +| replies | [ [_AnonymousInlineModel_55c39c6a4b9e](#_anonymousinlinemodel_55c39c6a4b9e) ] | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | +| resolved_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| updated_at | object | | No | + +#### WorkflowCommentMentionUsersPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| users | [ [AccountWithRole](#accountwithrole) ] | | Yes | + +#### WorkflowCommentReplyCreate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| id | string | | No | + +#### WorkflowCommentReplyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Reply content | Yes | +| mentioned_user_ids | [ string ] | Mentioned user IDs | No | + +#### WorkflowCommentReplyUpdate + +| Name | 
Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| updated_at | object | | No | + +#### WorkflowCommentResolve + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| resolved | boolean | | No | +| resolved_at | object | | No | +| resolved_by | string | | No | + +#### WorkflowCommentUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | No | +| updated_at | object | | No | + +#### WorkflowCommentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | Comment content | Yes | +| mentioned_user_ids | | Mentioned user IDs. Omit to keep existing mentions. | No | +| position_x | | Comment X position | No | +| position_y | | Comment Y position | No | + +#### WorkflowDraftEnvVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| id | string | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowDraftEnvVariableList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| items | [ [WorkflowDraftEnvVariable](#workflowdraftenvvariable) ] | | No | + +#### WorkflowDraftVariable + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| full_content | object | | No | +| id | string | | No | +| is_truncated | boolean | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value | object | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowDraftVariableList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- 
| +| items | [ [WorkflowDraftVariable](#workflowdraftvariable) ] | | No | + +#### WorkflowDraftVariableListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | Items per page | No | +| page | integer | Page number | No | + +#### WorkflowDraftVariableListWithoutValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| items | [ [WorkflowDraftVariableWithoutValue](#workflowdraftvariablewithoutvalue) ] | | No | +| total | object | | No | + +#### WorkflowDraftVariablePatchPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | | | No | +| value | | | No | + +#### WorkflowDraftVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | | Variable name | No | +| value | | Variable value | No | + +#### WorkflowDraftVariableWithoutValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | No | +| edited | boolean | | No | +| id | string | | No | +| is_truncated | boolean | | No | +| name | string | | No | +| selector | [ string ] | | No | +| type | string | | No | +| value_type | string | | No | +| visible | boolean | | No | + +#### WorkflowExecutionStatus + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| WorkflowExecutionStatus | string | | | + +#### WorkflowFeaturesPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| features | object | Workflow feature configuration | Yes | + +#### WorkflowListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | | No | +| named_only | boolean | | No | +| page | integer | | No | +| user_id | | | No | + +#### WorkflowOnlineUsersPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| app_ids | [ 
string ] | App IDs | No | + +#### WorkflowPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| has_more | boolean | | No | +| items | [ [Workflow](#workflow) ] | | No | +| limit | integer | | No | +| page | integer | | No | + +#### WorkflowPartial + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| id | string | | Yes | +| updated_at | | | No | +| updated_by | | | No | + +#### WorkflowRunCount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| failed | integer | | No | +| partial_succeeded | integer | | No | +| running | integer | | No | +| stopped | integer | | No | +| succeeded | integer | | No | +| total | integer | | No | + +#### WorkflowRunCountQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| status | | Workflow run status filter | No | +| time_range | | Time range filter (e.g., 7d, 4h, 30m, 30s) | No | +| triggered_from | | Filter by trigger source: debugging or app-run | No | + +#### WorkflowRunDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| created_by_end_user | [SimpleEndUser](#simpleenduser) | | No | +| created_by_role | string | | No | +| elapsed_time | number | | No | +| error | string | | No | +| exceptions_count | integer | | No | +| finished_at | object | | No | +| graph | object | | No | +| id | string | | No | +| inputs | object | | No | +| outputs | object | | No | +| status | string | | No | +| total_steps | integer | | No | +| total_tokens | integer | | No | +| version | string | | No | + +#### WorkflowRunExport + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| presigned_url | string | Pre-signed URL for download | No | +| presigned_url_expires_at | 
string | Pre-signed URL expiration time | No | +| status | string | Export status: success/failed | No | + +#### WorkflowRunForArchivedLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| elapsed_time | | | No | +| id | string | | Yes | +| status | | | No | +| total_tokens | | | No | +| triggered_from | | | No | + +#### WorkflowRunForList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| elapsed_time | number | | No | +| exceptions_count | integer | | No | +| finished_at | object | | No | +| id | string | | No | +| retry_index | integer | | No | +| status | string | | No | +| total_steps | integer | | No | +| total_tokens | integer | | No | +| version | string | | No | + +#### WorkflowRunForLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| exceptions_count | | | No | +| finished_at | | | No | +| id | string | | Yes | +| status | | | No | +| total_steps | | | No | +| total_tokens | | | No | +| triggered_from | | | No | +| version | | | No | + +#### WorkflowRunListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last run ID for pagination | No | +| limit | integer | Number of items per page (1-100) | No | +| status | | Workflow run status filter | No | +| triggered_from | | Filter by trigger source: debugging or app-run | No | + +#### WorkflowRunNodeExecution + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | object | | No | +| created_by_account | [SimpleAccount](#simpleaccount) | | No | +| created_by_end_user | [SimpleEndUser](#simpleenduser) | | No | +| created_by_role | string | | No | +| elapsed_time | number | | No | +| error | string | | No | +| 
execution_metadata | object | | No | +| extras | object | | No | +| finished_at | object | | No | +| id | string | | No | +| index | integer | | No | +| inputs | object | | No | +| inputs_truncated | boolean | | No | +| node_id | string | | No | +| node_type | string | | No | +| outputs | object | | No | +| outputs_truncated | boolean | | No | +| predecessor_node_id | string | | No | +| process_data | object | | No | +| process_data_truncated | boolean | | No | +| status | string | | No | +| title | string | | No | + +#### WorkflowRunNodeExecutionList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowRunNodeExecution](#workflowrunnodeexecution) ] | | No | + +#### WorkflowRunPagination + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowRunForList](#workflowrunforlist) ] | | No | +| has_more | boolean | | No | +| limit | integer | | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | + +#### WorkflowRunQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### WorkflowRunRequest + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | + +#### WorkflowStatisticQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| end | | End date and time (YYYY-MM-DD HH:MM) | No | +| start | | Start date and time (YYYY-MM-DD HH:MM) | No | + +#### WorkflowToolCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | Yes | +| icon | object | | Yes | +| label | string | | Yes | +| labels | | | No | +| name | string | | Yes | +| parameters | [ 
[WorkflowToolParameterConfiguration](#workflowtoolparameterconfiguration) ] | | No | +| privacy_policy | | | No | +| workflow_app_id | string | | Yes | + +#### WorkflowToolDeletePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| workflow_tool_id | string | | Yes | + +#### WorkflowToolParameterConfiguration + +Workflow tool configuration + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | The description of the parameter | Yes | +| form | [ToolParameterForm](#toolparameterform) | The form of the parameter | Yes | +| name | string | The name of the parameter | Yes | + +#### WorkflowToolUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | | Yes | +| icon | object | | Yes | +| label | string | | Yes | +| labels | | | No | +| name | string | | Yes | +| parameters | [ [WorkflowToolParameterConfiguration](#workflowtoolparameterconfiguration) ] | | No | +| privacy_policy | | | No | +| workflow_tool_id | string | | Yes | + +#### WorkflowTriggerListResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowTriggerResponse](#workflowtriggerresponse) ] | | Yes | + +#### WorkflowTriggerResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| icon | string | | Yes | +| id | string | | Yes | +| node_id | string | | Yes | +| provider_name | string | | Yes | +| status | string | | Yes | +| title | string | | Yes | +| trigger_type | string | | Yes | +| updated_at | | | No | + +#### WorkflowUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| marked_comment | | | No | +| marked_name | | | No | + +#### WorkspaceCustomConfigPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| remove_webapp_brand | 
| | No | +| replace_webapp_logo | | | No | + +#### WorkspaceInfoPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### WorkspaceListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | | No | +| page | integer | | No | + +#### _AnonymousInlineModel_55c39c6a4b9e + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | No | +| created_at | object | | No | +| created_by | string | | No | +| created_by_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| id | string | | No | + +#### _AnonymousInlineModel_6fec07cd0d85 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| avatar_url | object | | No | +| email | string | | No | +| id | string | | No | +| name | string | | No | + +#### _AnonymousInlineModel_b1954337d565 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enable | boolean | | No | +| model_name | string | | No | +| model_provider_name | string | | No | +| summary_prompt | string | | No | + +#### _AnonymousInlineModel_f7ff64cce858 + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mentioned_user_account | [_AnonymousInlineModel_6fec07cd0d85](#_anonymousinlinemodel_6fec07cd0d85) | | No | +| mentioned_user_id | string | | No | +| reply_id | string | | No | + +## FastOpenAPI Preview (OpenAPI 3.0) + +### Dify API (FastOpenAPI PoC) +FastOpenAPI proof of concept for Dify API + +#### Version: 1.0 + +--- + +##### [GET] /console/api/init +**Get initialization validation status.** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [InitStatusResponse](#initstatusresponse)
| + +##### [POST] /console/api/init +**Validate initialization password.** + +###### Request Body + +| Required | Schema | +| -------- | ------ | +| Yes | **application/json**: [InitValidatePayload](#initvalidatepayload)
| + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Created | **application/json**: [InitValidateResponse](#initvalidateresponse)
| + +##### [GET] /console/api/ping +**Health check endpoint for connection testing.** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [PingResponse](#pingresponse)
| + +##### [GET] /console/api/setup +**Get system setup status. + + NOTE: This endpoint is unauthenticated by design. + + During first-time bootstrap there is no admin account yet, so frontend initialization must be + able to query setup progress before any login flow exists. + + Only bootstrap-safe status information should be returned by this endpoint. + ** + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [SetupStatusResponse](#setupstatusresponse)
| + +##### [POST] /console/api/setup +**Initialize system setup with admin account. + + NOTE: This endpoint is unauthenticated by design for first-time bootstrap. + Access is restricted by deployment mode (`SELF_HOSTED`), one-time setup guards, + and init-password validation rather than user session authentication. + ** + +###### Request Body + +| Required | Schema | +| -------- | ------ | +| Yes | **application/json**: [SetupRequestPayload](#setuprequestpayload)
| + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Created | **application/json**: [SetupResponse](#setupresponse)
| + +##### [GET] /console/api/version +**Check for application version updates.** + +###### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| current_version | query | | Yes | string | + +###### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | OK | **application/json**: [VersionResponse](#versionresponse)
| + +--- +##### Schemas + +###### ErrorSchema + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| error | { **"details"**: string, **"message"**: string, **"status"**: integer, **"type"**: string } | | Yes | + +###### InitStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| status | string,
**Available values:** "finished", "not_started" | Initialization status
*Enum:* `"finished"`, `"not_started"` | Yes | + +###### InitValidatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| password | string | Initialization password | Yes | + +###### InitValidateResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Operation result | Yes | + +###### PingResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Health check result | Yes | + +###### SetupRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | Admin email address | Yes | +| language | | Admin language | No | +| name | string | Admin name (max 30 characters) | Yes | +| password | string | Admin password | Yes | + +###### SetupResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| result | string | Setup result | Yes | + +###### SetupStatusResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| setup_at | | Setup completion time (ISO format) | No | +| step | string,
**Available values:** "finished", "not_started" | Setup step status
*Enum:* `"finished"`, `"not_started"` | Yes | + +###### VersionFeatures + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| can_replace_logo | boolean | Whether logo replacement is supported | Yes | +| model_load_balancing_enabled | boolean | Whether model load balancing is enabled | Yes | + +###### VersionResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| can_auto_update | boolean | Whether auto-update is supported | Yes | +| features | [VersionFeatures](#versionfeatures) | Feature flags and capabilities | Yes | +| release_date | string | Release date of latest version | Yes | +| release_notes | string | Release notes for latest version | Yes | +| version | string | Latest version number | Yes | diff --git a/api/openapi/markdown/service-swagger.md b/api/openapi/markdown/service-swagger.md new file mode 100644 index 0000000000..ec5ed280f5 --- /dev/null +++ b/api/openapi/markdown/service-swagger.md @@ -0,0 +1,2754 @@ +# Service API +API for application services + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## service_api +Service operations + +### / + +#### GET +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /app/feedbacks + +#### GET +##### Summary + +Get all feedbacks for the application + +##### Description + +Get all feedbacks for the application +Returns paginated list of all feedback submitted for messages in this app. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FeedbackListQuery](#feedbacklistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedbacks retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /apps/annotation-reply/{action} + +#### POST +##### Summary + +Enable or disable annotation reply feature + +##### Description + +Enable or disable annotation reply feature + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationReplyActionPayload](#annotationreplyactionpayload) | +| action | path | Action to perform: 'enable' or 'disable' | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 401 | Unauthorized - invalid API token | + +### /apps/annotation-reply/{action}/status/{job_id} + +#### GET +##### Summary + +Get the status of an annotation reply action job + +##### Description + +Get the status of an annotation reply action job + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action type | Yes | string | +| job_id | path | Job ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Job status retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Job not found | + +### /apps/annotations + +#### GET +##### Summary + +List annotations for the application + +##### Description + +List annotations for the application + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotations retrieved successfully | [AnnotationList](#annotationlist) | +| 401 | Unauthorized - invalid API token | | + +#### POST 
+##### Summary + +Create a new annotation + +##### Description + +Create a new annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationCreatePayload](#annotationcreatepayload) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Annotation created successfully | [Annotation](#annotation) | +| 401 | Unauthorized - invalid API token | | + +### /apps/annotations/{annotation_id} + +#### DELETE +##### Summary + +Delete an annotation + +##### Description + +Delete an annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| annotation_id | path | Annotation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Annotation deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Annotation not found | + +#### PUT +##### Summary + +Update an existing annotation + +##### Description + +Update an existing annotation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [AnnotationCreatePayload](#annotationcreatepayload) | +| annotation_id | path | Annotation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Annotation updated successfully | [Annotation](#annotation) | +| 401 | Unauthorized - invalid API token | | +| 403 | Forbidden - insufficient permissions | | +| 404 | Annotation not found | | + +### /audio-to-text + +#### POST +##### Summary + +Convert audio to text using speech-to-text + +##### Description + +Convert audio to text using speech-to-text +Accepts an audio file upload and returns the transcribed text. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Audio successfully transcribed | +| 400 | Bad request - no audio or invalid audio | +| 401 | Unauthorized - invalid API token | +| 413 | Audio file too large | +| 415 | Unsupported audio type | +| 500 | Internal server error | + +### /chat-messages + +#### POST +##### Summary + +Send a message in a chat conversation + +##### Description + +Send a message in a chat conversation +This endpoint handles chat messages for chat, agent chat, and advanced chat applications. +Supports conversation management and both blocking and streaming response modes. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatRequestPayload](#chatrequestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Message sent successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation or workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /chat-messages/{task_id}/stop + +#### POST +##### Summary + +Stop a running chat message generation + +##### Description + +Stop a running chat message generation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | The ID of the task to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /completion-messages + +#### POST +##### Summary + +Create a completion for the given prompt + +##### Description + +Create a completion for the given prompt +This endpoint generates a completion based on the provided inputs and query. 
+Supports both blocking and streaming response modes. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionRequestPayload](#completionrequestpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Completion created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | +| 500 | Internal server error | + +### /completion-messages/{task_id}/stop + +#### POST +##### Summary + +Stop a running completion task + +##### Description + +Stop a running completion task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | The ID of the task to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /conversations + +#### GET +##### Summary + +List all conversations for the current user + +##### Description + +List all conversations for the current user +Supports pagination using last_id and limit parameters. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationListQuery](#conversationlistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversations retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Last conversation not found | + +### /conversations/{c_id} + +#### DELETE +##### Summary + +Delete a specific conversation + +##### Description + +Delete a specific conversation + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | + +### /conversations/{c_id}/name + +#### POST +##### Summary + +Rename a conversation or auto-generate a name + +##### Description + +Rename a conversation or auto-generate a name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationRenamePayload](#conversationrenamepayload) | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation renamed successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation not found | + +### /conversations/{c_id}/variables + +#### GET +##### Summary + +List all variables for a conversation + +##### Description + +List all variables for a conversation +Conversational variables are only available for chat applications. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariablesQuery](#conversationvariablesquery) | +| c_id | path | Conversation ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variables retrieved successfully | [ConversationVariableInfiniteScrollPaginationResponse](#conversationvariableinfinitescrollpaginationresponse) | +| 401 | Unauthorized - invalid API token | | +| 404 | Conversation not found | | + +### /conversations/{c_id}/variables/{variable_id} + +#### PUT +##### Summary + +Update a conversation variable's value + +##### Description + +Update a conversation variable's value +Allows updating the value of a specific conversation variable. +The value must match the variable's expected type. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ConversationVariableUpdatePayload](#conversationvariableupdatepayload) | +| c_id | path | Conversation ID | Yes | string | +| variable_id | path | Variable ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Variable updated successfully | [ConversationVariableResponse](#conversationvariableresponse) | +| 400 | Bad request - type mismatch | | +| 401 | Unauthorized - invalid API token | | +| 404 | Conversation or variable not found | | + +### /datasets + +#### GET +##### Summary + +Resource for getting datasets + +##### Description + +List all datasets + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasets retrieved successfully | +| 401 | Unauthorized - invalid API token | + +#### POST +##### Summary + +Resource for creating datasets + +##### Description + +Create a new dataset + +##### Parameters + +| Name | Located in | 
Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| payload | body | | Yes | [DatasetCreatePayload](#datasetcreatepayload) |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Dataset created successfully |
+| 400 | Bad request - invalid parameters |
+| 401 | Unauthorized - invalid API token |
+
+### /datasets/pipeline/file-upload
+
+#### POST
+##### Summary
+
+Upload a file to a knowledge base pipeline
+
+##### Description
+
+Upload a file to a knowledge base pipeline
+Accepts a single file upload via multipart/form-data.
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 201 | File uploaded successfully |
+| 400 | Bad request - no file or invalid file |
+| 401 | Unauthorized - invalid API token |
+| 413 | File too large |
+| 415 | Unsupported file type |
+
+### /datasets/tags
+
+#### DELETE
+##### Summary
+
+Delete a knowledge type tag
+
+##### Description
+
+Delete a knowledge type tag
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| payload | body | | Yes | [TagDeletePayload](#tagdeletepayload) |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 204 | Tag deleted successfully |
+| 401 | Unauthorized - invalid API token |
+| 403 | Forbidden - insufficient permissions |
+
+#### GET
+##### Summary
+
+Get all knowledge type tags
+
+##### Description
+
+Get all knowledge type tags
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Tags retrieved successfully |
+| 401 | Unauthorized - invalid API token |
+
+#### PATCH
+##### Description
+
+Update a knowledge type tag
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| payload | body | | Yes | [TagUpdatePayload](#tagupdatepayload) |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Tag
updated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +#### POST +##### Summary + +Add a knowledge type tag + +##### Description + +Add a knowledge type tag + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagCreatePayload](#tagcreatepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tag created successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/tags/binding + +#### POST +##### Description + +Bind tags to a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagBindingPayload](#tagbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tags bound successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/tags/unbinding + +#### POST +##### Description + +Unbind tags from a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TagUnbindingPayload](#tagunbindingpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Tags unbound successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | + +### /datasets/{dataset_id} + +#### DELETE +##### Summary + +Deletes a dataset given its ID + +##### Description + +Delete a dataset +Args: + _: ignore + dataset_id (UUID): The ID of the dataset to be deleted. + +Returns: + dict: A dictionary with a key 'result' and a value 'success' + if the dataset was successfully deleted. Omitted in HTTP response. 
+ int: HTTP status code 204 indicating that the operation was successful. + +Raises: + NotFound: If the dataset with the given ID does not exist. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Dataset deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | +| 409 | Conflict - dataset is in use | + +#### GET +##### Description + +Get a specific dataset by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +#### PATCH +##### Description + +Update an existing dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DatasetUpdatePayload](#datasetupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Dataset updated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/document/create-by-file + +#### POST +##### Description + +Create a new document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document 
created successfully | +| 400 | Bad request - invalid file or parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create-by-text + +#### POST +##### Description + +Create a new document by providing text content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextCreatePayload](#documenttextcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create_by_file + +#### POST +##### Description + +Create a new document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid file or parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/document/create_by_text + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for creating a new document by providing text content. Use /datasets/{dataset_id}/document/create-by-text instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextCreatePayload](#documenttextcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document created successfully | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/documents + +#### GET +##### Description + +List all documents in a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/download-zip + +#### POST +##### Description + +Download selected uploaded documents as a single ZIP archive + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentBatchDownloadZipPayload](#documentbatchdownloadzippayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | ZIP archive generated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document or dataset not found | + +### /datasets/{dataset_id}/documents/metadata + +#### POST +##### Summary + +Update metadata for multiple documents + +##### Description + +Update metadata for multiple documents + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | 
[MetadataOperationData](#metadataoperationdata) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Documents metadata updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/status/{action} + +#### PATCH +##### Summary + +Batch update document status + +##### Description + +Batch update document status +Args: + tenant_id: tenant id + dataset_id: dataset id + action: action to perform (Literal["enable", "disable", "archive", "un_archive"]) + +Returns: + dict: A dictionary with a key 'result' and a value 'success' + int: HTTP status code 200 indicating that the operation was successful. + +Raises: + NotFound: If the dataset with the given ID does not exist. + Forbidden: If the user does not have permission. + InvalidActionError: If the action is invalid or cannot be performed. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform: 'enable', 'disable', 'archive', or 'un_archive' | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document status updated successfully | +| 400 | Bad request - invalid action | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/documents/{batch}/indexing-status + +#### GET +##### Description + +Get indexing status for documents in a batch + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| batch | path | Batch ID | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Indexing status retrieved 
successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or documents not found | + +### /datasets/{dataset_id}/documents/{document_id} + +#### DELETE +##### Summary + +Delete document + +##### Description + +Delete a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Document deleted successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - document is archived | +| 404 | Document not found | + +#### GET +##### Description + +Get a specific document by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document not found | + +#### PATCH +##### Description + +Update an existing document by uploading a file + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/download + +#### GET +##### Description + +Get a signed download URL for a document's original uploaded file + +##### Parameters + +| Name | Located in | Description | Required | Schema 
| +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Download URL generated successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - insufficient permissions | +| 404 | Document or upload file not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments + +#### GET +##### Description + +List segments in a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentListQuery](#segmentlistquery) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segments retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or document not found | + +#### POST +##### Description + +Create segments in a document + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentCreatePayload](#segmentcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segments created successfully | +| 400 | Bad request - segments data is missing | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or document not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id} + +#### DELETE +##### Description + +Delete a specific segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | 
+| document_id | path | Document ID | Yes | string | +| segment_id | path | Segment ID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Segment deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### GET +##### Description + +Get a specific segment by ID + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| document_id | path | | Yes | string | +| segment_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segment retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### POST +##### Description + +Update a specific segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [SegmentUpdatePayload](#segmentupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Segment ID to update | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Segment updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks + +#### GET +##### Description + +List child chunks for a segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkListQuery](#childchunklistquery) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | 
Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunks retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +#### POST +##### Description + +Create a new child chunk for a segment + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkCreatePayload](#childchunkcreatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunk created successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, or segment not found | + +### /datasets/{dataset_id}/documents/{document_id}/segments/{segment_id}/child_chunks/{child_chunk_id} + +#### DELETE +##### Description + +Delete a specific child chunk + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| child_chunk_id | path | Child chunk ID to delete | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Child chunk deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, segment, or child chunk not found | + +#### PATCH +##### Description + +Update a specific child chunk + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChildChunkUpdatePayload](#childchunkupdatepayload) | +| child_chunk_id | path | Child chunk ID to update | Yes | string 
| +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | +| segment_id | path | Parent segment ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Child chunk updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset, document, segment, or child chunk not found | + +### /datasets/{dataset_id}/documents/{document_id}/update-by-file + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by uploading a file. Use PATCH /datasets/{dataset_id}/documents/{document_id} instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update-by-text + +#### POST +##### Description + +Update an existing document by providing text content + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update_by_file + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by uploading a file. Use PATCH /datasets/{dataset_id}/documents/{document_id} instead. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/documents/{document_id}/update_by_text + +#### POST +***DEPRECATED*** +##### Description + +Deprecated legacy alias for updating an existing document by providing text content. Use /datasets/{dataset_id}/documents/{document_id}/update-by-text instead. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) | +| dataset_id | path | Dataset ID | Yes | string | +| document_id | path | Document ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Document updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Document not found | + +### /datasets/{dataset_id}/hit-testing + +#### POST +##### Summary + +Perform hit testing on a dataset + +##### Description + +Perform hit testing on a dataset +Tests retrieval performance for the specified dataset. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Hit testing results | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata + +#### GET +##### Summary + +Get all metadata for a dataset + +##### Description + +Get all metadata for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +#### POST +##### Summary + +Create metadata for a dataset + +##### Description + +Create metadata for a dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MetadataArgs](#metadataargs) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 201 | Metadata created successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata/built-in + +#### GET +##### Summary + +Get all built-in metadata fields + +##### Description + +Get all built-in metadata fields + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Built-in fields retrieved successfully | +| 401 | Unauthorized - invalid 
API token | + +### /datasets/{dataset_id}/metadata/built-in/{action} + +#### POST +##### Summary + +Enable or disable built-in metadata field + +##### Description + +Enable or disable built-in metadata field + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| action | path | Action to perform: 'enable' or 'disable' | Yes | string | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Action completed successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/metadata/{metadata_id} + +#### DELETE +##### Summary + +Delete metadata + +##### Description + +Delete metadata + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | +| metadata_id | path | Metadata ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Metadata deleted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or metadata not found | + +#### PATCH +##### Summary + +Update metadata name + +##### Description + +Update metadata name + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MetadataUpdatePayload](#metadataupdatepayload) | +| dataset_id | path | Dataset ID | Yes | string | +| metadata_id | path | Metadata ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata updated successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset or metadata not found | + +### /datasets/{dataset_id}/pipeline/datasource-plugins + +#### GET +##### Summary + +Resource for getting datasource plugins + +##### Description + +List 
all datasource plugins for a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| is_published | query | Whether to get published or draft datasource plugins (true for published, false for draft, default: true) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasource plugins retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/pipeline/datasource/nodes/{node_id}/run + +#### POST +##### Summary + +Run a datasource node for a rag pipeline + +##### Description + +Run a datasource node for a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | +| node_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Datasource node run successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/pipeline/run + +#### POST +##### Summary + +Resource for running a rag pipeline + +##### Description + +Run a rag pipeline + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Pipeline run successfully | +| 401 | Unauthorized - invalid API token | + +### /datasets/{dataset_id}/retrieve + +#### POST +##### Summary + +Perform hit testing on a dataset + +##### Description + +Perform hit testing on a dataset +Tests retrieval performance for the specified dataset. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Hit testing results | +| 401 | Unauthorized - invalid API token | +| 404 | Dataset not found | + +### /datasets/{dataset_id}/tags + +#### GET +##### Summary + +Get all knowledge type tags + +##### Description + +Get tags bound to a specific dataset + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| dataset_id | path | Dataset ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Tags retrieved successfully | +| 401 | Unauthorized - invalid API token | + +### /end-users/{end_user_id} + +#### GET +##### Summary + +Get end user detail + +##### Description + +Get an end user by ID +This endpoint is scoped to the current app token's tenant/app to prevent +cross-tenant/app access when an end-user ID is known. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| end_user_id | path | End user ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | End user retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | End user not found | + +### /files/upload + +#### POST +##### Summary + +Upload a file for use in conversations + +##### Description + +Upload a file for use in conversations +Accepts a single file upload via multipart/form-data. 
+ +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | +| 400 | Bad request - no file or invalid file | | +| 401 | Unauthorized - invalid API token | | +| 413 | File too large | | +| 415 | Unsupported file type | | + +### /files/{file_id}/preview + +#### GET +##### Summary + +Preview/Download a file that was uploaded via Service API + +##### Description + +Preview or download a file uploaded via Service API +Provides secure file preview/download functionality. +Files can only be accessed if they belong to messages within the requesting app's context. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [FilePreviewQuery](#filepreviewquery) | +| file_id | path | UUID of the file to preview | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | File retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - file access denied | +| 404 | File not found | + +### /form/human_input/{form_token} + +#### GET +##### Description + +Get a paused human input form by token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | Human input form token | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Form retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Form not found | +| 412 | Form already submitted or expired | + +#### POST +##### Description + +Submit a paused human input form by token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) | +| form_token | 
path | Human input form token | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Form submitted successfully | +| 400 | Bad request - invalid submission data | +| 401 | Unauthorized - invalid API token | +| 404 | Form not found | +| 412 | Form already submitted or expired | + +### /info + +#### GET +##### Summary + +Get app information + +##### Description + +Get basic application information +Returns basic information about the application including name, description, tags, and mode. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Application info retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /messages + +#### GET +##### Summary + +List messages in a conversation + +##### Description + +List messages in a conversation +Retrieves messages with pagination support using first_id. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageListQuery](#messagelistquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Messages retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Conversation or first message not found | + +### /messages/{message_id}/feedbacks + +#### POST +##### Summary + +Submit feedback for a message + +##### Description + +Submit feedback for a message +Allows users to rate messages as like/dislike and provide optional feedback content. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback submitted successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Message not found | + +### /messages/{message_id}/suggested + +#### GET +##### Summary + +Get suggested follow-up questions for a message + +##### Description + +Get suggested follow-up questions for a message +Returns AI-generated follow-up questions based on the message content. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message ID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Suggested questions retrieved successfully | +| 400 | Suggested questions feature is disabled | +| 401 | Unauthorized - invalid API token | +| 404 | Message not found | +| 500 | Internal server error | + +### /meta + +#### GET +##### Summary + +Get app metadata + +##### Description + +Get application metadata +Returns metadata about the application including configuration and settings. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Metadata retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Description + +Retrieve application input parameters and configuration +Returns the input form parameters and configuration for the application. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Parameters retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Application not found | + +### /site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Get application site configuration +Returns the site configuration for the application including theme, icons, and text. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Site configuration retrieved successfully | +| 401 | Unauthorized - invalid API token | +| 403 | Forbidden - site not found or tenant archived | + +### /text-to-audio + +#### POST +##### Summary + +Convert text to audio using text-to-speech + +##### Description + +Convert text to audio using text-to-speech +Converts the provided text to audio using the specified voice. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Text successfully converted to audio | +| 400 | Bad request - invalid parameters | +| 401 | Unauthorized - invalid API token | +| 500 | Internal server error | + +### /workflow/{task_id}/events + +#### GET +##### Description + +Get workflow execution events stream after resume + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Workflow run ID | Yes | string | +| continue_on_pause | query | Whether to keep the stream open across workflow_paused events; specify `"true"` to keep the stream open for `workflow_paused` events. 
| No | string | +| include_state_snapshot | query | Whether to replay from persisted state snapshot, specify `"true"` to include a status snapshot of executed nodes | No | string | +| user | query | End user identifier (query param) | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | SSE event stream | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow run not found | + +### /workflows/logs + +#### GET +##### Summary + +Get workflow app logs + +##### Description + +Get workflow execution logs +Returns paginated workflow execution logs with filtering options. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowLogQuery](#workflowlogquery) | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Logs retrieved successfully | [WorkflowAppLogPaginationResponse](#workflowapplogpaginationresponse) | +| 401 | Unauthorized - invalid API token | | + +### /workflows/run + +#### POST +##### Summary + +Execute a workflow + +##### Description + +Execute a workflow +Runs a workflow with the provided inputs and returns the results. +Supports both blocking and streaming response modes. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /workflows/run/{workflow_run_id} + +#### GET +##### Summary + +Get a workflow task running detail + +##### Description + +Get workflow run details +Returns detailed information about a specific workflow run. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| workflow_run_id | path | Workflow run ID | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Workflow run details retrieved successfully | [WorkflowRunResponse](#workflowrunresponse) | +| 401 | Unauthorized - invalid API token | | +| 404 | Workflow run not found | | + +### /workflows/tasks/{task_id}/stop + +#### POST +##### Summary + +Stop a running workflow task + +##### Description + +Stop a running workflow task + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Task stopped successfully | +| 401 | Unauthorized - invalid API token | +| 404 | Task not found | + +### /workflows/{workflow_id}/run + +#### POST +##### Summary + +Run specific workflow by ID + +##### Description + +Execute a specific workflow by ID +Executes a specific workflow version identified by its ID. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) | +| workflow_id | path | Workflow ID to execute | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Workflow executed successfully | +| 400 | Bad request - invalid parameters or workflow issues | +| 401 | Unauthorized - invalid API token | +| 404 | Workflow not found | +| 429 | Rate limit exceeded | +| 500 | Internal server error | + +### /workspaces/current/models/model-types/{model_type} + +#### GET +##### Summary + +Get available models by model type + +##### Description + +Get available models by model type +Returns a list of available models for the specified model type. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| model_type | path | Type of model to retrieve | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Models retrieved successfully | +| 401 | Unauthorized - invalid API token | + +--- +### Models + +#### Annotation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| created_at | | | No | +| hit_count | | | No | +| id | string | | Yes | +| question | | | No | + +#### AnnotationCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | string | Annotation answer | Yes | +| question | string | Annotation question | Yes | + +#### AnnotationList + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [Annotation](#annotation) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### AnnotationReplyActionPayload + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | Embedding model name | Yes | +| embedding_provider_name | string | Embedding provider name | Yes | +| score_threshold | number | Score threshold for annotation matching | Yes | + +#### ChatRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate_name | boolean | Auto generate conversation name | No | +| conversation_id | | Conversation UUID | No | +| files | | | No | +| inputs | object | | Yes | +| query | string | | Yes | +| response_mode | | | No | +| retriever_from | string | | No | +| workflow_id | | Workflow ID for advanced chat | No | + +#### ChildChunkCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### ChildChunkListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | + +#### ChildChunkUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | string | | Yes | + +#### CompletionRequestPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| query | string | | No | +| response_mode | | | No | +| retriever_from | string | | No | + +#### Condition + +Condition detail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| comparison_operator | string | *Enum:* `"<"`, `"="`, `">"`, `"after"`, `"before"`, `"contains"`, `"empty"`, `"end with"`, `"in"`, `"is"`, `"is not"`, `"not contains"`, `"not empty"`, `"not in"`, `"start with"`, `"≠"`, `"≤"`, `"≥"` | Yes | +| name | string | | Yes | +| value | | | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last 
conversation ID for pagination | No | +| limit | integer | Number of conversations to return | No | +| sort_by | string | Sort order for conversations
*Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### ConversationVariableInfiniteScrollPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [ConversationVariableResponse](#conversationvariableresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | + +#### ConversationVariableResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| description | | | No | +| id | string | | Yes | +| name | string | | Yes | +| updated_at | | | No | +| value | | | No | +| value_type | string | | Yes | + +#### ConversationVariableUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| value | | | Yes | + +#### ConversationVariablesQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | Last variable ID for pagination | No | +| limit | integer | Number of variables to return | No | +| variable_name | | Filter variables by name | No | + +#### DataSetTag + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| binding_count | | | No | +| id | string | | Yes | +| name | string | | Yes | +| type | string | | Yes | + +#### DatasetCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | string | Dataset description (max 400 chars) | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| permission | | | No | +| provider | string | | No | +| retrieval_model | | | No | +| summary_index_setting 
| | | No | + +#### DatasetListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| include_all | boolean | Include all datasets | No | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| tag_ids | [ string ] | Filter by tag IDs | No | + +#### DatasetPermissionEnum + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| DatasetPermissionEnum | string | | | + +#### DatasetUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| description | | Dataset description (max 400 chars) | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| external_knowledge_api_id | | | No | +| external_knowledge_id | | | No | +| external_retrieval_model | | | No | +| indexing_technique | | | No | +| name | | | No | +| partial_member_list | | | No | +| permission | | | No | +| retrieval_model | | | No | + +#### DatasourceNodeRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| credential_id | | | No | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_published | boolean | | Yes | + +#### DocumentBatchDownloadZipPayload + +Request payload for bulk downloading documents as a zip archive. 
+ +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_ids | [ string (uuid) ] | | Yes | + +#### DocumentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | Search keyword | No | +| limit | integer | Number of items per page | No | +| page | integer | Page number | No | +| status | | Document status filter | No | + +#### DocumentMetadataOperation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| document_id | string | | Yes | +| metadata_list | [ [MetadataDetail](#metadatadetail) ] | | Yes | +| partial_update | boolean | | No | + +#### DocumentTextCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| embedding_model | | | No | +| embedding_model_provider | | | No | +| indexing_technique | | | No | +| name | string | | Yes | +| original_document_id | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| text | string | | Yes | + +#### DocumentTextUpdate + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| doc_form | string | | No | +| doc_language | string | | No | +| name | | | No | +| process_rule | | | No | +| retrieval_model | | | No | +| text | | | No | + +#### FeedbackListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| limit | integer | Number of feedbacks per page | No | +| page | integer | Page number | No | + +#### FilePreviewQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| as_attachment | boolean | Download as attachment | No | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | 
string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### HitTestingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| attachment_ids | | | No | +| external_retrieval_model | | | No | +| query | string | | Yes | +| retrieval_model | | | No | + +#### HumanInputFormSubmitPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| action | string | | Yes | +| inputs | object | | Yes | + +#### JsonValue + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| JsonValue | | | | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| rating | | | No | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MetadataArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| type | string | *Enum:* `"number"`, `"string"`, `"time"` | Yes | + +#### MetadataDetail + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| name | string | | Yes | +| value | | | No | + +#### MetadataFilteringCondition + +Metadata Filtering Condition. 
+ +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conditions | | | No | +| logical_operator | | | No | + +#### MetadataOperationData + +Metadata operation data + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| operation_data | [ [DocumentMetadataOperation](#documentmetadataoperation) ] | | Yes | + +#### MetadataUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### PipelineRunApiEntity + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| datasource_info_list | [ object ] | | Yes | +| datasource_type | string | | Yes | +| inputs | object | | Yes | +| is_published | boolean | | Yes | +| response_mode | string | | Yes | +| start_node_id | string | | Yes | + +#### PreProcessingRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| enabled | boolean | | Yes | +| id | string | | Yes | + +#### ProcessRule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| mode | string | *Enum:* `"automatic"`, `"custom"`, `"hierarchical"` | Yes | +| rules | | | No | + +#### RerankingModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| reranking_model_name | | | No | +| reranking_provider_name | | | No | + +#### RetrievalMethod + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| RetrievalMethod | string | | | + +#### RetrievalModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| metadata_filtering_conditions | | | No | +| reranking_enable | boolean | | Yes | +| reranking_mode | | | No | +| reranking_model | | | No | +| score_threshold | | | No | +| score_threshold_enabled | boolean | | Yes | +| search_method | [RetrievalMethod](#retrievalmethod) | | Yes | +| top_k | integer | | Yes | +| weights | | | No | 
+ +#### Rule + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| parent_mode | | | No | +| pre_processing_rules | | | No | +| segmentation | | | No | +| subchunk_segmentation | | | No | + +#### SegmentCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| segments | | | No | + +#### SegmentListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword | | | No | +| status | [ string ] | | No | + +#### SegmentUpdateArgs + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| answer | | | No | +| attachment_ids | | | No | +| content | | | No | +| enabled | | | No | +| keywords | | | No | +| regenerate_child_chunks | boolean | | No | +| summary | | | No | + +#### SegmentUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| segment | [SegmentUpdateArgs](#segmentupdateargs) | | Yes | + +#### Segmentation + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| chunk_overlap | integer | | No | +| max_tokens | integer | | Yes | +| separator | string | | No | + +#### SimpleAccount + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| id | string | | Yes | +| name | string | | Yes | + +#### SimpleEndUser + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| id | string | | Yes | +| is_anonymous | boolean | | Yes | +| session_id | | | No | +| type | string | | Yes | + +#### TagBindingPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_ids | [ string ] | | Yes | +| target_id | string | | Yes | + +#### TagCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | + +#### TagDeletePayload + +| Name | Type | Description | 
Required | +| ---- | ---- | ----------- | -------- | +| tag_id | string | | Yes | + +#### TagUnbindingPayload + +Accept the legacy single-tag Service API payload while exposing a normalized tag_ids list internally. + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| tag_id | | | No | +| tag_ids | [ string ] | | No | +| target_id | string | | Yes | + +#### TagUpdatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| name | string | | Yes | +| tag_id | string | | Yes | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### WeightKeywordSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_weight | number | | Yes | + +#### WeightModel + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| keyword_setting | | | No | +| vector_setting | | | No | +| weight_type | | | No | + +#### WeightVectorSetting + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| embedding_model_name | string | | Yes | +| embedding_provider_name | string | | Yes | +| vector_weight | number | | Yes | + +#### WorkflowAppLogPaginationResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| data | [ [WorkflowAppLogPartialResponse](#workflowapplogpartialresponse) ] | | Yes | +| has_more | boolean | | Yes | +| limit | integer | | Yes | +| page | integer | | Yes | +| total | integer | | Yes | + +#### WorkflowAppLogPartialResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by_account | | | No | +| created_by_end_user | | | No | +| created_by_role | | | No | 
+| created_from | | | No | +| details | | | No | +| id | string | | Yes | +| workflow_run | | | No | + +#### WorkflowLogQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at__after | | | No | +| created_at__before | | | No | +| created_by_account | | | No | +| created_by_end_user_session_id | | | No | +| keyword | | | No | +| limit | integer | | No | +| page | integer | | No | +| status | | | No | + +#### WorkflowRunForLogResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| exceptions_count | | | No | +| finished_at | | | No | +| id | string | | Yes | +| status | | | No | +| total_steps | | | No | +| total_tokens | | | No | +| triggered_from | | | No | +| version | | | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | +| response_mode | | | No | + +#### WorkflowRunResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| elapsed_time | | | No | +| error | | | No | +| finished_at | | | No | +| id | string | | Yes | +| inputs | | | No | +| outputs | object | | No | +| status | string | | Yes | +| total_steps | | | No | +| total_tokens | | | No | +| workflow_id | string | | Yes | diff --git a/api/openapi/markdown/web-swagger.md b/api/openapi/markdown/web-swagger.md new file mode 100644 index 0000000000..c9b3b31357 --- /dev/null +++ b/api/openapi/markdown/web-swagger.md @@ -0,0 +1,1224 @@ +# Web API +Public APIs for web applications including file uploads, chat interactions, and app management + +## Version: 1.0 + +### Security +**Bearer** + +| apiKey | *API Key* | +| ------ | --------- | +| Description | Type: Bearer {your-api-key} | +| In | header | +| Name | Authorization | + +--- +## web +Web application API operations + 
+### /audio-to-text + +#### POST +##### Summary + +Convert audio to text + +##### Description + +Convert audio file to text using speech-to-text service. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 413 | Audio file too large | +| 415 | Unsupported audio type | +| 500 | Internal Server Error | + +### /chat-messages + +#### POST +##### Description + +Create a chat message for conversational applications. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ChatMessagePayload](#chatmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /chat-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running chat message task. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Task Not Found | +| 500 | Internal Server Error | + +### /completion-messages + +#### POST +##### Description + +Create a completion message for text generation applications. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [CompletionMessagePayload](#completionmessagepayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /completion-messages/{task_id}/stop + +#### POST +##### Description + +Stop a running completion message task. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | Task ID to stop | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Task Not Found | +| 500 | Internal Server Error | + +### /conversations + +#### GET +##### Description + +Retrieve paginated list of conversations for a chat application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| last_id | query | Last conversation ID for pagination | No | string | +| limit | query | Number of conversations to return (1-100) | No | integer | +| pinned | query | Filter by pinned status | No | string | +| sort_by | query | Sort order | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id} + +#### DELETE +##### Description + +Delete a specific conversation. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Conversation deleted successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/name + +#### POST +##### Description + +Rename a specific conversation with a custom name or auto-generate one. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | +| auto_generate | query | Auto-generate conversation name | No | boolean | +| name | query | New conversation name | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation renamed successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/pin + +#### PATCH +##### Description + +Pin a specific conversation to keep it at the top of the list. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation pinned successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /conversations/{c_id}/unpin + +#### PATCH +##### Description + +Unpin a specific conversation to remove it from the top of the list. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| c_id | path | Conversation UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Conversation unpinned successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /email-code-login + +#### POST +##### Description + +Send email verification code for login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginSendPayload](#emailcodeloginsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Email code sent successfully | +| 400 | Bad request - invalid email format | +| 404 | Account not found | + +### /email-code-login/validity + +#### POST +##### Description + +Verify email code and complete login + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [EmailCodeLoginVerifyPayload](#emailcodeloginverifypayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Email code verified and login successful | +| 400 | Bad request - invalid code or token | +| 401 | Invalid token or expired code | +| 404 | Account not found | + +### /files/upload + +#### POST +##### Summary + +Upload a file for use in web applications + +##### Description + +Upload a file for use in web applications +Accepts file uploads for use within web applications, supporting +multiple file types with automatic validation and storage. 
+ +Args: + app_model: The associated application model + end_user: The end user uploading the file + +Form Parameters: + file: The file to upload (required) + source: Optional source type (datasets or None) + +Returns: + dict: File information including ID, URL, and metadata + int: HTTP status code 201 for success + +Raises: + NoFileUploadedError: No file provided in request + TooManyFilesError: Multiple files provided (only one allowed) + FilenameNotExistsError: File has no filename + FileTooLargeError: File exceeds size limit + UnsupportedFileTypeError: File type not supported + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | File uploaded successfully | [FileResponse](#fileresponse) | +| 400 | Bad request - invalid file or parameters | | +| 413 | File too large | | +| 415 | Unsupported file type | | + +### /forgot-password + +#### POST +##### Description + +Send password reset email + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordSendPayload](#forgotpasswordsendpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Password reset email sent successfully | +| 400 | Bad request - invalid email format | +| 404 | Account not found | +| 429 | Too many requests - rate limit exceeded | + +### /forgot-password/resets + +#### POST +##### Description + +Reset user password with verification token + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordResetPayload](#forgotpasswordresetpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Password reset successfully | +| 400 | Bad request - invalid parameters or password mismatch | +| 401 | Invalid or expired token | +| 404 | Account not found | + +### 
/forgot-password/validity + +#### POST +##### Description + +Verify password reset token validity + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [ForgotPasswordCheckPayload](#forgotpasswordcheckpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Token is valid | +| 400 | Bad request - invalid token format | +| 401 | Invalid or expired token | + +### /form/human_input/{form_token} + +#### GET +##### Summary + +Get human input form definition by token + +##### Description + +GET /api/form/human_input/ + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +#### POST +##### Summary + +Submit human input form by token + +##### Description + +POST /api/form/human_input/ + +Request body: +{ + "inputs": { + "content": "User input content" + }, + "action": "Approve" +} + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| form_token | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +### /login + +#### POST +##### Summary + +Authenticate user and login + +##### Description + +Authenticate user for web application access + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [LoginPayload](#loginpayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Authentication successful | +| 400 | Bad request - invalid email or password format | +| 401 | Authentication failed - email or password mismatch | +| 403 | Account banned or login 
disabled | +| 404 | Account not found | + +### /login/status + +#### GET +##### Description + +Check login status + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Login status | +| 401 | Login status | + +### /logout + +#### POST +##### Description + +Logout user from web application + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Logout successful | + +### /messages + +#### GET +##### Description + +Retrieve paginated list of messages from a conversation in a chat application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| conversation_id | query | Conversation UUID | Yes | string | +| first_id | query | First message ID for pagination | No | string | +| limit | query | Number of messages to return (1-100) | No | integer | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Conversation Not Found or Not a Chat App | +| 500 | Internal Server Error | + +### /messages/{message_id}/feedbacks + +#### POST +##### Description + +Submit feedback (like/dislike) for a specific message. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID | Yes | string | +| content | query | Feedback content | No | string | +| rating | query | Feedback rating | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Feedback submitted successfully | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /messages/{message_id}/more-like-this + +#### GET +##### Description + +Generate a new completion similar to an existing message (completion apps only). 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | | Yes | string | +| payload | body | | Yes | [MessageMoreLikeThisQuery](#messagemorelikethisquery) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a completion app or feature disabled | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /messages/{message_id}/suggested-questions + +#### GET +##### Description + +Get suggested follow-up questions after a message (chat apps only). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a chat app or feature disabled | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found or Conversation Not Found | +| 500 | Internal Server Error | + +### /meta + +#### GET +##### Summary + +Get app meta + +##### Description + +Retrieve the metadata for a specific app. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /parameters + +#### GET +##### Summary + +Retrieve app parameters + +##### Description + +Retrieve the parameters for a specific app. 
+ +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /passport + +#### GET +##### Description + +Get authentication passport for web application access + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Passport retrieved successfully | +| 401 | Unauthorized - missing app code or invalid authentication | +| 404 | Application or user not found | + +### /remote-files/upload + +#### POST +##### Summary + +Upload a file from a remote URL + +##### Description + +Upload a file from a remote URL +Downloads a file from the provided remote URL and uploads it +to the platform storage for use in web applications. + +Args: + app_model: The associated application model + end_user: The end user making the request + +JSON Parameters: + url: The remote URL to download the file from (required) + +Returns: + dict: File information including ID, signed URL, and metadata + int: HTTP status code 201 for success + +Raises: + RemoteFileUploadError: Failed to fetch file from remote URL + FileTooLargeError: File exceeds size limit + UnsupportedFileTypeError: File type not supported + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 201 | Remote file uploaded successfully | [FileWithSignedUrl](#filewithsignedurl) | +| 400 | Bad request - invalid URL or parameters | | +| 413 | File too large | | +| 415 | Unsupported file type | | +| 500 | Failed to fetch remote file | | + +### /remote-files/{url} + +#### GET +##### Summary + +Get information about a remote file + +##### Description + +Get information about a remote file +Retrieves basic information about a file located at a remote URL, +including content type and content length. 
+ +Args: + app_model: The associated application model + end_user: The end user making the request + url: URL-encoded path to the remote file + +Returns: + dict: Remote file information including type and length + +Raises: + HTTPException: If the remote file cannot be accessed + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| url | path | | Yes | string | + +##### Responses + +| Code | Description | Schema | +| ---- | ----------- | ------ | +| 200 | Remote file information retrieved successfully | [RemoteFileInfo](#remotefileinfo) | +| 400 | Bad request - invalid URL | | +| 404 | Remote file not found | | +| 500 | Failed to fetch remote file | | + +### /saved-messages + +#### GET +##### Description + +Retrieve paginated list of saved messages for a completion application. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| last_id | query | Last message ID for pagination | No | string | +| limit | query | Number of messages to return (1-100) | No | integer | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +#### POST +##### Description + +Save a specific message for later reference. 
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | query | Message UUID to save | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Message saved successfully | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /saved-messages/{message_id} + +#### DELETE +##### Description + +Remove a message from saved messages. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| message_id | path | Message UUID to delete | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 204 | Message removed successfully | +| 400 | Bad Request - Not a completion app | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | Message Not Found | +| 500 | Internal Server Error | + +### /site + +#### GET +##### Summary + +Retrieve app site info + +##### Description + +Retrieve app site information and configuration. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 404 | App Not Found | +| 500 | Internal Server Error | + +### /system-features + +#### GET +##### Summary + +Get system feature flags and configuration + +##### Description + +Get system feature flags and configuration +Returns the current system feature flags and configuration +that control various functionalities across the platform. + +Returns: + dict: System feature configuration object + +This endpoint is akin to the `SystemFeatureApi` endpoint in api/controllers/console/feature.py, +except it is intended for use by the web app, instead of the console dashboard. 
+ +NOTE: This endpoint is unauthenticated by design, as it provides system features +data required for webapp initialization. + +Authentication would create circular dependency (can't authenticate without webapp loading). + +Only non-sensitive configuration data should be returned by this endpoint. + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | System features retrieved successfully | +| 500 | Internal server error | + +### /text-to-audio + +#### POST +##### Summary + +Convert text to audio + +##### Description + +Convert text to audio using text-to-speech service. + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| payload | body | | Yes | [TextToAudioPayload](#texttoaudiopayload) | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 401 | Unauthorized | +| 403 | Forbidden | +| 500 | Internal Server Error | + +### /webapp/access-mode + +#### GET +##### Description + +Retrieve the access mode for a web application (public or restricted). + +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| appCode | query | Application code | No | string | +| appId | query | Application ID | No | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | +| 400 | Bad Request | +| 500 | Internal Server Error | + +### /webapp/permission + +#### GET +##### Description + +Check if user has permission to access a web application. 
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| appId | query | Application ID | Yes | string |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Success |
+| 400 | Bad Request |
+| 401 | Unauthorized |
+| 500 | Internal Server Error |
+
+### /workflows/run
+
+#### POST
+##### Summary
+
+Run workflow
+
+##### Description
+
+Execute a workflow with provided inputs and files.
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Success |
+| 400 | Bad Request |
+| 401 | Unauthorized |
+| 403 | Forbidden |
+| 404 | App Not Found |
+| 500 | Internal Server Error |
+
+### /workflows/tasks/{task_id}/stop
+
+#### POST
+##### Summary
+
+Stop workflow task
+
+##### Description
+
+Stop a running workflow task.
+
+##### Parameters
+
+| Name | Located in | Description | Required | Schema |
+| ---- | ---------- | ----------- | -------- | ------ |
+| task_id | path | Task ID to stop | Yes | string |
+
+##### Responses
+
+| Code | Description |
+| ---- | ----------- |
+| 200 | Success |
+| 400 | Bad Request |
+| 401 | Unauthorized |
+| 403 | Forbidden |
+| 404 | Task Not Found |
+| 500 | Internal Server Error |
+
+---
+## default
+Default namespace
+
+### /workflow/{task_id}/events
+
+#### GET
+##### Summary
+
+Get workflow execution events stream after resume
+
+##### Description
+
+GET /api/workflow/{task_id}/events
+
+Returns Server-Sent Events stream.
+ +##### Parameters + +| Name | Located in | Description | Required | Schema | +| ---- | ---------- | ----------- | -------- | ------ | +| task_id | path | | Yes | string | + +##### Responses + +| Code | Description | +| ---- | ----------- | +| 200 | Success | + +--- +### Models + +#### AppAccessModeQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| appCode | | Application code | No | +| appId | | Application ID | No | + +#### ChatMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | Conversation ID | No | +| files | | Files to be processed | No | +| inputs | object | Input variables for the chat | Yes | +| parent_message_id | | Parent message ID | No | +| query | string | User query/message | Yes | +| response_mode | | Response mode: blocking or streaming | No | +| retriever_from | string | Source of retriever | No | + +#### CompletionMessagePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | Files to be processed | No | +| inputs | object | Input variables for the completion | Yes | +| query | string | Query text for completion | No | +| response_mode | | Response mode: blocking or streaming | No | +| retriever_from | string | Source of retriever | No | + +#### ConversationListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | +| pinned | | | No | +| sort_by | string | *Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No | + +#### ConversationRenamePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| auto_generate | boolean | | No | +| name | | | No | + +#### EmailCodeLoginSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### 
EmailCodeLoginVerifyPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### FileResponse + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| conversation_id | | | No | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| file_key | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| original_url | | | No | +| preview_url | | | No | +| size | integer | | Yes | +| source_url | | | No | +| tenant_id | | | No | +| user_id | | | No | + +#### FileWithSignedUrl + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| created_at | | | No | +| created_by | | | No | +| extension | | | No | +| id | string | | Yes | +| mime_type | | | No | +| name | string | | Yes | +| size | integer | | Yes | +| url | | | No | + +#### ForgotPasswordCheckPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| code | string | | Yes | +| email | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordResetPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| new_password | string | | Yes | +| password_confirm | string | | Yes | +| token | string | | Yes | + +#### ForgotPasswordSendPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| language | | | No | + +#### LoginPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| email | string | | Yes | +| password | string | | Yes | + +#### MessageFeedbackPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| content | | | No | +| rating | | | No | + +#### MessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- 
| +| conversation_id | string | Conversation UUID | Yes | +| first_id | | First message ID for pagination | No | +| limit | integer | Number of messages to return (1-100) | No | + +#### MessageMoreLikeThisQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| response_mode | string | Response mode
*Enum:* `"blocking"`, `"streaming"` | Yes | + +#### RemoteFileInfo + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| file_length | integer | | Yes | +| file_type | string | | Yes | + +#### RemoteFileUploadPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| url | string (uri) | Remote file URL | Yes | + +#### SavedMessageCreatePayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | string | | Yes | + +#### SavedMessageListQuery + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| last_id | | | No | +| limit | integer | | No | + +#### TextToAudioPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| message_id | | Message ID | No | +| streaming | | Enable streaming response | No | +| text | | Text to convert to audio | No | +| voice | | Voice to use for TTS | No | + +#### WorkflowRunPayload + +| Name | Type | Description | Required | +| ---- | ---- | ----------- | -------- | +| files | | | No | +| inputs | object | | Yes | diff --git a/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py b/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py new file mode 100644 index 0000000000..62d3d79cf1 --- /dev/null +++ b/api/tests/unit_tests/commands/test_generate_swagger_markdown_docs.py @@ -0,0 +1,103 @@ +"""Unit tests for the Markdown API docs generator.""" + +import importlib.util +import sys +from pathlib import Path + + +def _load_generate_swagger_markdown_docs_module(): + api_dir = Path(__file__).resolve().parents[3] + script_path = api_dir / "dev" / "generate_swagger_markdown_docs.py" + + spec = importlib.util.spec_from_file_location("generate_swagger_markdown_docs", script_path) + assert spec + assert spec.loader + + module = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = module + 
spec.loader.exec_module(module) # type: ignore[attr-defined] + return module + + +def test_generate_markdown_docs_keeps_split_docs_and_merges_fastopenapi_into_console(tmp_path, monkeypatch): + module = _load_generate_swagger_markdown_docs_module() + swagger_dir = tmp_path / "openapi" + markdown_dir = tmp_path / "markdown" + stale_combined_doc = markdown_dir / "api-reference.md" + markdown_dir.mkdir() + stale_combined_doc.write_text("stale", encoding="utf-8") + + def write_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + paths = [] + for target in module.SPEC_TARGETS: + path = output_dir / target.filename + path.write_text("{}", encoding="utf-8") + paths.append(path) + return paths + + def write_fastopenapi_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + path = output_dir / module.FASTOPENAPI_SPEC_TARGETS[0].filename + path.write_text("{}", encoding="utf-8") + return [path] + + def convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + markdown_path.write_text(f"# {spec_path.stem}\n\n## Routes\n", encoding="utf-8") + + monkeypatch.setattr(module, "generate_specs", write_specs) + monkeypatch.setattr(module, "generate_fastopenapi_specs", write_fastopenapi_specs) + monkeypatch.setattr(module, "_convert_spec_to_markdown", convert_spec_to_markdown) + + written_paths = module.generate_markdown_docs(swagger_dir, markdown_dir) + + assert [path.name for path in written_paths] == [ + "console-swagger.md", + "web-swagger.md", + "service-swagger.md", + ] + assert not stale_combined_doc.exists() + assert not list(swagger_dir.glob("*.json")) + + console_markdown = (markdown_dir / "console-swagger.md").read_text(encoding="utf-8") + assert "## FastOpenAPI Preview (OpenAPI 3.0)" in console_markdown + assert "### fastopenapi-console-openapi" in console_markdown + assert "#### Routes" in console_markdown + assert "FastOpenAPI Preview" not in (markdown_dir / 
"web-swagger.md").read_text(encoding="utf-8") + assert "FastOpenAPI Preview" not in (markdown_dir / "service-swagger.md").read_text(encoding="utf-8") + + +def test_generate_markdown_docs_only_removes_generated_specs_from_separate_swagger_dir(tmp_path, monkeypatch): + module = _load_generate_swagger_markdown_docs_module() + swagger_dir = tmp_path / "swagger" + markdown_dir = tmp_path / "markdown" + swagger_dir.mkdir() + existing_file = swagger_dir / "existing.txt" + existing_file.write_text("keep me", encoding="utf-8") + + def write_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + paths = [] + for target in module.SPEC_TARGETS: + path = output_dir / target.filename + path.write_text("{}", encoding="utf-8") + paths.append(path) + return paths + + def write_fastopenapi_specs(output_dir: Path) -> list[Path]: + output_dir.mkdir(parents=True, exist_ok=True) + path = output_dir / module.FASTOPENAPI_SPEC_TARGETS[0].filename + path.write_text("{}", encoding="utf-8") + return [path] + + def convert_spec_to_markdown(spec_path: Path, markdown_path: Path) -> None: + markdown_path.write_text(f"# {spec_path.stem}\n", encoding="utf-8") + + monkeypatch.setattr(module, "generate_specs", write_specs) + monkeypatch.setattr(module, "generate_fastopenapi_specs", write_fastopenapi_specs) + monkeypatch.setattr(module, "_convert_spec_to_markdown", convert_spec_to_markdown) + + module.generate_markdown_docs(swagger_dir, markdown_dir) + + assert existing_file.read_text(encoding="utf-8") == "keep me" + assert not list(swagger_dir.glob("*.json")) diff --git a/api/tests/unit_tests/commands/test_generate_swagger_specs.py b/api/tests/unit_tests/commands/test_generate_swagger_specs.py index e77e875081..79a577087d 100644 --- a/api/tests/unit_tests/commands/test_generate_swagger_specs.py +++ b/api/tests/unit_tests/commands/test_generate_swagger_specs.py @@ -6,6 +6,16 @@ import sys from pathlib import Path +def _walk_values(value): + yield value + if 
isinstance(value, dict): + for child in value.values(): + yield from _walk_values(child) + elif isinstance(value, list): + for child in value: + yield from _walk_values(child) + + def _load_generate_swagger_specs_module(): api_dir = Path(__file__).resolve().parents[3] script_path = api_dir / "dev" / "generate_swagger_specs.py" @@ -35,3 +45,32 @@ def test_generate_specs_writes_console_web_and_service_swagger_files(tmp_path): payload = json.loads(path.read_text(encoding="utf-8")) assert payload["swagger"] == "2.0" assert "paths" in payload + + +def test_generate_specs_writes_swagger_with_resolvable_references_and_no_nulls(tmp_path): + module = _load_generate_swagger_specs_module() + + written_paths = module.generate_specs(tmp_path) + + for path in written_paths: + payload = json.loads(path.read_text(encoding="utf-8")) + definitions = payload["definitions"] + refs = { + item["$ref"].removeprefix("#/definitions/") + for item in _walk_values(payload) + if isinstance(item, dict) and isinstance(item.get("$ref"), str) + } + + assert refs <= set(definitions) + assert all(value is not None for value in _walk_values(payload)) + + +def test_generate_specs_is_idempotent(tmp_path): + module = _load_generate_swagger_specs_module() + + first_paths = module.generate_specs(tmp_path / "first") + second_paths = module.generate_specs(tmp_path / "second") + + assert [path.name for path in first_paths] == [path.name for path in second_paths] + for first_path, second_path in zip(first_paths, second_paths): + assert first_path.read_text(encoding="utf-8") == second_path.read_text(encoding="utf-8") diff --git a/api/tests/unit_tests/controllers/common/test_schema.py b/api/tests/unit_tests/controllers/common/test_schema.py index 56c8160f02..fbca2539a5 100644 --- a/api/tests/unit_tests/controllers/common/test_schema.py +++ b/api/tests/unit_tests/controllers/common/test_schema.py @@ -17,6 +17,14 @@ class ProductModel(BaseModel): price: float +class ChildModel(BaseModel): + value: str + + +class 
ParentModel(BaseModel): + child: ChildModel + + @pytest.fixture(autouse=True) def mock_console_ns(): """Mock the console_ns to avoid circular imports during test collection.""" @@ -64,6 +72,22 @@ def test_register_schema_model_passes_schema_from_pydantic(): assert schema == expected_schema +def test_register_schema_model_promotes_nested_pydantic_definitions(): + from controllers.common.schema import DEFAULT_REF_TEMPLATE_SWAGGER_2_0, register_schema_model + + namespace = MagicMock(spec=Namespace) + + register_schema_model(namespace, ParentModel) + + called_schemas = {call.args[0]: call.args[1] for call in namespace.schema_model.call_args_list} + parent_schema = ParentModel.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) + + assert set(called_schemas) == {"ParentModel", "ChildModel"} + assert "$defs" not in called_schemas["ParentModel"] + assert called_schemas["ParentModel"]["properties"]["child"]["$ref"] == "#/definitions/ChildModel" + assert called_schemas["ChildModel"] == parent_schema["$defs"]["ChildModel"] + + def test_register_schema_models_registers_multiple_models(): from controllers.common.schema import register_schema_models From e03eb3a76c3062de9083a6b36c37c8b3f63ac608 Mon Sep 17 00:00:00 2001 From: Crazywoola <100913391+crazywoola@users.noreply.github.com> Date: Sat, 9 May 2026 11:14:14 +0800 Subject: [PATCH 04/13] chore: bump LiteLLM for CVE-2026-42208 (#35953) --- api/pyproject.toml | 1 + api/uv.lock | 68 ++++++++++++++++++++++++---------------------- 2 files changed, 37 insertions(+), 32 deletions(-) diff --git a/api/pyproject.toml b/api/pyproject.toml index 69add5c68d..0c488c34d9 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -103,6 +103,7 @@ dify-trace-weave = { workspace = true } default-groups = ["storage", "tools", "vdb-all", "trace-all"] package = false override-dependencies = [ + "litellm>=1.83.7", "pyarrow>=18.0.0", ] diff --git a/api/uv.lock b/api/uv.lock index 6f75c9f6fe..c3db4b514c 100644 --- a/api/uv.lock +++ 
b/api/uv.lock @@ -50,7 +50,10 @@ members = [ "dify-vdb-vikingdb", "dify-vdb-weaviate", ] -overrides = [{ name = "pyarrow", specifier = ">=18.0.0" }] +overrides = [ + { name = "litellm", specifier = ">=1.83.7" }, + { name = "pyarrow", specifier = ">=18.0.0" }, +] [[package]] name = "abnf" @@ -889,14 +892,14 @@ wheels = [ [[package]] name = "click" -version = "8.3.1" +version = "8.1.8" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b9/2e/0090cbf739cee7d23781ad4b89a9894a41538e4fcf4c31dcdd705b78eb8b/click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a", size = 226593, upload-time = "2024-12-21T18:38:44.339Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, + { url = "https://files.pythonhosted.org/packages/7e/d4/7ebdbd03970677812aac39c869717059dbb71a4cfc033ca6e5221787892c/click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2", size = 98188, upload-time = "2024-12-21T18:38:41.666Z" }, ] [[package]] @@ -3355,14 +3358,14 @@ wheels = [ [[package]] name = "importlib-metadata" -version = "8.4.0" +version = "8.5.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "zipp" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/c0/bd/fa8ce65b0a7d4b6d143ec23b0f5fd3f7ab80121078c465bc02baeaab22dc/importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5", size = 54320, upload-time = "2024-08-20T17:11:42.348Z" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/12/33e59336dca5be0c398a7482335911a33aa0e20776128f038019f1a95f1b/importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7", size = 55304, upload-time = "2024-09-11T14:56:08.937Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/14/362d31bf1076b21e1bcdcb0dc61944822ff263937b804a79231df2774d28/importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1", size = 26269, upload-time = "2024-08-20T17:11:41.102Z" }, + { url = "https://files.pythonhosted.org/packages/a0/d9/a1e041c5e7caa9a05c925f4bdbdfb7f006d1f74996af53467bc394c97be7/importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b", size = 26514, upload-time = "2024-09-11T14:56:07.019Z" }, ] [[package]] @@ -3503,7 +3506,7 @@ wheels = [ [[package]] name = "jsonschema" -version = "4.25.1" +version = "4.23.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, @@ -3511,9 +3514,9 @@ dependencies = [ { name = "referencing" }, { name = "rpds-py" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +sdist = { url = "https://files.pythonhosted.org/packages/38/2e/03362ee4034a4c917f697890ccd4aec0800ccf9ded7f511971c75451deec/jsonschema-4.23.0.tar.gz", hash = 
"sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4", size = 325778, upload-time = "2024-07-08T18:40:05.546Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, + { url = "https://files.pythonhosted.org/packages/69/4a/4f9dbeb84e8850557c02365a0eee0649abe5eb1d84af92a25731c6c0f922/jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566", size = 88462, upload-time = "2024-07-08T18:40:00.165Z" }, ] [[package]] @@ -3654,7 +3657,7 @@ wheels = [ [[package]] name = "litellm" -version = "1.83.0" +version = "1.83.14" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -3670,9 +3673,9 @@ dependencies = [ { name = "tiktoken" }, { name = "tokenizers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/22/92/6ce9737554994ca8e536e5f4f6a87cc7c4774b656c9eb9add071caf7d54b/litellm-1.83.0.tar.gz", hash = "sha256:860bebc76c4bb27b4cf90b4a77acd66dba25aced37e3db98750de8a1766bfb7a", size = 17333062, upload-time = "2026-03-31T05:08:25.331Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8d/7c/c095649380adc96c8630273c1768c2ad1e74aa2ee1dd8dd05d218a60569f/litellm-1.83.14.tar.gz", hash = "sha256:24aef9b47cdc424c833e32f3727f411741c690832cd1fe4405e0077144fe09c9", size = 14836599, upload-time = "2026-04-26T03:16:10.176Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/19/2c/a670cc050fcd6f45c6199eb99e259c73aea92edba8d5c2fc1b3686d36217/litellm-1.83.0-py3-none-any.whl", hash = "sha256:88c536d339248f3987571493015784671ba3f193a328e1ea6780dbebaa2094a8", size = 15610306, upload-time = "2026-03-31T05:08:21.987Z" }, + { url = 
"https://files.pythonhosted.org/packages/7f/5c/1b5691575420135e90578543b2bf219497caa33cfd0af64cb38f30288450/litellm-1.83.14-py3-none-any.whl", hash = "sha256:92b11ba2a32cf80707ddf388d18526696c7999a21b418c5e3b6eda1243d2cfdb", size = 16457054, upload-time = "2026-04-26T03:16:05.72Z" }, ] [[package]] @@ -4135,7 +4138,7 @@ wheels = [ [[package]] name = "openai" -version = "2.8.1" +version = "2.24.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -4147,9 +4150,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d5/e4/42591e356f1d53c568418dc7e30dcda7be31dd5a4d570bca22acb0525862/openai-2.8.1.tar.gz", hash = "sha256:cb1b79eef6e809f6da326a7ef6038719e35aa944c42d081807bfa1be8060f15f", size = 602490, upload-time = "2025-11-17T22:39:59.549Z" } +sdist = { url = "https://files.pythonhosted.org/packages/55/13/17e87641b89b74552ed408a92b231283786523edddc95f3545809fab673c/openai-2.24.0.tar.gz", hash = "sha256:1e5769f540dbd01cb33bc4716a23e67b9d695161a734aff9c5f925e2bf99a673", size = 658717, upload-time = "2026-02-24T20:02:07.958Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/55/4f/dbc0c124c40cb390508a82770fb9f6e3ed162560181a85089191a851c59a/openai-2.8.1-py3-none-any.whl", hash = "sha256:c6c3b5a04994734386e8dad3c00a393f56d3b68a27cd2e8acae91a59e4122463", size = 1022688, upload-time = "2025-11-17T22:39:57.675Z" }, + { url = "https://files.pythonhosted.org/packages/c9/30/844dc675ee6902579b8eef01ed23917cc9319a1c9c0c14ec6e39340c96d0/openai-2.24.0-py3-none-any.whl", hash = "sha256:fed30480d7d6c884303287bde864980a4b137b60553ffbcf9ab4a233b7a73d94", size = 1120122, upload-time = "2026-02-24T20:02:05.669Z" }, ] [[package]] @@ -6447,27 +6450,28 @@ wheels = [ [[package]] name = "tokenizers" -version = "0.22.1" +version = "0.22.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "huggingface-hub" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/1c/46/fb6854cec3278fbfa4a75b50232c77622bc517ac886156e6afbfa4d8fc6e/tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9", size = 363123, upload-time = "2025-09-19T09:49:23.424Z" } +sdist = { url = "https://files.pythonhosted.org/packages/73/6f/f80cfef4a312e1fb34baf7d85c72d4411afde10978d4657f8cdd811d3ccc/tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917", size = 372115, upload-time = "2026-01-05T10:45:15.988Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/33/f4b2d94ada7ab297328fc671fed209368ddb82f965ec2224eb1892674c3a/tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73", size = 3069318, upload-time = "2025-09-19T09:49:11.848Z" }, - { url = "https://files.pythonhosted.org/packages/1c/58/2aa8c874d02b974990e89ff95826a4852a8b2a273c7d1b4411cdd45a4565/tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc", size = 2926478, upload-time = "2025-09-19T09:49:09.759Z" }, - { url = "https://files.pythonhosted.org/packages/1e/3b/55e64befa1e7bfea963cf4b787b2cea1011362c4193f5477047532ce127e/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a", size = 3256994, upload-time = "2025-09-19T09:48:56.701Z" }, - { url = "https://files.pythonhosted.org/packages/71/0b/fbfecf42f67d9b7b80fde4aabb2b3110a97fac6585c9470b5bff103a80cb/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7", size = 3153141, upload-time = "2025-09-19T09:48:59.749Z" }, - { url = 
"https://files.pythonhosted.org/packages/17/a9/b38f4e74e0817af8f8ef925507c63c6ae8171e3c4cb2d5d4624bf58fca69/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21", size = 3508049, upload-time = "2025-09-19T09:49:05.868Z" }, - { url = "https://files.pythonhosted.org/packages/d2/48/dd2b3dac46bb9134a88e35d72e1aa4869579eacc1a27238f1577270773ff/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214", size = 3710730, upload-time = "2025-09-19T09:49:01.832Z" }, - { url = "https://files.pythonhosted.org/packages/93/0e/ccabc8d16ae4ba84a55d41345207c1e2ea88784651a5a487547d80851398/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f", size = 3412560, upload-time = "2025-09-19T09:49:03.867Z" }, - { url = "https://files.pythonhosted.org/packages/d0/c6/dc3a0db5a6766416c32c034286d7c2d406da1f498e4de04ab1b8959edd00/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4", size = 3250221, upload-time = "2025-09-19T09:49:07.664Z" }, - { url = "https://files.pythonhosted.org/packages/d7/a6/2c8486eef79671601ff57b093889a345dd3d576713ef047776015dc66de7/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879", size = 9345569, upload-time = "2025-09-19T09:49:14.214Z" }, - { url = "https://files.pythonhosted.org/packages/6b/16/32ce667f14c35537f5f605fe9bea3e415ea1b0a646389d2295ec348d5657/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446", size = 9271599, upload-time = "2025-09-19T09:49:16.639Z" }, - { url = 
"https://files.pythonhosted.org/packages/51/7c/a5f7898a3f6baa3fc2685c705e04c98c1094c523051c805cdd9306b8f87e/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a", size = 9533862, upload-time = "2025-09-19T09:49:19.146Z" }, - { url = "https://files.pythonhosted.org/packages/36/65/7e75caea90bc73c1dd8d40438adf1a7bc26af3b8d0a6705ea190462506e1/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390", size = 9681250, upload-time = "2025-09-19T09:49:21.501Z" }, - { url = "https://files.pythonhosted.org/packages/30/2c/959dddef581b46e6209da82df3b78471e96260e2bc463f89d23b1bf0e52a/tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82", size = 2472003, upload-time = "2025-09-19T09:49:27.089Z" }, - { url = "https://files.pythonhosted.org/packages/b3/46/e33a8c93907b631a99377ef4c5f817ab453d0b34f93529421f42ff559671/tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138", size = 2674684, upload-time = "2025-09-19T09:49:24.953Z" }, + { url = "https://files.pythonhosted.org/packages/92/97/5dbfabf04c7e348e655e907ed27913e03db0923abb5dfdd120d7b25630e1/tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c", size = 3100275, upload-time = "2026-01-05T10:41:02.158Z" }, + { url = "https://files.pythonhosted.org/packages/2e/47/174dca0502ef88b28f1c9e06b73ce33500eedfac7a7692108aec220464e7/tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001", size = 2981472, upload-time = "2026-01-05T10:41:00.276Z" }, + { url = 
"https://files.pythonhosted.org/packages/d6/84/7990e799f1309a8b87af6b948f31edaa12a3ed22d11b352eaf4f4b2e5753/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7", size = 3290736, upload-time = "2026-01-05T10:40:32.165Z" }, + { url = "https://files.pythonhosted.org/packages/78/59/09d0d9ba94dcd5f4f1368d4858d24546b4bdc0231c2354aa31d6199f0399/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd", size = 3168835, upload-time = "2026-01-05T10:40:38.847Z" }, + { url = "https://files.pythonhosted.org/packages/47/50/b3ebb4243e7160bda8d34b731e54dd8ab8b133e50775872e7a434e524c28/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5", size = 3521673, upload-time = "2026-01-05T10:40:56.614Z" }, + { url = "https://files.pythonhosted.org/packages/e0/fa/89f4cb9e08df770b57adb96f8cbb7e22695a4cb6c2bd5f0c4f0ebcf33b66/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e", size = 3724818, upload-time = "2026-01-05T10:40:44.507Z" }, + { url = "https://files.pythonhosted.org/packages/64/04/ca2363f0bfbe3b3d36e95bf67e56a4c88c8e3362b658e616d1ac185d47f2/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b", size = 3379195, upload-time = "2026-01-05T10:40:51.139Z" }, + { url = "https://files.pythonhosted.org/packages/2e/76/932be4b50ef6ccedf9d3c6639b056a967a86258c6d9200643f01269211ca/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67", size = 3274982, upload-time = 
"2026-01-05T10:40:58.331Z" }, + { url = "https://files.pythonhosted.org/packages/1d/28/5f9f5a4cc211b69e89420980e483831bcc29dade307955cc9dc858a40f01/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4", size = 9478245, upload-time = "2026-01-05T10:41:04.053Z" }, + { url = "https://files.pythonhosted.org/packages/6c/fb/66e2da4704d6aadebf8cb39f1d6d1957df667ab24cff2326b77cda0dcb85/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a", size = 9560069, upload-time = "2026-01-05T10:45:10.673Z" }, + { url = "https://files.pythonhosted.org/packages/16/04/fed398b05caa87ce9b1a1bb5166645e38196081b225059a6edaff6440fac/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a", size = 9899263, upload-time = "2026-01-05T10:45:12.559Z" }, + { url = "https://files.pythonhosted.org/packages/05/a1/d62dfe7376beaaf1394917e0f8e93ee5f67fea8fcf4107501db35996586b/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5", size = 10033429, upload-time = "2026-01-05T10:45:14.333Z" }, + { url = "https://files.pythonhosted.org/packages/fd/18/a545c4ea42af3df6effd7d13d250ba77a0a86fb20393143bbb9a92e434d4/tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92", size = 2502363, upload-time = "2026-01-05T10:45:20.593Z" }, + { url = "https://files.pythonhosted.org/packages/65/71/0670843133a43d43070abeb1949abfdef12a86d490bea9cd9e18e37c5ff7/tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48", size = 2747786, upload-time = "2026-01-05T10:45:18.411Z" }, + { url = 
"https://files.pythonhosted.org/packages/72/f4/0de46cfa12cdcbcd464cc59fde36912af405696f687e53a091fb432f694c/tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc", size = 2612133, upload-time = "2026-01-05T10:45:17.232Z" }, ] [[package]] From 140ad6ba4e7ec5228f0a09e9bee775a0d6bfdd3e Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Sat, 9 May 2026 12:16:22 +0900 Subject: [PATCH 05/13] chore: add Type to test (#35942) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../aliyun_trace/test_aliyun_trace_utils.py | 9 +- .../langfuse_trace/test_langfuse_trace.py | 18 ++-- .../langsmith_trace/test_langsmith_trace.py | 16 ++-- .../mlflow_trace/test_mlflow_trace.py | 2 +- .../unit_tests/opik_trace/test_opik_trace.py | 18 ++-- .../weave_trace/test_weave_trace.py | 40 ++++---- .../test_alibabacloud_mysql_factory.py | 4 +- .../unit_tests/test_analyticdb_vector.py | 4 +- .../test_analyticdb_vector_openapi.py | 30 +++--- .../unit_tests/test_analyticdb_vector_sql.py | 18 ++-- .../tests/unit_tests/test_baidu_vector.py | 18 ++-- .../tests/unit_tests/test_chroma_vector.py | 6 +- .../unit_tests/test_clickzetta_vector.py | 22 ++--- .../tests/unit_tests/test_couchbase_vector.py | 6 +- .../test_elasticsearch_ja_vector.py | 8 +- .../unit_tests/test_elasticsearch_vector.py | 6 +- .../tests/unit_tests/test_hologres_vector.py | 10 +- .../unit_tests/test_huawei_cloud_vector.py | 8 +- .../tests/unit_tests/test_iris_vector.py | 8 +- .../tests/unit_tests/test_lindorm_vector.py | 8 +- .../tests/unit_tests/test_matrixone_vector.py | 12 +-- .../tests/unit_tests/test_milvus.py | 8 +- .../tests/unit_tests/test_myscale_vector.py | 6 +- .../tests/unit_tests/test_oceanbase_vector.py | 12 +-- .../tests/unit_tests/test_opengauss.py | 20 ++-- .../tests/unit_tests/test_opensearch.py | 4 +- .../unit_tests/test_opensearch_vector.py | 10 +- .../tests/unit_tests/test_oraclevector.py | 18 ++-- 
.../tests/unit_tests/test_pgvecto_rs.py | 12 +-- .../tests/unit_tests/test_pgvector.py | 6 +- .../tests/unit_tests/test_qdrant_vector.py | 6 +- .../tests/unit_tests/test_relyt_vector.py | 18 ++-- .../unit_tests/test_tablestore_vector.py | 6 +- .../tests/unit_tests/test_tencent_vector.py | 6 +- .../tests/unit_tests/test_tidb_vector.py | 18 ++-- .../tests/unit_tests/test_upstash_vector.py | 6 +- .../tests/unit_tests/test_vastbase_vector.py | 10 +- .../tests/unit_tests/test_vikingdb_vector.py | 10 +- .../app/test_chat_message_permissions.py | 8 +- .../console/app/test_feedback_export_api.py | 28 ++++-- .../app/test_model_config_permissions.py | 5 +- .../test_datasource_manager_integration.py | 4 +- .../test_datasource_node_integration.py | 4 +- .../workflow/nodes/test_tool.py | 6 +- .../libs/test_rate_limiter_integration.py | 2 +- .../services/test_agent_service.py | 50 +++++----- .../services/test_app_dsl_service.py | 78 ++++++++++------ .../services/test_app_service.py | 5 +- .../services/test_conversation_service.py | 7 +- .../test_conversation_service_variables.py | 2 +- .../services/test_dataset_service_document.py | 71 ++++++++------ .../test_document_service_display_status.py | 7 +- .../services/test_end_user_service.py | 5 +- .../services/test_feature_service.py | 2 +- .../services/test_feedback_service.py | 2 +- .../test_human_input_delivery_test_service.py | 10 +- ...message_service_execution_extra_content.py | 3 +- .../test_create_segment_to_index_task.py | 6 +- .../tasks/test_dataset_indexing_task.py | 8 +- .../tasks/test_mail_account_deletion_task.py | 3 +- .../tasks/test_mail_email_code_login_task.py | 5 +- .../test_mail_human_input_delivery_task.py | 4 +- .../test_remove_app_and_related_data_task.py | 4 +- .../controllers/common/test_helpers.py | 4 +- .../controllers/common/test_schema.py | 2 +- .../datasets/test_datasets_segments.py | 2 +- .../console/datasets/test_hit_testing.py | 11 ++- .../console/datasets/test_metadata.py | 3 +- 
.../console/datasets/test_website.py | 15 +-- .../console/datasets/test_wraps.py | 9 +- .../controllers/console/test_admin.py | 39 ++++---- .../controllers/console/test_feature.py | 7 +- .../console/workspace/test_models.py | 32 +++---- .../inner_api/plugin/test_plugin_wraps.py | 6 +- .../service_api/app/test_conversation.py | 26 +++--- .../service_api/end_user/test_end_user.py | 7 +- .../output_parser/test_cot_output_parser.py | 3 +- .../core/agent/strategy/test_plugin.py | 5 +- .../core/agent/test_base_agent_runner.py | 93 ++++++++++--------- .../core/agent/test_cot_agent_runner.py | 41 ++++---- .../core/agent/test_cot_chat_agent_runner.py | 11 ++- .../agent/test_cot_completion_agent_runner.py | 20 ++-- .../core/agent/test_fc_agent_runner.py | 9 +- .../core/agent/test_plugin_entities.py | 17 ++-- .../common/test_parameters_mapping.py | 4 +- .../test_sensitive_word_avoidance_manager.py | 11 ++- .../easy_ui_based_app/test_agent_manager.py | 5 +- .../easy_ui_based_app/test_dataset_manager.py | 13 +-- .../test_model_config_converter.py | 19 ++-- .../test_model_config_manager.py | 25 +++-- .../test_prompt_template_manager.py | 27 +++--- .../test_variables_manager.py | 9 +- .../test_base_app_config_manager.py | 15 +-- .../test_workflow_ui_based_app_manager.py | 7 +- .../apps/advanced_chat/test_app_generator.py | 32 +++---- .../test_generate_task_pipeline_core.py | 8 +- .../test_agent_chat_app_config_manager.py | 13 +-- .../test_agent_chat_app_generator.py | 23 ++--- .../agent_chat/test_agent_chat_app_runner.py | 23 ++--- .../app/apps/completion/test_app_runner.py | 11 ++- .../test_completion_app_config_manager.py | 10 +- ...est_completion_completion_app_generator.py | 21 +++-- .../pipeline/test_pipeline_config_manager.py | 6 +- .../apps/pipeline/test_pipeline_generator.py | 33 +++---- .../pipeline/test_pipeline_queue_manager.py | 7 +- .../app/apps/pipeline/test_pipeline_runner.py | 9 +- .../apps/test_advanced_chat_app_generator.py | 4 +- 
.../core/app/apps/test_base_app_generator.py | 2 +- .../core/app/apps/test_base_app_runner.py | 26 +++--- .../apps/test_message_based_app_generator.py | 6 +- .../core/app/apps/test_pause_resume.py | 8 +- .../core/app/apps/test_streaming_utils.py | 4 +- .../app/apps/test_workflow_app_generator.py | 8 +- .../app/apps/test_workflow_app_runner_core.py | 8 +- .../apps/workflow/test_app_generator_extra.py | 4 +- .../test_generate_task_pipeline_core.py | 10 +- ...sy_ui_based_generate_task_pipeline_core.py | 26 +++--- .../core/app/workflow/test_node_factory.py | 20 ++-- .../test_observability_layer_extra.py | 14 +-- .../app/workflow/test_persistence_layer.py | 2 +- .../base/test_app_generator_tts_publisher.py | 15 +-- .../test_agent_tool_callback_handler.py | 48 ++++++---- .../test_index_tool_callback_handler.py | 25 +++-- .../test_workflow_tool_callback_handler.py | 21 +++-- .../datasource/test_datasource_manager.py | 35 +++---- .../test_api_based_extension_requestor.py | 15 +-- .../unit_tests/core/helper/test_creators.py | 2 +- .../core/ops/test_base_trace_instance.py | 2 +- .../core/ops/test_ops_trace_manager.py | 28 +++--- .../core/ops/test_trace_queue_manager.py | 2 +- .../core/plugin/impl/test_agent_client.py | 8 +- .../core/plugin/impl/test_asset_manager.py | 9 +- .../core/plugin/impl/test_base_client_impl.py | 17 ++-- .../plugin/impl/test_datasource_manager.py | 22 +++-- .../core/plugin/impl/test_debugging_client.py | 4 +- .../plugin/impl/test_endpoint_client_impl.py | 13 +-- .../core/plugin/impl/test_exc_impl.py | 8 +- .../core/plugin/impl/test_model_client.py | 61 ++++++------ .../core/plugin/impl/test_oauth_handler.py | 13 +-- .../core/plugin/impl/test_tool_manager.py | 12 ++- .../core/plugin/impl/test_trigger_client.py | 17 ++-- .../plugin/test_backwards_invocation_app.py | 33 +++---- .../core/plugin/test_plugin_entities.py | 3 +- .../prompt/test_extract_thread_messages.py | 6 +- .../core/prompt/test_prompt_transform.py | 2 +- 
.../datasource/keyword/jieba/test_jieba.py | 10 +- .../jieba/test_jieba_keyword_table_handler.py | 14 +-- .../keyword/test_keyword_factory.py | 4 +- .../datasource/test_datasource_retrieval.py | 14 ++- .../rag/datasource/vdb/test_vector_factory.py | 22 +++-- .../core/rag/extractor/test_csv_extractor.py | 8 +- .../rag/extractor/test_excel_extractor.py | 4 +- .../rag/extractor/test_extract_processor.py | 24 +++-- .../core/rag/extractor/test_helpers.py | 4 +- .../rag/extractor/test_markdown_extractor.py | 8 +- .../rag/extractor/test_notion_extractor.py | 12 +-- .../core/rag/extractor/test_pdf_extractor.py | 8 +- .../core/rag/extractor/test_text_extractor.py | 8 +- .../core/rag/extractor/test_word_extractor.py | 16 ++-- .../test_unstructured_extractors.py | 28 +++--- .../extractor/watercrawl/test_watercrawl.py | 32 +++---- .../unit_tests/core/telemetry/test_facade.py | 2 +- .../core/tools/test_builtin_tool_provider.py | 2 +- .../core/tools/test_builtin_tools_extra.py | 20 ++-- .../unit_tests/core/tools/test_custom_tool.py | 6 +- .../core/tools/test_tool_manager.py | 2 +- .../core/tools/utils/test_configuration.py | 6 +- .../tools/utils/test_message_transformer.py | 2 +- .../utils/test_system_oauth_encryption.py | 2 +- .../core/variables/test_segment_type.py | 2 +- .../nodes/datasource/test_datasource_node.py | 4 +- .../test_knowledge_index_node.py | 5 +- .../test_knowledge_retrieval_node.py | 3 +- .../core/workflow/nodes/llm/test_node.py | 4 +- .../core/workflow/test_node_factory.py | 36 +++---- .../core/workflow/test_workflow_entry.py | 2 +- .../workflow/test_workflow_entry_helpers.py | 8 +- .../extensions/test_ext_request_logging.py | 8 +- .../extensions/test_pubsub_channel.py | 6 +- .../redis/test_channel_unit_tests.py | 8 +- .../unit_tests/libs/test_archive_storage.py | 38 ++++---- api/tests/unit_tests/libs/test_pandas.py | 7 +- .../unit_tests/libs/test_rate_limiter.py | 6 +- api/tests/unit_tests/libs/test_token.py | 7 +- .../unit_tests/services/plugin/conftest.py 
| 2 +- .../test_rag_pipeline_task_proxy.py | 3 +- .../services/test_app_generate_service.py | 89 +++++++++--------- ..._generate_service_streaming_integration.py | 6 +- .../test_dataset_service_lock_not_owned.py | 12 +-- .../services/test_human_input_service.py | 17 +++- .../services/test_message_service.py | 6 +- .../test_model_load_balancing_service.py | 50 +++++----- .../services/test_model_provider_service.py | 30 +++--- .../services/test_trigger_provider_service.py | 26 +++--- .../services/test_webhook_service.py | 6 +- .../services/test_workflow_service.py | 28 +++--- .../test_workflow_draft_variable_service.py | 4 +- .../test_workflow_event_snapshot_service.py | 24 ++--- ...kflow_event_snapshot_service_additional.py | 20 ++-- .../tasks/test_workflow_execute_task.py | 4 +- 200 files changed, 1497 insertions(+), 1264 deletions(-) diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py index 1b97746dea..0900dfda97 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py @@ -3,6 +3,7 @@ from collections.abc import Mapping from typing import Any, cast from unittest.mock import MagicMock +import pytest from dify_trace_aliyun.entities.semconv import ( GEN_AI_FRAMEWORK, GEN_AI_SESSION_ID, @@ -31,7 +32,7 @@ from graphon.enums import WorkflowNodeExecutionStatus from models import EndUser -def test_get_user_id_from_message_data_no_end_user(monkeypatch): +def test_get_user_id_from_message_data_no_end_user(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = None @@ -39,7 +40,7 @@ def test_get_user_id_from_message_data_no_end_user(monkeypatch): assert get_user_id_from_message_data(message_data) == "account_id" 
-def test_get_user_id_from_message_data_with_end_user(monkeypatch): +def test_get_user_id_from_message_data_with_end_user(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = "end_user_id" @@ -57,7 +58,7 @@ def test_get_user_id_from_message_data_with_end_user(monkeypatch): assert get_user_id_from_message_data(message_data) == "session_id" -def test_get_user_id_from_message_data_end_user_not_found(monkeypatch): +def test_get_user_id_from_message_data_end_user_not_found(monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.from_account_id = "account_id" message_data.from_end_user_id = "end_user_id" @@ -111,7 +112,7 @@ def test_get_workflow_node_status(): assert status.status_code == StatusCode.UNSET -def test_create_links_from_trace_id(monkeypatch): +def test_create_links_from_trace_id(monkeypatch: pytest.MonkeyPatch): # Mock create_link mock_link = MagicMock(spec=Link) import dify_trace_aliyun.data_exporter.traceclient diff --git a/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py b/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py index 952f10c34f..95e27c791f 100644 --- a/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py +++ b/api/providers/trace/trace-langfuse/tests/unit_tests/langfuse_trace/test_langfuse_trace.py @@ -40,7 +40,7 @@ def langfuse_config(): @pytest.fixture -def trace_instance(langfuse_config, monkeypatch): +def trace_instance(langfuse_config, monkeypatch: pytest.MonkeyPatch): # Mock Langfuse client to avoid network calls mock_client = MagicMock() monkeypatch.setattr("dify_trace_langfuse.langfuse_trace.Langfuse", lambda **kwargs: mock_client) @@ -49,7 +49,7 @@ def trace_instance(langfuse_config, monkeypatch): return instance -def test_init(langfuse_config, monkeypatch): +def test_init(langfuse_config, monkeypatch: 
pytest.MonkeyPatch): mock_langfuse = MagicMock() monkeypatch.setattr("dify_trace_langfuse.langfuse_trace.Langfuse", mock_langfuse) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -64,7 +64,7 @@ def test_init(langfuse_config, monkeypatch): assert instance.file_base_url == "http://test.url" -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -114,7 +114,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace_with_message_id(trace_instance, monkeypatch): +def test_workflow_trace_with_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Setup trace info trace_info = WorkflowTraceInfo( workflow_id="wf-1", @@ -218,7 +218,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch): assert other_span.level == LevelEnum.ERROR -def test_workflow_trace_no_message_id(trace_instance, monkeypatch): +def test_workflow_trace_no_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="wf-1", tenant_id="tenant-1", @@ -259,7 +259,7 @@ def test_workflow_trace_no_message_id(trace_instance, monkeypatch): assert trace_data.name == TraceTaskName.WORKFLOW_TRACE -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="wf-1", tenant_id="tenant-1", @@ -287,7 +287,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): trace_instance.workflow_trace(trace_info) -def test_message_trace_basic(trace_instance, monkeypatch): +def test_message_trace_basic(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ -331,7 +331,7 @@ def 
test_message_trace_basic(trace_instance, monkeypatch): assert gen_data.usage.total == 30 -def test_message_trace_with_end_user(trace_instance, monkeypatch): +def test_message_trace_with_end_user(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ -636,7 +636,7 @@ def test_langfuse_trace_entity_with_list_dict_input(): assert data.input[0]["content"] == "hello" -def test_workflow_trace_handles_usage_extraction_error(trace_instance, monkeypatch, caplog): +def test_workflow_trace_handles_usage_extraction_error(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): # Setup trace info to trigger LLM node usage extraction trace_info = WorkflowTraceInfo( workflow_id="wf-1", diff --git a/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py b/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py index 45e5894e4a..ee59acb17e 100644 --- a/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py +++ b/api/providers/trace/trace-langsmith/tests/unit_tests/langsmith_trace/test_langsmith_trace.py @@ -35,7 +35,7 @@ def langsmith_config(): @pytest.fixture -def trace_instance(langsmith_config, monkeypatch): +def trace_instance(langsmith_config, monkeypatch: pytest.MonkeyPatch): # Mock LangSmith client mock_client = MagicMock() monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.Client", lambda **kwargs: mock_client) @@ -44,7 +44,7 @@ def trace_instance(langsmith_config, monkeypatch): return instance -def test_init(langsmith_config, monkeypatch): +def test_init(langsmith_config, monkeypatch: pytest.MonkeyPatch): mock_client_class = MagicMock() monkeypatch.setattr("dify_trace_langsmith.langsmith_trace.Client", mock_client_class) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -57,7 +57,7 @@ def test_init(langsmith_config, monkeypatch): assert instance.file_base_url == 
"http://test.url" -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -107,7 +107,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace(trace_instance, monkeypatch): +def test_workflow_trace(trace_instance, monkeypatch: pytest.MonkeyPatch): # Setup trace info workflow_data = MagicMock() workflow_data.created_at = _dt() @@ -223,7 +223,7 @@ def test_workflow_trace(trace_instance, monkeypatch): assert call_args[4].run_type == LangSmithRunType.retriever -def test_workflow_trace_no_start_time(trace_instance, monkeypatch): +def test_workflow_trace_no_start_time(trace_instance, monkeypatch: pytest.MonkeyPatch): workflow_data = MagicMock() workflow_data.created_at = _dt() workflow_data.finished_at = _dt() + timedelta(seconds=1) @@ -266,7 +266,7 @@ def test_workflow_trace_no_start_time(trace_instance, monkeypatch): assert trace_instance.add_run.called -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = MagicMock(spec=WorkflowTraceInfo) trace_info.trace_id = "trace-1" trace_info.message_id = None @@ -290,7 +290,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): trace_instance.workflow_trace(trace_info) -def test_message_trace(trace_instance, monkeypatch): +def test_message_trace(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "msg-1" message_data.from_account_id = "acc-1" @@ -516,7 +516,7 @@ def test_update_run_error(trace_instance): trace_instance.update_run(update_data) -def test_workflow_trace_usage_extraction_error(trace_instance, monkeypatch, caplog): +def test_workflow_trace_usage_extraction_error(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): workflow_data 
= MagicMock() workflow_data.created_at = _dt() workflow_data.finished_at = _dt() + timedelta(seconds=1) diff --git a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py index 46c9750a5d..324f894b25 100644 --- a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py +++ b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py @@ -614,7 +614,7 @@ class TestMessageTrace: span.set_status.assert_called_once() span.add_event.assert_called_once() - def test_message_trace_with_file_data(self, trace_instance, mock_tracing, mock_db, monkeypatch): + def test_message_trace_with_file_data(self, trace_instance, mock_tracing, mock_db, monkeypatch: pytest.MonkeyPatch): span = MagicMock() mock_tracing["start"].return_value = span mock_tracing["set"].return_value = "token" diff --git a/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py b/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py index eefed3c78c..5daaa7132c 100644 --- a/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py +++ b/api/providers/trace/trace-opik/tests/unit_tests/opik_trace/test_opik_trace.py @@ -35,7 +35,7 @@ def opik_config(): @pytest.fixture -def trace_instance(opik_config, monkeypatch): +def trace_instance(opik_config, monkeypatch: pytest.MonkeyPatch): mock_client = MagicMock() monkeypatch.setattr("dify_trace_opik.opik_trace.Opik", lambda **kwargs: mock_client) @@ -65,7 +65,7 @@ def test_prepare_opik_uuid(): assert result is not None -def test_init(opik_config, monkeypatch): +def test_init(opik_config, monkeypatch: pytest.MonkeyPatch): mock_opik = MagicMock() monkeypatch.setattr("dify_trace_opik.opik_trace.Opik", mock_opik) monkeypatch.setenv("FILES_URL", "http://test.url") @@ -82,7 +82,7 @@ def test_init(opik_config, monkeypatch): assert instance.project == 
opik_config.project -def test_trace_dispatch(trace_instance, monkeypatch): +def test_trace_dispatch(trace_instance, monkeypatch: pytest.MonkeyPatch): methods = [ "workflow_trace", "message_trace", @@ -132,7 +132,7 @@ def test_trace_dispatch(trace_instance, monkeypatch): mocks["generate_name_trace"].assert_called_once_with(info) -def test_workflow_trace_with_message_id(trace_instance, monkeypatch): +def test_workflow_trace_with_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability WORKFLOW_ID = "fb05c7cd-6cec-4add-8a84-df03a408b4ce" WORKFLOW_RUN_ID = "33c67568-7a8a-450e-8916-a5f135baeaef" @@ -221,7 +221,7 @@ def test_workflow_trace_with_message_id(trace_instance, monkeypatch): assert trace_instance.add_span.call_count >= 1 -def test_workflow_trace_no_message_id(trace_instance, monkeypatch): +def test_workflow_trace_no_message_id(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability WORKFLOW_ID = "f0708b36-b1d7-42b3-a876-1d01b7d8f1a3" WORKFLOW_RUN_ID = "d42ec285-c2fd-4248-8866-5c9386b101ac" @@ -265,7 +265,7 @@ def test_workflow_trace_no_message_id(trace_instance, monkeypatch): trace_instance.add_trace.assert_called_once() -def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): +def test_workflow_trace_missing_app_id(trace_instance, monkeypatch: pytest.MonkeyPatch): trace_info = WorkflowTraceInfo( workflow_id="5745f1b8-f8e6-4859-8110-996acb6c8d6a", tenant_id="tenant-1", @@ -293,7 +293,7 @@ def test_workflow_trace_missing_app_id(trace_instance, monkeypatch): trace_instance.workflow_trace(trace_info) -def test_message_trace_basic(trace_instance, monkeypatch): +def test_message_trace_basic(trace_instance, monkeypatch: pytest.MonkeyPatch): # Define constants for better readability MESSAGE_DATA_ID = "e3a26712-8cac-4a25-94a4-a3bff21ee3ab" CONVERSATION_ID = "9d3f3751-7521-4c19-9307-20e3cf6789a3" @@ -340,7 +340,7 @@ def test_message_trace_basic(trace_instance, monkeypatch): 
trace_instance.add_span.assert_called_once() -def test_message_trace_with_end_user(trace_instance, monkeypatch): +def test_message_trace_with_end_user(trace_instance, monkeypatch: pytest.MonkeyPatch): message_data = MagicMock() message_data.id = "85411059-79fb-4deb-a76c-c2e215f1b97e" message_data.from_account_id = "acc-1" @@ -614,7 +614,7 @@ def test_get_project_url_error(trace_instance): trace_instance.get_project_url() -def test_workflow_trace_usage_extraction_error_fixed(trace_instance, monkeypatch, caplog): +def test_workflow_trace_usage_extraction_error_fixed(trace_instance, monkeypatch: pytest.MonkeyPatch, caplog): trace_info = WorkflowTraceInfo( workflow_id="86a52565-4a6b-4a1b-9bfd-98e4595e70de", tenant_id="66e8e918-472e-4b69-8051-12502c34fc07", diff --git a/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py b/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py index 6028d0c550..30646815d8 100644 --- a/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py +++ b/api/providers/trace/trace-weave/tests/unit_tests/weave_trace/test_weave_trace.py @@ -267,14 +267,14 @@ class TestInit: with pytest.raises(ValueError, match="Weave login failed"): WeaveDataTrace(config) - def test_init_files_url_from_env(self, mock_wandb, mock_weave, monkeypatch): + def test_init_files_url_from_env(self, mock_wandb, mock_weave, monkeypatch: pytest.MonkeyPatch): """Test FILES_URL is read from environment.""" monkeypatch.setenv("FILES_URL", "http://files.example.com") config = _make_weave_config() instance = WeaveDataTrace(config) assert instance.file_base_url == "http://files.example.com" - def test_init_files_url_default(self, mock_wandb, mock_weave, monkeypatch): + def test_init_files_url_default(self, mock_wandb, mock_weave, monkeypatch: pytest.MonkeyPatch): """Test FILES_URL defaults to http://127.0.0.1:5001.""" monkeypatch.delenv("FILES_URL", raising=False) config = _make_weave_config() @@ -302,7 
+302,7 @@ class TestGetProjectUrl: url = instance.get_project_url() assert url == "https://wandb.ai/my-project" - def test_get_project_url_exception_raises(self, trace_instance, monkeypatch): + def test_get_project_url_exception_raises(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Raises ValueError when exception occurs in get_project_url.""" monkeypatch.setattr(trace_instance, "entity", None) monkeypatch.setattr(trace_instance, "project_name", None) @@ -583,7 +583,7 @@ class TestFinishCall: class TestWorkflowTrace: - def _setup_repo(self, monkeypatch, nodes=None): + def _setup_repo(self, monkeypatch: pytest.MonkeyPatch, nodes=None): """Helper to patch session/repo dependencies.""" if nodes is None: nodes = [] @@ -599,7 +599,7 @@ class TestWorkflowTrace: monkeypatch.setattr("dify_trace_weave.weave_trace.db", MagicMock(engine="engine")) return repo - def test_workflow_trace_no_nodes_no_message_id(self, trace_instance, monkeypatch): + def test_workflow_trace_no_nodes_no_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace with no nodes and no message_id.""" self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -614,7 +614,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 1 assert trace_instance.finish_call.call_count == 1 - def test_workflow_trace_with_message_id(self, trace_instance, monkeypatch): + def test_workflow_trace_with_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace with message_id creates both message and workflow runs.""" self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -629,7 +629,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 2 assert trace_instance.finish_call.call_count == 2 - def test_workflow_trace_with_node_execution(self, 
trace_instance, monkeypatch): + def test_workflow_trace_with_node_execution(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Workflow trace iterates node executions and creates node runs.""" node = _make_node( id="node-1", @@ -652,7 +652,7 @@ class TestWorkflowTrace: # workflow run + node run = 2 calls assert trace_instance.start_call.call_count == 2 - def test_workflow_trace_with_llm_node(self, trace_instance, monkeypatch): + def test_workflow_trace_with_llm_node(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """LLM node uses process_data prompts as inputs.""" node = _make_node( node_type=BuiltinNodeTypes.LLM, @@ -680,7 +680,7 @@ class TestWorkflowTrace: # The key "messages" should be present (validator transforms the list) assert "messages" in node_run.inputs - def test_workflow_trace_with_non_llm_node_uses_inputs(self, trace_instance, monkeypatch): + def test_workflow_trace_with_non_llm_node_uses_inputs(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Non-LLM node uses node_execution.inputs directly.""" node = _make_node( node_type=BuiltinNodeTypes.TOOL, @@ -701,7 +701,7 @@ class TestWorkflowTrace: node_run = node_call_args[0][0] assert node_run.inputs.get("tool_input") == "val" - def test_workflow_trace_missing_app_id_raises(self, trace_instance, monkeypatch): + def test_workflow_trace_missing_app_id_raises(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Raises ValueError when app_id is missing from metadata.""" monkeypatch.setattr("dify_trace_weave.weave_trace.sessionmaker", lambda bind: MagicMock()) monkeypatch.setattr("dify_trace_weave.weave_trace.db", MagicMock(engine="engine")) @@ -714,7 +714,7 @@ class TestWorkflowTrace: with pytest.raises(ValueError, match="No app_id found in trace_info metadata"): trace_instance.workflow_trace(trace_info) - def test_workflow_trace_start_time_none_defaults_to_now(self, trace_instance, monkeypatch): + def test_workflow_trace_start_time_none_defaults_to_now(self, trace_instance, 
monkeypatch: pytest.MonkeyPatch): """start_time defaults to datetime.now() when None.""" self._setup_repo(monkeypatch, nodes=[]) monkeypatch.setattr(trace_instance, "get_service_account_with_tenant", lambda app_id: MagicMock()) @@ -727,7 +727,7 @@ class TestWorkflowTrace: assert trace_instance.start_call.call_count == 1 - def test_workflow_trace_node_created_at_none(self, trace_instance, monkeypatch): + def test_workflow_trace_node_created_at_none(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Node with created_at=None uses datetime.now().""" node = _make_node(created_at=None, elapsed_time=0.5) self._setup_repo(monkeypatch, nodes=[node]) @@ -740,7 +740,7 @@ class TestWorkflowTrace: trace_instance.workflow_trace(trace_info) assert trace_instance.start_call.call_count == 2 - def test_workflow_trace_chat_mode_llm_node_adds_provider(self, trace_instance, monkeypatch): + def test_workflow_trace_chat_mode_llm_node_adds_provider(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Chat mode LLM node adds ls_provider and ls_model_name to attributes.""" node = _make_node( node_type=BuiltinNodeTypes.LLM, @@ -765,7 +765,7 @@ class TestWorkflowTrace: assert node_run.attributes.get("ls_provider") == "openai" assert node_run.attributes.get("ls_model_name") == "gpt-4" - def test_workflow_trace_nodes_sorted_by_created_at(self, trace_instance, monkeypatch): + def test_workflow_trace_nodes_sorted_by_created_at(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """Nodes are sorted by created_at before processing.""" node1 = _make_node(id="node-b", created_at=_dt() + timedelta(seconds=2)) node2 = _make_node(id="node-a", created_at=_dt()) @@ -799,7 +799,7 @@ class TestMessageTrace: trace_instance.message_trace(trace_info) trace_instance.start_call.assert_not_called() - def test_basic_message_trace(self, trace_instance, monkeypatch): + def test_basic_message_trace(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace creates message run and llm 
child run.""" monkeypatch.setattr( "dify_trace_weave.weave_trace.db.session.get", @@ -816,7 +816,7 @@ class TestMessageTrace: assert trace_instance.start_call.call_count == 2 assert trace_instance.finish_call.call_count == 2 - def test_message_trace_with_file_data(self, trace_instance, monkeypatch): + def test_message_trace_with_file_data(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace appends file URL to file_list.""" file_data = MagicMock() file_data.url = "path/to/file.png" @@ -839,7 +839,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert "http://files.test/path/to/file.png" in message_run.file_list - def test_message_trace_with_end_user(self, trace_instance, monkeypatch): + def test_message_trace_with_end_user(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace looks up end user and sets end_user_id attribute.""" end_user = MagicMock() end_user.session_id = "session-xyz" @@ -862,7 +862,7 @@ class TestMessageTrace: message_run = trace_instance.start_call.call_args_list[0][0][0] assert message_run.attributes.get("end_user_id") == "session-xyz" - def test_message_trace_no_end_user(self, trace_instance, monkeypatch): + def test_message_trace_no_end_user(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace handles when from_end_user_id is None.""" mock_db = MagicMock() mock_db.session.get.return_value = None @@ -880,7 +880,7 @@ class TestMessageTrace: trace_instance.message_trace(trace_info) assert trace_instance.start_call.call_count == 2 - def test_message_trace_trace_id_fallback_to_message_id(self, trace_instance, monkeypatch): + def test_message_trace_trace_id_fallback_to_message_id(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """trace_id falls back to message_id when trace_id is None.""" mock_db = MagicMock() mock_db.session.get.return_value = None @@ -895,7 +895,7 @@ class TestMessageTrace: message_run = 
trace_instance.start_call.call_args_list[0][0][0] assert message_run.id == "msg-1" - def test_message_trace_file_list_none(self, trace_instance, monkeypatch): + def test_message_trace_file_list_none(self, trace_instance, monkeypatch: pytest.MonkeyPatch): """message_trace handles file_list=None gracefully.""" mock_db = MagicMock() mock_db.session.get.return_value = None diff --git a/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py b/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py index a907f918c3..37b2331f0f 100644 --- a/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py +++ b/api/providers/vdb/vdb-alibabacloud-mysql/tests/unit_tests/test_alibabacloud_mysql_factory.py @@ -20,7 +20,7 @@ def test_validate_distance_function_rejects_unsupported_values(): factory._validate_distance_function("dot_product") -def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch): +def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch: pytest.MonkeyPatch): factory = AlibabaCloudMySQLVectorFactory() dataset = SimpleNamespace( id="dataset-1", @@ -45,7 +45,7 @@ def test_factory_init_vector_uses_existing_index_struct_class_prefix(monkeypatch assert vector_cls.call_args.kwargs["collection_name"] == "existing_collection" -def test_factory_init_vector_generates_collection_name_when_index_struct_is_missing(monkeypatch): +def test_factory_init_vector_generates_collection_name_when_index_struct_is_missing(monkeypatch: pytest.MonkeyPatch): factory = AlibabaCloudMySQLVectorFactory() dataset = SimpleNamespace( id="dataset-2", diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py index d1d471761d..2e8052b7dc 100644 --- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py +++ 
b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector.py @@ -83,7 +83,7 @@ def test_get_type_is_analyticdb(): assert vector.get_type() == "analyticdb" -def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch): +def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch: pytest.MonkeyPatch): factory = AnalyticdbVectorFactory() dataset = SimpleNamespace(id="dataset-1", index_struct_dict=None, index_struct=None) @@ -109,7 +109,7 @@ def test_factory_builds_openapi_config_when_host_is_missing(monkeypatch): assert dataset.index_struct is not None -def test_factory_builds_sql_config_when_host_is_present(monkeypatch): +def test_factory_builds_sql_config_when_host_is_present(monkeypatch: pytest.MonkeyPatch): factory = AnalyticdbVectorFactory() dataset = SimpleNamespace( id="dataset-2", index_struct_dict={"vector_store": {"class_prefix": "EXISTING"}}, index_struct=None diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py index d2d735ae3e..26bd385333 100644 --- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_openapi.py @@ -24,7 +24,7 @@ def _request_class(name: str): return _Request -def _install_openapi_stubs(monkeypatch): +def _install_openapi_stubs(monkeypatch: pytest.MonkeyPatch): gpdb_package = types.ModuleType("alibabacloud_gpdb20160503") gpdb_package.__path__ = [] gpdb_models = types.ModuleType("alibabacloud_gpdb20160503.models") @@ -130,7 +130,7 @@ def test_openapi_config_to_client_params(): assert params["read_timeout"] == 60000 -def test_init_creates_openapi_client_and_runs_initialize(monkeypatch): +def test_init_creates_openapi_client_and_runs_initialize(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) initialize_mock = MagicMock() 
monkeypatch.setattr(openapi_module.AnalyticdbVectorOpenAPI, "_initialize", initialize_mock) @@ -145,7 +145,7 @@ def test_init_creates_openapi_client_and_runs_initialize(monkeypatch): initialize_mock.assert_called_once_with() -def test_initialize_skips_when_cached(monkeypatch): +def test_initialize_skips_when_cached(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -164,7 +164,7 @@ def test_initialize_skips_when_cached(monkeypatch): vector._create_namespace_if_not_exists.assert_not_called() -def test_initialize_runs_when_cache_is_missing(monkeypatch): +def test_initialize_runs_when_cache_is_missing(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -184,7 +184,7 @@ def test_initialize_runs_when_cache_is_missing(monkeypatch): openapi_module.redis_client.set.assert_called_once() -def test_initialize_vector_database_calls_openapi_client(monkeypatch): +def test_initialize_vector_database_calls_openapi_client(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -199,7 +199,7 @@ def test_initialize_vector_database_calls_openapi_client(monkeypatch): assert request.manager_account_password == "password" -def test_create_namespace_creates_when_namespace_not_found(monkeypatch): +def test_create_namespace_creates_when_namespace_not_found(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -211,7 +211,7 @@ def test_create_namespace_creates_when_namespace_not_found(monkeypatch): vector._client.create_namespace.assert_called_once() -def test_create_namespace_raises_on_unexpected_api_error(monkeypatch): +def test_create_namespace_raises_on_unexpected_api_error(monkeypatch: pytest.MonkeyPatch): stubs = 
_install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -222,7 +222,7 @@ def test_create_namespace_raises_on_unexpected_api_error(monkeypatch): vector._create_namespace_if_not_exists() -def test_create_namespace_noop_when_namespace_exists(monkeypatch): +def test_create_namespace_noop_when_namespace_exists(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector.config = _config() @@ -234,7 +234,7 @@ def test_create_namespace_noop_when_namespace_exists(monkeypatch): vector._client.create_namespace.assert_not_called() -def test_create_collection_if_not_exists_creates_when_missing(monkeypatch): +def test_create_collection_if_not_exists_creates_when_missing(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) lock = MagicMock() lock.__enter__.return_value = None @@ -255,7 +255,7 @@ def test_create_collection_if_not_exists_creates_when_missing(monkeypatch): openapi_module.redis_client.set.assert_called_once() -def test_create_collection_if_not_exists_skips_when_cached(monkeypatch): +def test_create_collection_if_not_exists_skips_when_cached(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -274,7 +274,7 @@ def test_create_collection_if_not_exists_skips_when_cached(monkeypatch): vector._client.create_collection.assert_not_called() -def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch): +def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch: pytest.MonkeyPatch): stubs = _install_openapi_stubs(monkeypatch) lock = MagicMock() lock.__enter__.return_value = None @@ -293,7 +293,7 @@ def test_create_collection_if_not_exists_raises_on_non_404_errors(monkeypatch): vector.create_collection_if_not_exists(embedding_dimension=512) -def 
test_openapi_add_delete_and_search_methods(monkeypatch): +def test_openapi_add_delete_and_search_methods(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -348,7 +348,7 @@ def test_openapi_add_delete_and_search_methods(monkeypatch): assert docs_by_text[0].page_content == "high" -def test_text_exists_returns_false_when_matches_empty(monkeypatch): +def test_text_exists_returns_false_when_matches_empty(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -361,7 +361,7 @@ def test_text_exists_returns_false_when_matches_empty(monkeypatch): assert vector.text_exists("missing-id") is False -def test_openapi_delete_success(monkeypatch): +def test_openapi_delete_success(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" @@ -372,7 +372,7 @@ def test_openapi_delete_success(monkeypatch): vector._client.delete_collection.assert_called_once() -def test_openapi_delete_propagates_errors(monkeypatch): +def test_openapi_delete_propagates_errors(monkeypatch: pytest.MonkeyPatch): _install_openapi_stubs(monkeypatch) vector = AnalyticdbVectorOpenAPI.__new__(AnalyticdbVectorOpenAPI) vector._collection_name = "collection_1" diff --git a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py index 49a2ae72d0..cd255b37cf 100644 --- a/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py +++ b/api/providers/vdb/vdb-analyticdb/tests/unit_tests/test_analyticdb_vector_sql.py @@ -53,7 +53,7 @@ def test_sql_config_rejects_min_connection_greater_than_max_connection(): 
AnalyticdbVectorBySqlConfig.model_validate(values) -def test_initialize_skips_when_cache_exists(monkeypatch): +def test_initialize_skips_when_cache_exists(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -70,7 +70,7 @@ def test_initialize_skips_when_cache_exists(monkeypatch): vector._initialize_vector_database.assert_not_called() -def test_initialize_runs_when_cache_is_missing(monkeypatch): +def test_initialize_runs_when_cache_is_missing(monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -88,7 +88,7 @@ def test_initialize_runs_when_cache_is_missing(monkeypatch): sql_module.redis_client.set.assert_called_once() -def test_create_connection_pool_uses_psycopg2_pool(monkeypatch): +def test_create_connection_pool_uses_psycopg2_pool(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -119,7 +119,7 @@ def test_get_cursor_context_manager_handles_connection_lifecycle(): pool.putconn.assert_called_once_with(connection) -def test_add_texts_inserts_only_documents_with_metadata(monkeypatch): +def test_add_texts_inserts_only_documents_with_metadata(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.table_name = "dify.collection" @@ -273,7 +273,7 @@ def test_delete_drops_table(): cursor.execute.assert_called_once() -def test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypatch): +def test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypatch: pytest.MonkeyPatch): config = AnalyticdbVectorBySqlConfig(**_config_values()) created_pool = MagicMock() @@ -288,7 +288,7 @@ def test_init_normalizes_collection_name_and_creates_pool_when_missing(monkeypat assert vector.pool is created_pool -def 
test_initialize_vector_database_handles_existing_database_and_search_config(monkeypatch): +def test_initialize_vector_database_handles_existing_database_and_search_config(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -326,7 +326,7 @@ def test_initialize_vector_database_handles_existing_database_and_search_config( assert any("CREATE SCHEMA IF NOT EXISTS dify" in call.args[0] for call in worker_cursor.execute.call_args_list) -def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(monkeypatch): +def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector.databaseName = "knowledgebase" @@ -353,7 +353,7 @@ def test_initialize_vector_database_raises_runtime_error_when_zhparser_fails(mon worker_connection.rollback.assert_called_once() -def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeypatch): +def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector._collection_name = "collection" @@ -381,7 +381,7 @@ def test_create_collection_if_not_exists_creates_table_indexes_and_cache(monkeyp sql_module.redis_client.set.assert_called_once() -def test_create_collection_if_not_exists_raises_for_non_existing_error(monkeypatch): +def test_create_collection_if_not_exists_raises_for_non_existing_error(monkeypatch: pytest.MonkeyPatch): vector = AnalyticdbVectorBySql.__new__(AnalyticdbVectorBySql) vector.config = AnalyticdbVectorBySqlConfig(**_config_values()) vector._collection_name = "collection" diff --git 
a/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py b/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py index 851c09f47a..f0dddee3b9 100644 --- a/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py +++ b/api/providers/vdb/vdb-baidu/tests/unit_tests/test_baidu_vector.py @@ -121,7 +121,7 @@ def _build_fake_pymochow_modules(): @pytest.fixture -def baidu_module(monkeypatch): +def baidu_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pymochow_modules().items(): monkeypatch.setitem(sys.modules, name, module) import dify_vdb_baidu.baidu_vector as module @@ -254,7 +254,7 @@ def test_search_methods_delegate_to_database_table(baidu_module): assert vector._get_search_res.call_count == 2 -def test_factory_initializes_collection_name_and_index_struct(baidu_module, monkeypatch): +def test_factory_initializes_collection_name_and_index_struct(baidu_module, monkeypatch: pytest.MonkeyPatch): factory = baidu_module.BaiduVectorFactory() dataset = SimpleNamespace(id="dataset-1", index_struct_dict=None, index_struct=None) monkeypatch.setattr(baidu_module.Dataset, "gen_collection_name_by_id", lambda _id: "AUTO_COLLECTION") @@ -279,7 +279,7 @@ def test_factory_initializes_collection_name_and_index_struct(baidu_module, monk assert dataset.index_struct is not None -def test_init_get_type_to_index_struct_and_create_delegate(baidu_module, monkeypatch): +def test_init_get_type_to_index_struct_and_create_delegate(baidu_module, monkeypatch: pytest.MonkeyPatch): init_client = MagicMock(return_value="client") init_database = MagicMock(return_value="database") monkeypatch.setattr(baidu_module.BaiduVector, "_init_client", init_client) @@ -372,7 +372,7 @@ def test_get_search_result_handles_invalid_metadata_json(baidu_module): assert "document_id" not in docs[0].metadata -def test_init_client_constructs_configuration_and_client(baidu_module, monkeypatch): +def test_init_client_constructs_configuration_and_client(baidu_module, 
monkeypatch: pytest.MonkeyPatch): credentials = MagicMock(return_value="credentials") configuration = MagicMock(return_value="configuration") client_cls = MagicMock(return_value="client") @@ -411,7 +411,7 @@ def test_init_database_raises_for_unknown_create_database_error(baidu_module): vector._init_database() -def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypatch): +def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._client_config = SimpleNamespace( @@ -460,7 +460,7 @@ def test_create_table_handles_cache_and_validation_paths(baidu_module, monkeypat vector._wait_for_index_ready.assert_called_once_with(table, 3600) -def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypatch): +def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._db = MagicMock() @@ -493,7 +493,7 @@ def test_create_table_raises_for_invalid_index_or_metric(baidu_module, monkeypat vector._create_table(3) -def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, monkeypatch): +def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, monkeypatch: pytest.MonkeyPatch): vector = baidu_module.BaiduVector.__new__(baidu_module.BaiduVector) vector._collection_name = "collection_1" vector._client_config = SimpleNamespace( @@ -524,7 +524,9 @@ def test_create_table_raises_timeout_if_table_never_becomes_normal(baidu_module, vector._create_table(3) -def test_factory_uses_existing_collection_prefix_when_index_struct_exists(baidu_module, monkeypatch): +def test_factory_uses_existing_collection_prefix_when_index_struct_exists( + baidu_module, monkeypatch: pytest.MonkeyPatch +): factory = 
baidu_module.BaiduVectorFactory() dataset = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py b/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py index b209c9df96..f18f9a6561 100644 --- a/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py +++ b/api/providers/vdb/vdb-chroma/tests/unit_tests/test_chroma_vector.py @@ -44,7 +44,7 @@ def _build_fake_chroma_modules(): @pytest.fixture -def chroma_module(monkeypatch): +def chroma_module(monkeypatch: pytest.MonkeyPatch): fake_chroma = _build_fake_chroma_modules() monkeypatch.setitem(sys.modules, "chromadb", fake_chroma) import dify_vdb_chroma.chroma_vector as module @@ -73,7 +73,7 @@ def test_chroma_config_to_params_builds_expected_payload(chroma_module): assert params["settings"].chroma_client_auth_credentials == "credentials" -def test_create_collection_uses_redis_lock_and_cache(chroma_module, monkeypatch): +def test_create_collection_uses_redis_lock_and_cache(chroma_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -173,7 +173,7 @@ def test_search_by_full_text_returns_empty_list(chroma_module): assert vector.search_by_full_text("query") == [] -def test_factory_init_vector_uses_existing_or_generated_collection(chroma_module, monkeypatch): +def test_factory_init_vector_uses_existing_or_generated_collection(chroma_module, monkeypatch: pytest.MonkeyPatch): factory = chroma_module.ChromaVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", index_struct_dict={"vector_store": {"class_prefix": "EXISTING"}}, index_struct=None diff --git a/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py b/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py index a7473f1b91..4f8395e475 100644 --- a/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py +++ 
b/api/providers/vdb/vdb-clickzetta/tests/unit_tests/test_clickzetta_vector.py @@ -45,7 +45,7 @@ def _build_fake_clickzetta_module(): @pytest.fixture -def clickzetta_module(monkeypatch): +def clickzetta_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "clickzetta", _build_fake_clickzetta_module()) import dify_vdb_clickzetta.clickzetta_vector as module @@ -218,7 +218,7 @@ def test_search_by_like_returns_documents_with_default_score(clickzetta_module): assert docs[0].metadata["score"] == 0.5 -def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch): +def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch: pytest.MonkeyPatch): factory = clickzetta_module.ClickzettaVectorFactory() dataset = SimpleNamespace(id="dataset-1") @@ -243,7 +243,7 @@ def test_factory_initializes_clickzetta_vector(clickzetta_module, monkeypatch): assert vector_cls.call_args.kwargs["collection_name"] == "collection" -def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch): +def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch: pytest.MonkeyPatch): clickzetta_module.ClickzettaConnectionPool._instance = None monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) @@ -255,7 +255,7 @@ def test_connection_pool_singleton_and_config_key(clickzetta_module, monkeypatch assert "username:instance:service:workspace:cluster:dify" in key -def test_connection_pool_create_connection_retries_and_configures(clickzetta_module, monkeypatch): +def test_connection_pool_create_connection_retries_and_configures(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -274,7 +274,7 @@ def test_connection_pool_create_connection_retries_and_configures(clickzetta_mod 
pool._configure_connection.assert_called_once_with(connection) -def test_connection_pool_create_connection_raises_after_retries(clickzetta_module, monkeypatch): +def test_connection_pool_create_connection_raises_after_retries(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -318,7 +318,7 @@ def test_connection_pool_configure_connection_swallows_errors(clickzetta_module) monkeypatch.undo() -def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monkeypatch): +def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "_start_cleanup_thread", MagicMock()) pool = clickzetta_module.ClickzettaConnectionPool() config = _config(clickzetta_module) @@ -360,7 +360,7 @@ def test_connection_pool_get_return_cleanup_and_shutdown(clickzetta_module, monk assert pool._shutdown is True -def test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module, monkeypatch): +def test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = clickzetta_module.ClickzettaConnectionPool.__new__(clickzetta_module.ClickzettaConnectionPool) pool._shutdown = False pool._cleanup_expired_connections = MagicMock(side_effect=lambda: setattr(pool, "_shutdown", True)) @@ -384,7 +384,7 @@ def test_connection_pool_start_cleanup_thread_runs_worker_once(clickzetta_module pool._cleanup_expired_connections.assert_called_once() -def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypatch): +def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() pool.get_connection.return_value = "conn" 
monkeypatch.setattr(clickzetta_module.ClickzettaConnectionPool, "get_instance", MagicMock(return_value=pool)) @@ -405,7 +405,7 @@ def test_vector_init_connection_context_and_helpers(clickzetta_module, monkeypat assert vector._ensure_connection() == "conn" -def test_write_queue_initialization_worker_and_execute_write(clickzetta_module, monkeypatch): +def test_write_queue_initialization_worker_and_execute_write(clickzetta_module, monkeypatch: pytest.MonkeyPatch): class _Thread: def __init__(self, target, daemon): self.target = target @@ -579,7 +579,7 @@ def test_create_inverted_index_branches(clickzetta_module): vector._create_inverted_index(cursor) -def test_add_texts_batches_and_insert_batch_behaviors(clickzetta_module, monkeypatch): +def test_add_texts_batches_and_insert_batch_behaviors(clickzetta_module, monkeypatch: pytest.MonkeyPatch): vector = clickzetta_module.ClickzettaVector.__new__(clickzetta_module.ClickzettaVector) vector._config = _config(clickzetta_module) vector._config.batch_size = 2 @@ -811,7 +811,7 @@ def test_clickzetta_pool_cleanup_and_shutdown_edge_paths(clickzetta_module): assert pool._shutdown is True -def test_clickzetta_pool_cleanup_thread_and_worker_exception_paths(clickzetta_module, monkeypatch): +def test_clickzetta_pool_cleanup_thread_and_worker_exception_paths(clickzetta_module, monkeypatch: pytest.MonkeyPatch): pool = clickzetta_module.ClickzettaConnectionPool.__new__(clickzetta_module.ClickzettaConnectionPool) pool._shutdown = False diff --git a/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py b/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py index 7e5c40b8f2..d474b566d3 100644 --- a/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py +++ b/api/providers/vdb/vdb-couchbase/tests/unit_tests/test_couchbase_vector.py @@ -150,7 +150,7 @@ def _build_fake_couchbase_modules(): @pytest.fixture -def couchbase_module(monkeypatch): +def couchbase_module(monkeypatch: 
pytest.MonkeyPatch): for name, module in _build_fake_couchbase_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -194,7 +194,7 @@ def test_init_sets_cluster_handles(couchbase_module): vector._cluster.wait_until_ready.assert_called_once() -def test_create_and_create_collection_branches(couchbase_module, monkeypatch): +def test_create_and_create_collection_branches(couchbase_module, monkeypatch: pytest.MonkeyPatch): vector = couchbase_module.CouchbaseVector.__new__(couchbase_module.CouchbaseVector) vector._collection_name = "collection_1" vector._client_config = _config(couchbase_module) @@ -319,7 +319,7 @@ def test_search_methods_and_format_metadata(couchbase_module): assert vector._format_metadata({"metadata.a": 1, "plain": 2}) == {"a": 1, "plain": 2} -def test_delete_collection_and_factory(couchbase_module, monkeypatch): +def test_delete_collection_and_factory(couchbase_module, monkeypatch: pytest.MonkeyPatch): vector = couchbase_module.CouchbaseVector("collection_1", _config(couchbase_module)) scopes = [ SimpleNamespace(collections=[SimpleNamespace(name="other")]), diff --git a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py index f81ed6beea..91cc2e0fdb 100644 --- a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py +++ b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_ja_vector.py @@ -28,7 +28,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture -def elasticsearch_ja_module(monkeypatch): +def elasticsearch_ja_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -39,7 +39,7 @@ def elasticsearch_ja_module(monkeypatch): return importlib.reload(ja_module) -def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch): +def 
test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -57,7 +57,7 @@ def test_create_collection_cache_hit(elasticsearch_ja_module, monkeypatch): elasticsearch_ja_module.redis_client.set.assert_not_called() -def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monkeypatch): +def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -87,7 +87,7 @@ def test_create_collection_create_and_exists_paths(elasticsearch_ja_module, monk elasticsearch_ja_module.redis_client.set.assert_called_once() -def test_ja_factory_uses_existing_or_generated_collection(elasticsearch_ja_module, monkeypatch): +def test_ja_factory_uses_existing_or_generated_collection(elasticsearch_ja_module, monkeypatch: pytest.MonkeyPatch): factory = elasticsearch_ja_module.ElasticSearchJaVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py index 48f1f6dc26..d54c105a0f 100644 --- a/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py +++ b/api/providers/vdb/vdb-elasticsearch/tests/unit_tests/test_elasticsearch_vector.py @@ -38,7 +38,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture -def elasticsearch_module(monkeypatch): +def elasticsearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -287,7 +287,7 @@ def test_search_by_vector_and_full_text(elasticsearch_module): assert "bool" in query -def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch): +def 
test_create_and_create_collection_paths(elasticsearch_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -331,7 +331,7 @@ def test_create_and_create_collection_paths(elasticsearch_module, monkeypatch): elasticsearch_module.redis_client.set.assert_called_once() -def test_elasticsearch_factory_branches(elasticsearch_module, monkeypatch): +def test_elasticsearch_factory_branches(elasticsearch_module, monkeypatch: pytest.MonkeyPatch): factory = elasticsearch_module.ElasticSearchVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py b/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py index f9a557ecce..8b197662e3 100644 --- a/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py +++ b/api/providers/vdb/vdb-hologres/tests/unit_tests/test_hologres_vector.py @@ -38,7 +38,7 @@ def _build_fake_hologres_modules(): @pytest.fixture -def hologres_module(monkeypatch): +def hologres_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_hologres_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -266,7 +266,7 @@ def test_delete_handles_existing_and_missing_tables(hologres_module): vector._client.drop_table.assert_called_once_with(vector.table_name) -def test_create_collection_returns_early_when_cache_hits(hologres_module, monkeypatch): +def test_create_collection_returns_early_when_cache_hits(hologres_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -281,7 +281,7 @@ def test_create_collection_returns_early_when_cache_hits(hologres_module, monkey hologres_module.redis_client.set.assert_not_called() -def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatch): +def 
test_create_collection_creates_table_and_indexes(hologres_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -313,7 +313,7 @@ def test_create_collection_creates_table_and_indexes(hologres_module, monkeypatc hologres_module.redis_client.set.assert_called_once() -def test_create_collection_raises_when_table_never_becomes_ready(hologres_module, monkeypatch): +def test_create_collection_raises_when_table_never_becomes_ready(hologres_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = False @@ -331,7 +331,7 @@ def test_create_collection_raises_when_table_never_becomes_ready(hologres_module hologres_module.redis_client.set.assert_not_called() -def test_hologres_factory_uses_existing_or_generated_collection(hologres_module, monkeypatch): +def test_hologres_factory_uses_existing_or_generated_collection(hologres_module, monkeypatch: pytest.MonkeyPatch): factory = hologres_module.HologresVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py b/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py index ba3f14912b..a1617b6d43 100644 --- a/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py +++ b/api/providers/vdb/vdb-huawei-cloud/tests/unit_tests/test_huawei_cloud_vector.py @@ -29,7 +29,7 @@ def _build_fake_elasticsearch_modules(): @pytest.fixture -def huawei_module(monkeypatch): +def huawei_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_elasticsearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -155,7 +155,7 @@ def test_search_by_vector_and_full_text(huawei_module): assert docs[0].page_content == "text-hit" -def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch): +def 
test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch: pytest.MonkeyPatch): class FakeDocument: def __init__(self, page_content, vector, metadata): self.page_content = page_content @@ -185,7 +185,7 @@ def test_search_by_vector_skips_hits_without_metadata(huawei_module, monkeypatch assert docs == [] -def test_create_and_create_collection_paths(huawei_module, monkeypatch): +def test_create_and_create_collection_paths(huawei_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -218,7 +218,7 @@ def test_create_and_create_collection_paths(huawei_module, monkeypatch): huawei_module.redis_client.set.assert_called_once() -def test_huawei_factory_branches(huawei_module, monkeypatch): +def test_huawei_factory_branches(huawei_module, monkeypatch: pytest.MonkeyPatch): factory = huawei_module.HuaweiCloudVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py b/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py index 8c038e82b9..b4ea6ea6c1 100644 --- a/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py +++ b/api/providers/vdb/vdb-iris/tests/unit_tests/test_iris_vector.py @@ -23,7 +23,7 @@ def _build_fake_iris_module(): @pytest.fixture -def iris_module(monkeypatch): +def iris_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "iris", _build_fake_iris_module()) import dify_vdb_iris.iris_vector as module @@ -249,7 +249,7 @@ def test_iris_vector_init_get_cursor_and_create(iris_module): vector._create_collection.assert_called_once_with(2) -def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch): +def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch: pytest.MonkeyPatch): with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", _config(iris_module)) @@ 
-297,7 +297,7 @@ def test_iris_vector_crud_and_vector_search(iris_module, monkeypatch): assert docs[0].metadata["score"] == pytest.approx(0.9) -def test_iris_vector_full_text_search_paths(iris_module, monkeypatch): +def test_iris_vector_full_text_search_paths(iris_module, monkeypatch: pytest.MonkeyPatch): cfg = _config(iris_module, IRIS_TEXT_INDEX=True) with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", cfg) @@ -344,7 +344,7 @@ def test_iris_vector_full_text_search_paths(iris_module, monkeypatch): assert vector_like.search_by_full_text("100%", top_k=1) == [] -def test_iris_vector_delete_create_collection_and_factory(iris_module, monkeypatch): +def test_iris_vector_delete_create_collection_and_factory(iris_module, monkeypatch: pytest.MonkeyPatch): with patch.object(iris_module, "get_iris_pool", return_value=MagicMock()): vector = iris_module.IrisVector("collection", _config(iris_module, IRIS_TEXT_INDEX=True)) diff --git a/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py b/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py index 238145c1d6..4a408d1b10 100644 --- a/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py +++ b/api/providers/vdb/vdb-lindorm/tests/unit_tests/test_lindorm_vector.py @@ -47,7 +47,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def lindorm_module(monkeypatch): +def lindorm_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -100,7 +100,7 @@ def test_to_opensearch_params_and_init(lindorm_module): assert vector_ugc._routing == "route" -def test_create_refresh_and_add_texts_success(lindorm_module, monkeypatch): +def test_create_refresh_and_add_texts_success(lindorm_module, monkeypatch: pytest.MonkeyPatch): vector = lindorm_module.LindormVectorStore( "collection", _config(lindorm_module), using_ugc=True, 
routing_value="route" ) @@ -301,7 +301,7 @@ def test_search_by_full_text_success_and_error(lindorm_module): vector.search_by_full_text("hello") -def test_create_collection_paths(lindorm_module, monkeypatch): +def test_create_collection_paths(lindorm_module, monkeypatch: pytest.MonkeyPatch): vector = lindorm_module.LindormVectorStore("collection", _config(lindorm_module), using_ugc=False) with pytest.raises(ValueError, match="cannot be empty"): @@ -331,7 +331,7 @@ def test_create_collection_paths(lindorm_module, monkeypatch): vector._client.indices.create.assert_not_called() -def test_lindorm_factory_branches(lindorm_module, monkeypatch): +def test_lindorm_factory_branches(lindorm_module, monkeypatch: pytest.MonkeyPatch): factory = lindorm_module.LindormVectorStoreFactory() monkeypatch.setattr(lindorm_module.dify_config, "LINDORM_URL", "http://localhost:9200") diff --git a/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py b/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py index c22f4304e5..762ec330b2 100644 --- a/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py +++ b/api/providers/vdb/vdb-matrixone/tests/unit_tests/test_matrixone_vector.py @@ -32,7 +32,7 @@ def _build_fake_mo_vector_modules(): @pytest.fixture -def matrixone_module(monkeypatch): +def matrixone_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_mo_vector_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -70,7 +70,7 @@ def test_matrixone_config_validation(matrixone_module, field, value, message): matrixone_module.MatrixoneConfig.model_validate(values) -def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, monkeypatch): +def test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -86,7 +86,7 @@ def 
test_get_client_creates_full_text_index_when_cache_misses(matrixone_module, matrixone_module.redis_client.set.assert_called_once() -def test_get_client_skips_index_creation_when_cache_hits(matrixone_module, monkeypatch): +def test_get_client_skips_index_creation_when_cache_hits(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -146,7 +146,7 @@ def test_get_type_and_create_delegate_to_add_texts(matrixone_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_get_client_handles_full_text_index_creation_error(matrixone_module, monkeypatch): +def test_get_client_handles_full_text_index_creation_error(matrixone_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -165,7 +165,7 @@ def test_get_client_handles_full_text_index_creation_error(matrixone_module, mon matrixone_module.redis_client.set.assert_not_called() -def test_add_texts_generates_ids_and_inserts(matrixone_module, monkeypatch): +def test_add_texts_generates_ids_and_inserts(matrixone_module, monkeypatch: pytest.MonkeyPatch): vector = matrixone_module.MatrixoneVector("collection_1", _valid_config(matrixone_module)) vector.client = MagicMock() monkeypatch.setattr(matrixone_module.uuid, "uuid4", lambda: "generated-uuid") @@ -224,7 +224,7 @@ def test_search_by_vector_builds_documents(matrixone_module): assert vector.client.query.call_args.kwargs["filter"] == {"document_id": {"$in": ["d-1"]}} -def test_matrixone_factory_uses_existing_or_generated_collection(matrixone_module, monkeypatch): +def test_matrixone_factory_uses_existing_or_generated_collection(matrixone_module, monkeypatch: pytest.MonkeyPatch): factory = matrixone_module.MatrixoneVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py 
b/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py index 36c0ed8f6f..730ff9f296 100644 --- a/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py +++ b/api/providers/vdb/vdb-milvus/tests/unit_tests/test_milvus.py @@ -99,7 +99,7 @@ def _build_fake_pymilvus_modules(): @pytest.fixture -def milvus_module(monkeypatch): +def milvus_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pymilvus_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -327,7 +327,7 @@ def test_process_search_results_and_search_methods(milvus_module): assert "document_id" in vector._client.search.call_args.kwargs["filter"] -def test_create_collection_cache_and_existing_collection(milvus_module, monkeypatch): +def test_create_collection_cache_and_existing_collection(milvus_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -351,7 +351,7 @@ def test_create_collection_cache_and_existing_collection(milvus_module, monkeypa milvus_module.redis_client.set.assert_called() -def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch): +def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -385,7 +385,7 @@ def test_create_collection_builds_schema_and_indexes(milvus_module, monkeypatch) assert call_kwargs["consistency_level"] == "Session" -def test_factory_initializes_milvus_vector(milvus_module, monkeypatch): +def test_factory_initializes_milvus_vector(milvus_module, monkeypatch: pytest.MonkeyPatch): factory = milvus_module.MilvusVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py b/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py index 228ea92639..900c75fdab 100644 --- 
a/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py +++ b/api/providers/vdb/vdb-myscale/tests/unit_tests/test_myscale_vector.py @@ -38,7 +38,7 @@ def _build_fake_clickhouse_connect_module(): @pytest.fixture -def myscale_module(monkeypatch): +def myscale_module(monkeypatch: pytest.MonkeyPatch): fake_module = _build_fake_clickhouse_connect_module() monkeypatch.setitem(sys.modules, "clickhouse_connect", fake_module) @@ -90,7 +90,7 @@ def test_delete_by_ids_short_circuits_on_empty_list(myscale_module): vector._client.command.assert_not_called() -def test_factory_initializes_lower_case_collection_name(myscale_module, monkeypatch): +def test_factory_initializes_lower_case_collection_name(myscale_module, monkeypatch: pytest.MonkeyPatch): factory = myscale_module.MyScaleVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", @@ -160,7 +160,7 @@ def test_create_collection_builds_expected_sql(myscale_module): assert "INDEX text_idx text TYPE fts('tokenizer=unicode')" in sql -def test_add_texts_inserts_rows_and_returns_ids(myscale_module, monkeypatch): +def test_add_texts_inserts_rows_and_returns_ids(myscale_module, monkeypatch: pytest.MonkeyPatch): vector = myscale_module.MyScaleVector("collection_1", _config(myscale_module)) monkeypatch.setattr(myscale_module.uuid, "uuid4", lambda: "generated-uuid") docs = [ diff --git a/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py b/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py index 31f9ff3e56..36393cc486 100644 --- a/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py +++ b/api/providers/vdb/vdb-oceanbase/tests/unit_tests/test_oceanbase_vector.py @@ -53,7 +53,7 @@ def _build_fake_pyobvector_module(): @pytest.fixture -def oceanbase_module(monkeypatch): +def oceanbase_module(monkeypatch: pytest.MonkeyPatch): monkeypatch.setitem(sys.modules, "pyobvector", _build_fake_pyobvector_module()) import dify_vdb_oceanbase.oceanbase_vector 
as module @@ -208,7 +208,7 @@ def test_create_delegates_to_collection_and_insert(oceanbase_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_module, monkeypatch): +def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -234,7 +234,7 @@ def test_create_collection_cache_and_existing_table_short_circuits(oceanbase_mod vector.delete.assert_not_called() -def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, monkeypatch): +def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -271,7 +271,7 @@ def test_create_collection_happy_path_with_hybrid_and_index(oceanbase_module, mo oceanbase_module.redis_client.set.assert_called_once() -def test_create_collection_error_paths(oceanbase_module, monkeypatch): +def test_create_collection_error_paths(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -308,7 +308,7 @@ def test_create_collection_error_paths(oceanbase_module, monkeypatch): vector._create_collection() -def test_create_collection_fulltext_and_metadata_index_exceptions(oceanbase_module, monkeypatch): +def test_create_collection_fulltext_and_metadata_index_exceptions(oceanbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -517,7 +517,7 @@ def test_delete_success_and_exception(oceanbase_module): vector.delete() -def test_oceanbase_factory_uses_existing_or_generated_collection(oceanbase_module, monkeypatch): +def 
test_oceanbase_factory_uses_existing_or_generated_collection(oceanbase_module, monkeypatch: pytest.MonkeyPatch): factory = oceanbase_module.OceanBaseVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py b/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py index 09abd625fc..57c9b14d9f 100644 --- a/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py +++ b/api/providers/vdb/vdb-opengauss/tests/unit_tests/test_opengauss.py @@ -37,7 +37,7 @@ def _build_fake_psycopg2_modules(): @pytest.fixture -def opengauss_module(monkeypatch): +def opengauss_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_psycopg2_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -88,7 +88,7 @@ def test_opengauss_config_validation_rejects_min_greater_than_max(opengauss_modu opengauss_module.OpenGaussConfig.model_validate(values) -def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch): +def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -99,7 +99,7 @@ def test_init_sets_table_name_and_vector_type(opengauss_module, monkeypatch): assert vector.pool is pool -def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch): +def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -126,7 +126,7 @@ def test_create_index_with_pq_executes_pq_sql(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_called_once() -def test_create_index_skips_index_sql_for_large_dimension(opengauss_module, monkeypatch): +def 
test_create_index_skips_index_sql_for_large_dimension(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -158,7 +158,7 @@ def test_search_by_vector_validates_top_k(opengauss_module): vector.search_by_vector([0.1, 0.2], top_k=0) -def test_delete_by_ids_short_circuits_with_empty_input(opengauss_module, monkeypatch): +def test_delete_by_ids_short_circuits_with_empty_input(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) vector = opengauss_module.OpenGauss("collection_1", _config(opengauss_module)) @@ -200,7 +200,7 @@ def test_create_calls_collection_insert_and_index(opengauss_module): vector._create_index.assert_called_once_with(2) -def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch): +def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -220,7 +220,7 @@ def test_create_index_returns_early_on_cache_hit(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_not_called() -def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, monkeypatch): +def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -245,7 +245,7 @@ def test_create_index_without_pq_executes_standard_index_sql(opengauss_module, m assert any("embedding_cosine_embedding_collection_1_idx" in query for query in sql) -def test_add_texts_uses_execute_values(opengauss_module, monkeypatch): +def 
test_add_texts_uses_execute_values(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) vector = opengauss_module.OpenGauss("collection_1", _config(opengauss_module)) @@ -342,7 +342,7 @@ def test_search_by_full_text_validates_top_k(opengauss_module): vector.search_by_full_text("query", top_k=0) -def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch): +def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(opengauss_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) lock = MagicMock() @@ -370,7 +370,7 @@ def test_create_collection_cache_and_create_path(opengauss_module, monkeypatch): opengauss_module.redis_client.set.assert_called_once() -def test_opengauss_factory_uses_existing_or_generated_collection(opengauss_module, monkeypatch): +def test_opengauss_factory_uses_existing_or_generated_collection(opengauss_module, monkeypatch: pytest.MonkeyPatch): factory = opengauss_module.OpenGaussFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py index f2ed7cb6fb..b2b004a4de 100644 --- a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py +++ b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch.py @@ -59,7 +59,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def opensearch_module(monkeypatch): +def opensearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -95,7 +95,7 @@ class TestOpenSearchConfig: assert params["connection_class"].__name__ == "Urllib3HttpConnection" assert params["http_auth"] == ("admin", 
"password") - def test_to_opensearch_params_with_aws_managed_iam(self, opensearch_module, monkeypatch): + def test_to_opensearch_params_with_aws_managed_iam(self, opensearch_module, monkeypatch: pytest.MonkeyPatch): class _Session: def get_credentials(self): return "creds" diff --git a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py index 1c2921f85b..80bf20e820 100644 --- a/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py +++ b/api/providers/vdb/vdb-opensearch/tests/unit_tests/test_opensearch_vector.py @@ -58,7 +58,7 @@ def _build_fake_opensearch_modules(): @pytest.fixture -def opensearch_module(monkeypatch): +def opensearch_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_opensearch_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -116,7 +116,7 @@ def test_config_validation_for_aws_auth_and_https_fields(opensearch_module): opensearch_module.OpenSearchConfig.model_validate(values) -def test_create_aws_managed_iam_auth(opensearch_module, monkeypatch): +def test_create_aws_managed_iam_auth(opensearch_module, monkeypatch: pytest.MonkeyPatch): class _Session: def get_credentials(self): return "creds" @@ -167,7 +167,7 @@ def test_init_and_create_delegate_calls(opensearch_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_supports_regular_and_aoss_clients(opensearch_module, monkeypatch): +def test_add_texts_supports_regular_and_aoss_clients(opensearch_module, monkeypatch: pytest.MonkeyPatch): vector = opensearch_module.OpenSearchVector("Collection_1", _config(opensearch_module, aws_service="es")) docs = [ Document(page_content="a", metadata={"doc_id": "1"}), @@ -308,7 +308,7 @@ def test_search_by_full_text_and_filters(opensearch_module): assert query["query"]["bool"]["filter"] == [{"terms": {"metadata.document_id": ["d-1"]}}] -def 
test_create_collection_cache_and_create_path(opensearch_module, monkeypatch): +def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -331,7 +331,7 @@ def test_create_collection_cache_and_create_path(opensearch_module, monkeypatch) opensearch_module.redis_client.set.assert_called() -def test_opensearch_factory_initializes_expected_collection_name(opensearch_module, monkeypatch): +def test_opensearch_factory_initializes_expected_collection_name(opensearch_module, monkeypatch: pytest.MonkeyPatch): factory = opensearch_module.OpenSearchVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py b/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py index 678cf876b0..46027c7e44 100644 --- a/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py +++ b/api/providers/vdb/vdb-oracle/tests/unit_tests/test_oraclevector.py @@ -51,7 +51,7 @@ def _connection_with_cursor(cursor): @pytest.fixture -def oracle_module(monkeypatch): +def oracle_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_oracle_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -94,7 +94,7 @@ def test_oracle_config_validation_autonomous_requirements(oracle_module): ) -def test_init_and_get_type(oracle_module, monkeypatch): +def test_init_and_get_type(oracle_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(oracle_module.oracledb, "create_pool", MagicMock(return_value=pool)) vector = oracle_module.OracleVector("collection_1", _config(oracle_module)) @@ -139,7 +139,7 @@ def test_numpy_converters_and_type_handlers(oracle_module): assert out_float64.dtype == numpy.float64 -def test_get_connection_supports_standard_and_autonomous_paths(oracle_module, monkeypatch): +def 
test_get_connection_supports_standard_and_autonomous_paths(oracle_module, monkeypatch: pytest.MonkeyPatch): connect = MagicMock(return_value="connection") monkeypatch.setattr(oracle_module.oracledb, "connect", connect) @@ -173,7 +173,7 @@ def test_create_delegates_collection_and_insert(oracle_module): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_inserts_and_logs_on_failures(oracle_module, monkeypatch): +def test_add_texts_inserts_and_logs_on_failures(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" vector.input_type_handler = MagicMock() @@ -279,7 +279,7 @@ def _fake_nltk_module(*, missing_data=False): return nltk, nltk_corpus -def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatch): +def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" @@ -305,7 +305,7 @@ def test_search_by_full_text_chinese_and_english_paths(oracle_module, monkeypatc assert "doc_id_0" in en_params -def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, monkeypatch): +def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, monkeypatch: pytest.MonkeyPatch): vector = oracle_module.OracleVector.__new__(oracle_module.OracleVector) vector.table_name = "embedding_collection_1" vector._get_connection = MagicMock() @@ -320,7 +320,7 @@ def test_search_by_full_text_empty_query_and_missing_nltk(oracle_module, monkeyp vector.search_by_full_text("english query") -def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch): +def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -346,7 
+346,9 @@ def test_create_collection_cache_and_execute_path(oracle_module, monkeypatch): oracle_module.redis_client.set.assert_called_once() -def test_oracle_factory_init_vector_uses_existing_or_generated_collection(oracle_module, monkeypatch): +def test_oracle_factory_init_vector_uses_existing_or_generated_collection( + oracle_module, monkeypatch: pytest.MonkeyPatch +): factory = oracle_module.OracleVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py b/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py index c3291f7f12..1841e88139 100644 --- a/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py +++ b/api/providers/vdb/vdb-pgvecto-rs/tests/unit_tests/test_pgvecto_rs.py @@ -79,7 +79,7 @@ def _patch_both(monkeypatch, module, calls, execute_results=None): @pytest.fixture -def pgvecto_module(monkeypatch): +def pgvecto_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_pgvecto_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -126,7 +126,7 @@ def test_collection_base_has_expected_annotations(pgvecto_module): assert {"id", "text", "meta", "vector"} <= set(annotations) -def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch): +def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module session_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -145,7 +145,7 @@ def test_init_get_type_and_create_delegate(pgvecto_module, monkeypatch): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch): +def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module session_calls = [] monkeypatch.setattr(module, "create_engine", 
MagicMock(return_value="engine")) @@ -169,7 +169,7 @@ def test_create_collection_cache_and_sql_execution(pgvecto_module, monkeypatch): module.redis_client.set.assert_called() -def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch): +def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module init_calls = [] runtime_calls = [] @@ -241,7 +241,7 @@ def test_add_texts_get_ids_and_delete_methods(pgvecto_module, monkeypatch): assert any("DROP TABLE IF EXISTS collection_1" in str(args[0]) for args, _ in runtime_calls) -def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch): +def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module init_calls = [] monkeypatch.setattr(module, "create_engine", MagicMock(return_value="engine")) @@ -313,7 +313,7 @@ def test_text_exists_search_and_full_text(pgvecto_module, monkeypatch): assert vector.search_by_full_text("hello") == [] -def test_factory_uses_existing_or_generated_collection(pgvecto_module, monkeypatch): +def test_factory_uses_existing_or_generated_collection(pgvecto_module, monkeypatch: pytest.MonkeyPatch): module, _ = pgvecto_module factory = module.PGVectoRSFactory() dataset_with_index = SimpleNamespace( diff --git a/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py b/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py index 99a6e00c16..38e472df63 100644 --- a/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py +++ b/api/providers/vdb/vdb-pgvector/tests/unit_tests/test_pgvector.py @@ -336,7 +336,7 @@ def test_create_delegates_collection_creation_and_insert(): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_add_texts_uses_execute_values_and_returns_ids(monkeypatch): +def test_add_texts_uses_execute_values_and_returns_ids(monkeypatch: pytest.MonkeyPatch): vector = PGVector.__new__(PGVector) vector.table_name 
= "embedding_collection_1" @@ -387,7 +387,7 @@ def test_text_get_and_delete_methods(): assert any("DROP TABLE IF EXISTS embedding_collection_1" in sql for sql in executed_sql) -def test_delete_by_ids_handles_empty_undefined_table_and_generic_exception(monkeypatch): +def test_delete_by_ids_handles_empty_undefined_table_and_generic_exception(monkeypatch: pytest.MonkeyPatch): vector = PGVector.__new__(PGVector) vector.table_name = "embedding_collection_1" cursor = MagicMock() @@ -464,7 +464,7 @@ def test_search_by_full_text_branches_for_bigm_and_standard(): assert "bigm_similarity" in cursor.execute.call_args_list[1].args[0] -def test_pgvector_factory_initializes_expected_collection_name(monkeypatch): +def test_pgvector_factory_initializes_expected_collection_name(monkeypatch: pytest.MonkeyPatch): factory = pgvector_module.PGVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py b/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py index 0ed5491fbe..89ee0a47f1 100644 --- a/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py +++ b/api/providers/vdb/vdb-qdrant/tests/unit_tests/test_qdrant_vector.py @@ -121,7 +121,7 @@ def _build_fake_qdrant_modules(): @pytest.fixture -def qdrant_module(monkeypatch): +def qdrant_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_qdrant_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -170,7 +170,7 @@ def test_init_and_basic_behaviour(qdrant_module): vector.add_texts.assert_called_once() -def test_create_collection_and_add_texts(qdrant_module, monkeypatch): +def test_create_collection_and_add_texts(qdrant_module, monkeypatch: pytest.MonkeyPatch): vector = qdrant_module.QdrantVector("collection_1", "group-1", _config(qdrant_module)) lock = MagicMock() lock.__enter__.return_value = None @@ -288,7 +288,7 @@ def test_search_and_helper_methods(qdrant_module): assert 
doc.page_content == "doc" -def test_qdrant_factory_paths(qdrant_module, monkeypatch): +def test_qdrant_factory_paths(qdrant_module, monkeypatch: pytest.MonkeyPatch): factory = qdrant_module.QdrantVectorFactory() dataset = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py b/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py index f97ad1400a..c5f3a9f847 100644 --- a/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py +++ b/api/providers/vdb/vdb-relyt/tests/unit_tests/test_relyt_vector.py @@ -59,7 +59,7 @@ def _patch_both(monkeypatch, module, session): @pytest.fixture -def relyt_module(monkeypatch): +def relyt_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_relyt_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -97,7 +97,7 @@ def test_relyt_config_validation(relyt_module, field, value, message): relyt_module.RelytConfig.model_validate(values) -def test_init_get_type_and_create_delegate(relyt_module, monkeypatch): +def test_init_get_type_and_create_delegate(relyt_module, monkeypatch: pytest.MonkeyPatch): engine = MagicMock() monkeypatch.setattr(relyt_module, "create_engine", MagicMock(return_value=engine)) vector = relyt_module.RelytVector("collection_1", _config(relyt_module), group_id="group-1") @@ -114,7 +114,7 @@ def test_init_get_type_and_create_delegate(relyt_module, monkeypatch): vector.add_texts.assert_called_once_with(docs, [[0.1, 0.2]]) -def test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch): +def test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -142,7 +142,7 @@ def test_create_collection_cache_and_sql_execution(relyt_module, monkeypatch): relyt_module.redis_client.set.assert_called_once() -def test_add_texts_and_metadata_queries(relyt_module, monkeypatch): +def 
test_add_texts_and_metadata_queries(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector._group_id = "group-1" @@ -212,7 +212,7 @@ def test_delete_by_metadata_field_calls_delete_by_uuids(relyt_module): # 3. delete_by_ids translates to uuids -def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch): +def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -225,7 +225,7 @@ def test_delete_by_ids_translates_to_uuids(relyt_module, monkeypatch): # 4. text_exists True -def test_text_exists_true(relyt_module, monkeypatch): +def test_text_exists_true(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -236,7 +236,7 @@ def test_text_exists_true(relyt_module, monkeypatch): # 5. text_exists False -def test_text_exists_false(relyt_module, monkeypatch): +def test_text_exists_false(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -284,7 +284,7 @@ def test_search_by_vector_filters_by_score_and_ids(relyt_module): # 8. 
delete commits session -def test_delete_drops_table(relyt_module, monkeypatch): +def test_delete_drops_table(relyt_module, monkeypatch: pytest.MonkeyPatch): vector = relyt_module.RelytVector.__new__(relyt_module.RelytVector) vector._collection_name = "collection_1" vector.client = MagicMock() @@ -295,7 +295,7 @@ def test_delete_drops_table(relyt_module, monkeypatch): session.execute.assert_called_once() -def test_relyt_factory_existing_and_generated_collection(relyt_module, monkeypatch): +def test_relyt_factory_existing_and_generated_collection(relyt_module, monkeypatch: pytest.MonkeyPatch): factory = relyt_module.RelytVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py b/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py index 62a11e0445..49d4b160cf 100644 --- a/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py +++ b/api/providers/vdb/vdb-tablestore/tests/unit_tests/test_tablestore_vector.py @@ -77,7 +77,7 @@ def _build_fake_tablestore_module(): @pytest.fixture -def tablestore_module(monkeypatch): +def tablestore_module(monkeypatch: pytest.MonkeyPatch): fake_module = _build_fake_tablestore_module() monkeypatch.setitem(sys.modules, "tablestore", fake_module) @@ -177,7 +177,7 @@ def test_get_by_ids_text_exists_delete_and_wrappers(tablestore_module): vector._delete_table_if_exist.assert_called_once() -def test_create_collection_and_table_index_lifecycle(tablestore_module, monkeypatch): +def test_create_collection_and_table_index_lifecycle(tablestore_module, monkeypatch: pytest.MonkeyPatch): vector = tablestore_module.TableStoreVector("collection_1", _config(tablestore_module)) lock = MagicMock() lock.__enter__.return_value = None @@ -289,7 +289,7 @@ def test_write_row_and_search_helpers(tablestore_module): assert "score" not in docs[0].metadata -def 
test_tablestore_factory_uses_existing_or_generated_collection(tablestore_module, monkeypatch): +def test_tablestore_factory_uses_existing_or_generated_collection(tablestore_module, monkeypatch: pytest.MonkeyPatch): factory = tablestore_module.TableStoreVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py b/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py index 299e40ee1e..e1fe227a29 100644 --- a/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py +++ b/api/providers/vdb/vdb-tencent/tests/unit_tests/test_tencent_vector.py @@ -136,7 +136,7 @@ def _build_fake_tencent_modules(): @pytest.fixture -def tencent_module(monkeypatch): +def tencent_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_tencent_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -187,7 +187,7 @@ def test_config_and_init_paths(tencent_module): assert vector._enable_hybrid_search is False -def test_create_collection_branches(tencent_module, monkeypatch): +def test_create_collection_branches(tencent_module, monkeypatch: pytest.MonkeyPatch): vector = tencent_module.TencentVector("collection_1", _config(tencent_module)) lock = MagicMock() @@ -279,7 +279,7 @@ def test_create_add_delete_and_search_behaviour(tencent_module): vector._client.drop_collection.assert_called_once() -def test_tencent_factory_existing_and_generated_collection(tencent_module, monkeypatch): +def test_tencent_factory_existing_and_generated_collection(tencent_module, monkeypatch: pytest.MonkeyPatch): factory = tencent_module.TencentVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py b/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py index bdbed2f740..ed03cbee88 100644 --- 
a/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py +++ b/api/providers/vdb/vdb-tidb-vector/tests/unit_tests/test_tidb_vector.py @@ -46,7 +46,7 @@ def test_tidb_config_validation(tidb_module, field, value, message): tidb_module.TiDBVectorConfig.model_validate(values) -def test_init_get_type_and_distance_func(tidb_module, monkeypatch): +def test_init_get_type_and_distance_func(tidb_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(tidb_module, "create_engine", MagicMock(return_value="engine")) vector = tidb_module.TiDBVector("collection_1", _config(tidb_module), distance_func="L2") @@ -63,7 +63,7 @@ def test_init_get_type_and_distance_func(tidb_module, monkeypatch): assert vector._get_distance_func() == "VEC_COSINE_DISTANCE" -def test_table_builds_columns_with_tidb_vector_type(tidb_module, monkeypatch): +def test_table_builds_columns_with_tidb_vector_type(tidb_module, monkeypatch: pytest.MonkeyPatch): fake_tidb_vector = types.ModuleType("tidb_vector") fake_tidb_sqlalchemy = types.ModuleType("tidb_vector.sqlalchemy") @@ -107,7 +107,7 @@ def test_create_calls_collection_and_add_texts(tidb_module): assert vector._dimension == 2 -def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch): +def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -127,7 +127,7 @@ def test_create_collection_skips_when_cache_hit(tidb_module, monkeypatch): tidb_module.redis_client.set.assert_not_called() -def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monkeypatch): +def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -160,7 +160,7 @@ def test_create_collection_executes_create_sql_and_sets_cache(tidb_module, monke 
tidb_module.redis_client.set.assert_called_once() -def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch): +def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch: pytest.MonkeyPatch): class _InsertStmt: def __init__(self, table): self.table = table @@ -198,7 +198,7 @@ def test_add_texts_batches_inserts_and_returns_ids(tidb_module, monkeypatch): @pytest.fixture -def tidb_vector_with_session(tidb_module, monkeypatch): +def tidb_vector_with_session(tidb_module, monkeypatch: pytest.MonkeyPatch): vector = tidb_module.TiDBVector.__new__(tidb_module.TiDBVector) vector._collection_name = "collection_1" vector._engine = MagicMock() @@ -354,7 +354,7 @@ def test_delete_by_metadata_field_does_nothing_when_no_ids(tidb_module): # Test search_by_vector filters and scores -def test_search_by_vector_filters_and_scores(tidb_module, monkeypatch): +def test_search_by_vector_filters_and_scores(tidb_module, monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.execute.return_value = [ ('{"doc_id":"id-1","document_id":"d-1"}', "text-1", 0.2), @@ -392,7 +392,7 @@ def test_search_by_vector_filters_and_scores(tidb_module, monkeypatch): # Test delete drops table -def test_delete_drops_table(tidb_module, monkeypatch): +def test_delete_drops_table(tidb_module, monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.execute.return_value = None @@ -413,7 +413,7 @@ def test_delete_drops_table(tidb_module, monkeypatch): assert "DROP TABLE IF EXISTS collection_1" in drop_sql -def test_tidb_factory_uses_existing_or_generated_collection(tidb_module, monkeypatch): +def test_tidb_factory_uses_existing_or_generated_collection(tidb_module, monkeypatch: pytest.MonkeyPatch): factory = tidb_module.TiDBVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py b/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py index 
a884275c89..55d27ad264 100644 --- a/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py +++ b/api/providers/vdb/vdb-upstash/tests/unit_tests/test_upstash_vector.py @@ -36,7 +36,7 @@ def _build_fake_upstash_module(): @pytest.fixture -def upstash_module(monkeypatch): +def upstash_module(monkeypatch: pytest.MonkeyPatch): # Remove patched modules if present for modname in ["upstash_vector", "dify_vdb_upstash.upstash_vector"]: if modname in sys.modules: @@ -65,7 +65,7 @@ def test_upstash_config_validation(upstash_module, field, value, message): upstash_module.UpstashVectorConfig.model_validate(values) -def test_init_get_type_and_dimension(upstash_module, monkeypatch): +def test_init_get_type_and_dimension(upstash_module, monkeypatch: pytest.MonkeyPatch): vector = upstash_module.UpstashVector("collection_1", _config(upstash_module)) assert vector.get_type() == upstash_module.VectorType.UPSTASH @@ -162,7 +162,7 @@ def test_search_by_vector_filter_threshold_and_delete(upstash_module): vector.index.reset.assert_called_once() -def test_upstash_factory_uses_existing_or_generated_collection(upstash_module, monkeypatch): +def test_upstash_factory_uses_existing_or_generated_collection(upstash_module, monkeypatch: pytest.MonkeyPatch): factory = upstash_module.UpstashVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py b/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py index 4dfb956c00..32f47c67ed 100644 --- a/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py +++ b/api/providers/vdb/vdb-vastbase/tests/unit_tests/test_vastbase_vector.py @@ -37,7 +37,7 @@ def _build_fake_psycopg2_modules(): @pytest.fixture -def vastbase_module(monkeypatch): +def vastbase_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_psycopg2_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -93,7 +93,7 @@ def 
test_vastbase_config_rejects_invalid_connection_window(vastbase_module): ) -def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch): +def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch: pytest.MonkeyPatch): pool = MagicMock() monkeypatch.setattr(vastbase_module.psycopg2.pool, "SimpleConnectionPool", MagicMock(return_value=pool)) @@ -114,7 +114,7 @@ def test_init_and_get_cursor_context_manager(vastbase_module, monkeypatch): pool.putconn.assert_called_once_with(conn) -def test_create_and_add_texts(vastbase_module, monkeypatch): +def test_create_and_add_texts(vastbase_module, monkeypatch: pytest.MonkeyPatch): vector = vastbase_module.VastbaseVector.__new__(vastbase_module.VastbaseVector) vector.table_name = "embedding_collection_1" vector._create_collection = MagicMock() @@ -205,7 +205,7 @@ def test_search_by_vector_and_full_text(vastbase_module): assert full_docs[0].page_content == "full-text" -def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeypatch): +def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -240,7 +240,7 @@ def test_create_collection_cache_and_dimension_branches(vastbase_module, monkeyp vastbase_module.redis_client.set.assert_called() -def test_vastbase_factory_uses_existing_or_generated_collection(vastbase_module, monkeypatch): +def test_vastbase_factory_uses_existing_or_generated_collection(vastbase_module, monkeypatch: pytest.MonkeyPatch): factory = vastbase_module.VastbaseVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", diff --git a/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py b/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py index 544b8163be..6559ad97d2 100644 --- a/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py +++ 
b/api/providers/vdb/vdb-vikingdb/tests/unit_tests/test_vikingdb_vector.py @@ -79,7 +79,7 @@ def _build_fake_vikingdb_modules(): @pytest.fixture -def vikingdb_module(monkeypatch): +def vikingdb_module(monkeypatch: pytest.MonkeyPatch): for name, module in _build_fake_vikingdb_modules().items(): monkeypatch.setitem(sys.modules, name, module) @@ -117,7 +117,7 @@ def test_init_get_type_and_has_checks(vikingdb_module): assert vector._has_index() is False -def test_create_collection_cache_and_creation_paths(vikingdb_module, monkeypatch): +def test_create_collection_cache_and_creation_paths(vikingdb_module, monkeypatch: pytest.MonkeyPatch): lock = MagicMock() lock.__enter__.return_value = None lock.__exit__.return_value = None @@ -253,7 +253,7 @@ def test_delete_drops_index_and_collection_when_present(vikingdb_module): vector._client.drop_collection.assert_not_called() -def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, monkeypatch): +def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, monkeypatch: pytest.MonkeyPatch): factory = vikingdb_module.VikingDBVectorFactory() dataset_with_index = SimpleNamespace( id="dataset-1", @@ -293,7 +293,9 @@ def test_vikingdb_factory_validates_config_and_builds_vector(vikingdb_module, mo ("VIKINGDB_SCHEME", "VIKINGDB_SCHEME should not be None"), ], ) -def test_vikingdb_factory_raises_when_required_config_missing(vikingdb_module, monkeypatch, field, message): +def test_vikingdb_factory_raises_when_required_config_missing( + vikingdb_module, monkeypatch: pytest.MonkeyPatch, field, message +): factory = vikingdb_module.VikingDBVectorFactory() dataset = SimpleNamespace( id="dataset-1", index_struct_dict={"vector_store": {"class_prefix": "existing"}}, index_struct=None diff --git a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py index 3b5e822b90..90131fe98d 100644 --- 
a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py @@ -13,7 +13,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole -from models.enums import ConversationFromSource +from models.enums import AppStatus, ConversationFromSource from models.model import AppMode from services.app_generate_service import AppGenerateService @@ -28,7 +28,7 @@ class TestChatMessageApiPermissions: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL return app @pytest.fixture @@ -78,7 +78,7 @@ class TestChatMessageApiPermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, @@ -130,7 +130,7 @@ class TestChatMessageApiPermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, diff --git a/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py b/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py index 309a0b015a..c4db0d5111 100644 --- a/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py +++ b/api/tests/integration_tests/controllers/console/app/test_feedback_export_api.py @@ -14,7 +14,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole -from models.enums import FeedbackFromSource, FeedbackRating +from models.enums import AppStatus, FeedbackFromSource, FeedbackRating from models.model import AppMode, 
MessageFeedback from services.feedback_service import FeedbackService @@ -29,7 +29,7 @@ class TestFeedbackExportApi: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL app.name = "Test App" return app @@ -135,7 +135,7 @@ class TestFeedbackExportApi: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, @@ -167,7 +167,13 @@ class TestFeedbackExportApi: mock_export_feedbacks.assert_called_once() def test_feedback_export_csv_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account, sample_feedback_data + self, + test_client: FlaskClient, + auth_header, + monkeypatch: pytest.MonkeyPatch, + mock_app_model, + mock_account, + sample_feedback_data, ): """Test feedback export in CSV format.""" @@ -202,7 +208,13 @@ class TestFeedbackExportApi: assert "text/csv" in response.content_type def test_feedback_export_json_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account, sample_feedback_data + self, + test_client: FlaskClient, + auth_header, + monkeypatch: pytest.MonkeyPatch, + mock_app_model, + mock_account, + sample_feedback_data, ): """Test feedback export in JSON format.""" @@ -246,7 +258,7 @@ class TestFeedbackExportApi: assert "application/json" in response.content_type def test_feedback_export_with_filters( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with various filters.""" @@ -287,7 +299,7 @@ class TestFeedbackExportApi: ) def test_feedback_export_invalid_date_format( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: 
pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with invalid date format.""" @@ -312,7 +324,7 @@ class TestFeedbackExportApi: assert "Parameter validation error" in response_json["error"] def test_feedback_export_server_error( - self, test_client: FlaskClient, auth_header, monkeypatch, mock_app_model, mock_account + self, test_client: FlaskClient, auth_header, monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account ): """Test feedback export with server error.""" diff --git a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py index 04945e57a0..ab08c7a6d8 100644 --- a/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_model_config_permissions.py @@ -11,6 +11,7 @@ from controllers.console.app import wraps from libs.datetime_utils import naive_utc_now from models import App, Tenant from models.account import Account, TenantAccountJoin, TenantAccountRole +from models.enums import AppStatus from models.model import AppMode from services.app_model_config_service import AppModelConfigService @@ -25,7 +26,7 @@ class TestModelConfigResourcePermissions: app.id = str(uuid.uuid4()) app.mode = AppMode.CHAT app.tenant_id = str(uuid.uuid4()) - app.status = "normal" + app.status = AppStatus.NORMAL app.app_model_config_id = str(uuid.uuid4()) return app @@ -73,7 +74,7 @@ class TestModelConfigResourcePermissions: self, test_client: FlaskClient, auth_header, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, mock_app_model, mock_account, role: TenantAccountRole, diff --git a/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py b/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py index a876b0c4aa..7d0b575262 100644 --- 
a/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py +++ b/api/tests/integration_tests/core/datasource/test_datasource_manager_integration.py @@ -1,5 +1,7 @@ from collections.abc import Generator +from pytest_mock import MockerFixture + from core.datasource.datasource_manager import DatasourceManager from core.datasource.entities.datasource_entities import DatasourceMessage from graphon.node_events import StreamCompletedEvent @@ -19,7 +21,7 @@ def _gen_var_stream() -> Generator[DatasourceMessage, None, None]: ) -def test_stream_node_events_accumulates_variables(mocker): +def test_stream_node_events_accumulates_variables(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_var_stream()) events = list( DatasourceManager.stream_node_events( diff --git a/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py b/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py index 2392084c36..2c1e667c58 100644 --- a/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py +++ b/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py @@ -1,3 +1,5 @@ +from pytest_mock import MockerFixture + from core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY from core.workflow.nodes.datasource.datasource_node import DatasourceNode from core.workflow.nodes.datasource.entities import DatasourceNodeData @@ -44,7 +46,7 @@ class _GP: call_depth = 0 -def test_node_integration_minimal_stream(mocker): +def test_node_integration_minimal_stream(mocker: MockerFixture): sys_d = { "sys": { "datasource_type": "online_document", diff --git a/api/tests/integration_tests/workflow/nodes/test_tool.py b/api/tests/integration_tests/workflow/nodes/test_tool.py index a8e9422c1e..493330e02b 100644 --- a/api/tests/integration_tests/workflow/nodes/test_tool.py +++ 
b/api/tests/integration_tests/workflow/nodes/test_tool.py @@ -2,6 +2,8 @@ import time import uuid from unittest.mock import MagicMock, patch +import pytest + from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.tools.utils.configuration import ToolParameterConfigurationManager from core.workflow.node_factory import DifyNodeFactory @@ -71,7 +73,7 @@ def init_tool_node(config: dict): return node -def test_tool_variable_invoke(monkeypatch): +def test_tool_variable_invoke(monkeypatch: pytest.MonkeyPatch): node = init_tool_node( config={ "id": "1", @@ -106,7 +108,7 @@ def test_tool_variable_invoke(monkeypatch): assert item.node_run_result.outputs.get("text") is not None -def test_tool_mixed_invoke(monkeypatch): +def test_tool_mixed_invoke(monkeypatch: pytest.MonkeyPatch): node = init_tool_node( config={ "id": "1", diff --git a/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py b/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py index 178fc2e4fb..390795486b 100644 --- a/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py +++ b/api/tests/test_containers_integration_tests/libs/test_rate_limiter_integration.py @@ -11,7 +11,7 @@ from libs import helper as helper_module @pytest.mark.usefixtures("flask_app_with_containers") -def test_rate_limiter_counts_multiple_attempts_in_same_second(monkeypatch): +def test_rate_limiter_counts_multiple_attempts_in_same_second(monkeypatch: pytest.MonkeyPatch): prefix = f"test_rate_limit:{uuid.uuid4().hex}" limiter = helper_module.RateLimiter(prefix=prefix, max_attempts=2, time_window=60) key = limiter._get_key("203.0.113.10") diff --git a/api/tests/test_containers_integration_tests/services/test_agent_service.py b/api/tests/test_containers_integration_tests/services/test_agent_service.py index 00a2f9a59f..cbd939c7a4 100644 --- a/api/tests/test_containers_integration_tests/services/test_agent_service.py +++ 
b/api/tests/test_containers_integration_tests/services/test_agent_service.py @@ -6,7 +6,7 @@ from faker import Faker from sqlalchemy.orm import Session from core.plugin.impl.exc import PluginDaemonClientSideError -from models import Account +from models import Account, CreatorUserRole from models.enums import ConversationFromSource, MessageFileBelongsTo from models.model import AppModelConfig, Conversation, EndUser, Message, MessageAgentThought from services.account_service import AccountService, TenantService @@ -246,7 +246,7 @@ class TestAgentService: tool_input=json.dumps({"test_tool": {"input": "test_input"}}), observation=json.dumps({"test_tool": {"output": "test_output"}}), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(thought1) @@ -294,7 +294,7 @@ class TestAgentService: agent_thoughts = self._create_test_agent_thoughts(db_session_with_containers, message) # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result structure assert result is not None @@ -370,7 +370,7 @@ class TestAgentService: # Execute the method under test with non-existent message with pytest.raises(ValueError, match="Message not found"): - AgentService.get_agent_logs(app, str(conversation.id), fake.uuid4()) + AgentService.get_agent_logs(app, conversation.id, fake.uuid4()) def test_get_agent_logs_with_end_user( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -451,7 +451,7 @@ class TestAgentService: db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -523,7 +523,7 @@ class TestAgentService: 
db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -561,14 +561,14 @@ class TestAgentService: tool_input=json.dumps({"error_tool": {"input": "test_input"}}), observation=json.dumps({"error_tool": {"output": "error_output"}}), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(thought_with_error) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -592,7 +592,7 @@ class TestAgentService: conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -654,7 +654,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match="App model config not found"): - AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + AgentService.get_agent_logs(app, conversation.id, message.id) def test_get_agent_logs_agent_config_not_found( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -673,7 +673,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match="Agent config not found"): - AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + AgentService.get_agent_logs(app, conversation.id, message.id) def 
test_list_agent_providers_success( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -687,7 +687,7 @@ class TestAgentService: app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) # Execute the method under test - result = AgentService.list_agent_providers(str(account.id), str(app.tenant_id)) + result = AgentService.list_agent_providers(account.id, app.tenant_id) # Verify the result assert result is not None @@ -696,7 +696,7 @@ class TestAgentService: # Verify the mock was called correctly mock_plugin_client = mock_external_service_dependencies["plugin_agent_client"].return_value - mock_plugin_client.fetch_agent_strategy_providers.assert_called_once_with(str(app.tenant_id)) + mock_plugin_client.fetch_agent_strategy_providers.assert_called_once_with(app.tenant_id) def test_get_agent_provider_success(self, db_session_with_containers: Session, mock_external_service_dependencies): """ @@ -710,7 +710,7 @@ class TestAgentService: provider_name = "test_provider" # Execute the method under test - result = AgentService.get_agent_provider(str(account.id), str(app.tenant_id), provider_name) + result = AgentService.get_agent_provider(account.id, app.tenant_id, provider_name) # Verify the result assert result is not None @@ -718,7 +718,7 @@ class TestAgentService: # Verify the mock was called correctly mock_plugin_client = mock_external_service_dependencies["plugin_agent_client"].return_value - mock_plugin_client.fetch_agent_strategy_provider.assert_called_once_with(str(app.tenant_id), provider_name) + mock_plugin_client.fetch_agent_strategy_provider.assert_called_once_with(app.tenant_id, provider_name) def test_get_agent_provider_plugin_error( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -740,7 +740,7 @@ class TestAgentService: # Execute the method under test with pytest.raises(ValueError, match=error_message): - 
AgentService.get_agent_provider(str(account.id), str(app.tenant_id), provider_name) + AgentService.get_agent_provider(account.id, app.tenant_id, provider_name) def test_get_agent_logs_with_complex_tool_data( self, db_session_with_containers: Session, mock_external_service_dependencies @@ -796,14 +796,14 @@ class TestAgentService: {"tool1": {"output1": "result1"}, "tool2": {"output2": "result2"}, "tool3": {"output3": "result3"}} ), tokens=100, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(complex_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -891,14 +891,14 @@ class TestAgentService: observation=json.dumps({"file_tool": {"output": "test_output"}}), message_files=json.dumps(["file1", "file2"]), tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(thought_with_files) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -926,7 +926,7 @@ class TestAgentService: mock_external_service_dependencies["current_user"].timezone = "Asia/Shanghai" # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -960,14 +960,14 @@ class TestAgentService: tool_input="", # Empty input observation="", # Empty observation tokens=50, - created_by_role="account", + 
created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(empty_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result assert result is not None @@ -1001,14 +1001,14 @@ class TestAgentService: tool_input="invalid json", # Malformed JSON observation="invalid json", # Malformed JSON tokens=50, - created_by_role="account", + created_by_role=CreatorUserRole.ACCOUNT, created_by=message.from_account_id, ) db_session_with_containers.add(malformed_thought) db_session_with_containers.commit() # Execute the method under test - result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + result = AgentService.get_agent_logs(app, conversation.id, message.id) # Verify the result - should handle malformed JSON gracefully assert result is not None diff --git a/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py index 7c5d2390ba..a5ec06dc13 100644 --- a/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py +++ b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py @@ -198,7 +198,7 @@ class TestAppDslService: def test_check_version_compatibility_newer_version_returns_pending(self): assert _check_version_compatibility("99.0.0") == ImportStatus.PENDING - def test_check_version_compatibility_major_older_returns_pending(self, monkeypatch): + def test_check_version_compatibility_major_older_returns_pending(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(app_dsl_service, "CURRENT_DSL_VERSION", "1.0.0") assert _check_version_compatibility("0.9.9") == ImportStatus.PENDING @@ -272,7 +272,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED 
assert "Missing app data" in result.error - def test_import_app_yaml_error_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): def bad_safe_load(_content: str): raise yaml.YAMLError("bad") @@ -287,7 +289,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert result.error.startswith("Invalid YAML format:") - def test_import_app_unexpected_error_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_unexpected_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): monkeypatch.setattr( AppDslService, "_create_or_update_app", @@ -305,7 +309,9 @@ class TestAppDslService: # ── Import: YAML URL ────────────────────────────────────────────── - def test_import_app_yaml_url_fetch_error_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_url_fetch_error_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): monkeypatch.setattr( app_dsl_service.ssrf_proxy, "get", @@ -321,7 +327,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Error fetching YAML from URL: boom" in result.error - def test_import_app_yaml_url_empty_content_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_url_empty_content_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): response = MagicMock() response.content = b"" response.raise_for_status.return_value = None @@ -336,7 +344,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "Empty content" in result.error - def test_import_app_yaml_url_file_too_large_returns_failed(self, db_session_with_containers: Session, monkeypatch): + def 
test_import_app_yaml_url_file_too_large_returns_failed( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): response = MagicMock() response.content = b"x" * (DSL_MAX_SIZE + 1) response.raise_for_status.return_value = None @@ -379,7 +389,9 @@ class TestAppDslService: assert result.imported_dsl_version == "99.0.0" assert requested_urls == [yaml_url] - def test_import_app_yaml_url_github_blob_rewrites_to_raw(self, db_session_with_containers: Session, monkeypatch): + def test_import_app_yaml_url_github_blob_rewrites_to_raw( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): yaml_url = "https://github.com/acme/repo/blob/main/app.yml" raw_url = "https://raw.githubusercontent.com/acme/repo/main/app.yml" yaml_bytes = _pending_yaml_content() @@ -491,7 +503,7 @@ class TestAppDslService: @pytest.mark.parametrize("has_workflow", [True, False]) def test_import_app_legacy_versions_extract_dependencies( - self, db_session_with_containers: Session, monkeypatch, has_workflow: bool + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch, has_workflow: bool ): monkeypatch.setattr( AppDslService, @@ -554,7 +566,9 @@ class TestAppDslService: assert result.status == ImportStatus.FAILED assert "expired" in result.error - def test_confirm_import_success_deletes_redis_key(self, db_session_with_containers: Session, monkeypatch): + def test_confirm_import_success_deletes_redis_key( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): import_id = str(uuid4()) redis_key = f"{IMPORT_INFO_REDIS_KEY_PREFIX}{import_id}" @@ -614,7 +628,9 @@ class TestAppDslService: result = service.check_dependencies(app_model=app_model) assert result.leaked_dependencies == [] - def test_check_dependencies_calls_analysis_service(self, db_session_with_containers: Session, monkeypatch): + def test_check_dependencies_calls_analysis_service( + self, db_session_with_containers: Session, monkeypatch: 
pytest.MonkeyPatch + ): app_id = str(uuid4()) pending = CheckDependenciesPendingData(dependencies=[], app_id=app_id) redis_client.setex( @@ -665,7 +681,9 @@ class TestAppDslService: with pytest.raises(ValueError, match="loss app mode"): service._create_or_update_app(app=None, data={"app": {}}, account=_account_mock()) - def test_create_or_update_app_existing_app_updates_fields(self, db_session_with_containers: Session, monkeypatch): + def test_create_or_update_app_existing_app_updates_fields( + self, db_session_with_containers: Session, monkeypatch: pytest.MonkeyPatch + ): fixed_now = object() monkeypatch.setattr(app_dsl_service, "naive_utc_now", lambda: fixed_now) @@ -778,8 +796,8 @@ class TestAppDslService: service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Missing model_config"): service._create_or_update_app( - app=_app_stub(mode=AppMode.CHAT.value), - data={"app": {"mode": AppMode.CHAT.value}}, + app=_app_stub(mode=AppMode.CHAT), + data={"app": {"mode": AppMode.CHAT}}, account=_account_mock(), ) @@ -794,7 +812,7 @@ class TestAppDslService: service._create_or_update_app( app=app, data={ - "app": {"mode": AppMode.CHAT.value}, + "app": {"mode": AppMode.CHAT}, "model_config": {"model": {"provider": "openai"}}, }, account=account, @@ -807,14 +825,14 @@ class TestAppDslService: service = AppDslService(db_session_with_containers) with pytest.raises(ValueError, match="Invalid app mode"): service._create_or_update_app( - app=_app_stub(mode=AppMode.RAG_PIPELINE.value), - data={"app": {"mode": AppMode.RAG_PIPELINE.value}}, + app=_app_stub(mode=AppMode.RAG_PIPELINE), + data={"app": {"mode": AppMode.RAG_PIPELINE}}, account=_account_mock(), ) # ── Export ───────────────────────────────────────────────────────── - def test_export_dsl_delegates_by_mode(self, monkeypatch): + def test_export_dsl_delegates_by_mode(self, monkeypatch: pytest.MonkeyPatch): workflow_calls: list[bool] = [] model_calls: list[bool] = [] monkeypatch.setattr( @@ 
-836,14 +854,14 @@ class TestAppDslService: assert workflow_calls == [True] chat_app = _app_stub( - mode=AppMode.CHAT.value, + mode=AppMode.CHAT, icon_type="emoji", app_model_config=SimpleNamespace(to_dict=lambda: {"agent_mode": {"tools": []}}), ) AppDslService.export_dsl(chat_app) assert model_calls == [True] - def test_export_dsl_preserves_icon_and_icon_type(self, monkeypatch): + def test_export_dsl_preserves_icon_and_icon_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( AppDslService, "_append_workflow_export_data", @@ -1011,7 +1029,7 @@ class TestAppDslService: # ── Workflow Export Data ─────────────────────────────────────────── - def test_append_workflow_export_data_filters_and_overrides(self, monkeypatch): + def test_append_workflow_export_data_filters_and_overrides(self, monkeypatch: pytest.MonkeyPatch): workflow_dict = { "graph": { "nodes": [ @@ -1111,7 +1129,7 @@ class TestAppDslService: assert nodes[5]["data"]["subscription_id"] == "" assert export_data["dependencies"] == [{"tenant": _DEFAULT_TENANT_ID, "dep": "dep-1"}] - def test_append_workflow_export_data_missing_workflow_raises(self, monkeypatch): + def test_append_workflow_export_data_missing_workflow_raises(self, monkeypatch: pytest.MonkeyPatch): workflow_service = MagicMock() workflow_service.get_draft_workflow.return_value = None monkeypatch.setattr(app_dsl_service, "WorkflowService", lambda: workflow_service) @@ -1126,7 +1144,7 @@ class TestAppDslService: # ── Model Config Export Data ────────────────────────────────────── - def test_append_model_config_export_data_filters_credential_id(self, monkeypatch): + def test_append_model_config_export_data_filters_credential_id(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( AppDslService, "_extract_dependencies_from_model_config", @@ -1160,7 +1178,7 @@ class TestAppDslService: # ── Dependency Extraction ───────────────────────────────────────── - def test_extract_dependencies_from_workflow_graph_covers_all_node_types(self, 
monkeypatch): + def test_extract_dependencies_from_workflow_graph_covers_all_node_types(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_tool_dependency", @@ -1230,7 +1248,7 @@ class TestAppDslService: "model:m4", ] - def test_extract_dependencies_from_workflow_graph_handles_exceptions(self, monkeypatch): + def test_extract_dependencies_from_workflow_graph_handles_exceptions(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.ToolNodeData, "model_validate", @@ -1241,7 +1259,7 @@ class TestAppDslService: ) assert deps == [] - def test_extract_dependencies_from_model_config_parses_providers(self, monkeypatch): + def test_extract_dependencies_from_model_config_parses_providers(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_model_provider_dependency", @@ -1264,7 +1282,7 @@ class TestAppDslService: ) assert deps == ["model:p1", "model:p2", "tool:t1"] - def test_extract_dependencies_from_model_config_handles_exceptions(self, monkeypatch): + def test_extract_dependencies_from_model_config_handles_exceptions(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "analyze_model_provider_dependency", @@ -1278,7 +1296,7 @@ class TestAppDslService: def test_get_leaked_dependencies_empty_returns_empty(self): assert AppDslService.get_leaked_dependencies(_DEFAULT_TENANT_ID, []) == [] - def test_get_leaked_dependencies_delegates(self, monkeypatch): + def test_get_leaked_dependencies_delegates(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.DependenciesAnalysisService, "get_leaked_dependencies", @@ -1289,7 +1307,7 @@ class TestAppDslService: # ── Encryption/Decryption ───────────────────────────────────────── - def test_encrypt_decrypt_dataset_id_respects_config(self, monkeypatch): + def test_encrypt_decrypt_dataset_id_respects_config(self, 
monkeypatch: pytest.MonkeyPatch): tenant_id = _DEFAULT_TENANT_ID dataset_uuid = "00000000-0000-0000-0000-000000000000" @@ -1314,7 +1332,7 @@ class TestAppDslService: value = "00000000-0000-0000-0000-000000000000" assert AppDslService.decrypt_dataset_id(encrypted_data=value, tenant_id=_DEFAULT_TENANT_ID) == value - def test_decrypt_dataset_id_returns_none_on_invalid_data(self, monkeypatch): + def test_decrypt_dataset_id_returns_none_on_invalid_data(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.dify_config, "DSL_EXPORT_ENCRYPT_DATASET_ID", @@ -1322,7 +1340,7 @@ class TestAppDslService: ) assert AppDslService.decrypt_dataset_id(encrypted_data="not-base64", tenant_id=_DEFAULT_TENANT_ID) is None - def test_decrypt_dataset_id_returns_none_when_decrypted_is_not_uuid(self, monkeypatch): + def test_decrypt_dataset_id_returns_none_when_decrypted_is_not_uuid(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( app_dsl_service.dify_config, "DSL_EXPORT_ENCRYPT_DATASET_ID", diff --git a/api/tests/test_containers_integration_tests/services/test_app_service.py b/api/tests/test_containers_integration_tests/services/test_app_service.py index b695ae9fd9..837b63d1ea 100644 --- a/api/tests/test_containers_integration_tests/services/test_app_service.py +++ b/api/tests/test_containers_integration_tests/services/test_app_service.py @@ -6,6 +6,7 @@ from sqlalchemy.orm import Session from constants.model_template import default_app_templates from models import Account +from models.enums import AppStatus, CustomizeTokenStrategy from models.model import App, IconType, Site from services.account_service import AccountService, TenantService from tests.test_containers_integration_tests.helpers import generate_valid_password @@ -1079,9 +1080,9 @@ class TestAppService: site.app_id = app.id site.code = fake.postalcode() site.title = fake.company() - site.status = "normal" + site.status = AppStatus.NORMAL site.default_language = "en-US" - 
site.customize_token_strategy = "uuid" + site.customize_token_strategy = CustomizeTokenStrategy.UUID db_session_with_containers.add(site) db_session_with_containers.commit() diff --git a/api/tests/test_containers_integration_tests/services/test_conversation_service.py b/api/tests/test_containers_integration_tests/services/test_conversation_service.py index 8aa10129c1..5f3914eb19 100644 --- a/api/tests/test_containers_integration_tests/services/test_conversation_service.py +++ b/api/tests/test_containers_integration_tests/services/test_conversation_service.py @@ -10,6 +10,7 @@ from sqlalchemy import select from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom +from models import TenantAccountRole from models.account import Account, Tenant, TenantAccountJoin from models.enums import ConversationFromSource from models.model import App, Conversation, EndUser, Message, MessageAnnotation @@ -22,7 +23,7 @@ from services.message_service import MessageService class ConversationServiceIntegrationTestDataFactory: @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -41,7 +42,7 @@ class ConversationServiceIntegrationTestDataFactory: tenant_join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role="owner", + role=TenantAccountRole.OWNER, current=True, ) db_session_with_containers.add(tenant_join) @@ -155,7 +156,7 @@ class ConversationServiceIntegrationTestDataFactory: total_price=Decimal(0), currency="USD", status="normal", - invoke_from=InvokeFrom.WEB_APP.value, + invoke_from=InvokeFrom.WEB_APP, from_source=ConversationFromSource.API if isinstance(user, EndUser) else ConversationFromSource.CONSOLE, from_end_user_id=user.id if isinstance(user, EndUser) else None, from_account_id=user.id if isinstance(user, Account) 
else None, diff --git a/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py b/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py index 6c292dbc4b..853630ad65 100644 --- a/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py +++ b/api/tests/test_containers_integration_tests/services/test_conversation_service_variables.py @@ -25,7 +25,7 @@ from services.errors.conversation import ( class ConversationServiceVariableIntegrationFactory: @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() diff --git a/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py b/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py index 2bec703f0c..0c089e506b 100644 --- a/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py +++ b/api/tests/test_containers_integration_tests/services/test_dataset_service_document.py @@ -6,6 +6,7 @@ from unittest.mock import create_autospec, patch from uuid import uuid4 import pytest +from sqlalchemy.orm import Session from werkzeug.exceptions import Forbidden, NotFound from core.rag.index_processor.constant.index_type import IndexStructureType @@ -119,13 +120,13 @@ def current_user_mock(): yield current_user -def test_get_document_returns_none_when_document_id_is_missing(db_session_with_containers): +def test_get_document_returns_none_when_document_id_is_missing(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.get_document(dataset.id, None) is None -def test_get_document_queries_by_dataset_and_document_id(db_session_with_containers): +def 
test_get_document_queries_by_dataset_and_document_id(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset) @@ -135,7 +136,7 @@ def test_get_document_queries_by_dataset_and_document_id(db_session_with_contain assert result.id == document.id -def test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_containers): +def test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) result = DocumentService.get_documents_by_ids(dataset.id, []) @@ -143,7 +144,7 @@ def test_get_documents_by_ids_returns_empty_for_empty_input(db_session_with_cont assert result == [] -def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers): +def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) doc_a = DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset, name="a.txt") doc_b = DocumentServiceIntegrationFactory.create_document( @@ -158,13 +159,13 @@ def test_get_documents_by_ids_uses_single_batch_query(db_session_with_containers assert {document.id for document in result} == {doc_a.id, doc_b.id} -def test_update_documents_need_summary_returns_zero_for_empty_input(db_session_with_containers): +def test_update_documents_need_summary_returns_zero_for_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.update_documents_need_summary(dataset.id, []) == 0 -def test_update_documents_need_summary_updates_matching_non_qa_documents(db_session_with_containers): +def 
test_update_documents_need_summary_updates_matching_non_qa_documents(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) paragraph_doc = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -195,7 +196,7 @@ def test_update_documents_need_summary_updates_matching_non_qa_documents(db_sess assert refreshed_qa.need_summary is True -def test_get_document_download_url_uses_signed_url_helper(db_session_with_containers): +def test_get_document_download_url_uses_signed_url_helper(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -215,7 +216,7 @@ def test_get_document_download_url_uses_signed_url_helper(db_session_with_contai get_url.assert_called_once_with(upload_file_id=upload_file.id, as_attachment=True) -def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -232,7 +233,9 @@ def test_get_upload_file_id_for_upload_file_document_rejects_invalid_source_type ) -def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file_id(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file_id( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -248,7 +251,7 @@ def 
test_get_upload_file_id_for_upload_file_document_rejects_missing_upload_file ) -def test_get_upload_file_id_for_upload_file_document_returns_string_id(db_session_with_containers): +def test_get_upload_file_id_for_upload_file_document_returns_string_id(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -265,7 +268,9 @@ def test_get_upload_file_id_for_upload_file_document_returns_string_id(db_sessio assert result == "99" -def test_get_upload_file_for_upload_file_document_raises_when_file_service_returns_nothing(db_session_with_containers): +def test_get_upload_file_for_upload_file_document_raises_when_file_service_returns_nothing( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -278,7 +283,7 @@ def test_get_upload_file_for_upload_file_document_raises_when_file_service_retur DocumentService._get_upload_file_for_upload_file_document(document) -def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session_with_containers): +def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -296,7 +301,9 @@ def test_get_upload_file_for_upload_file_document_returns_upload_file(db_session assert result.id == upload_file.id -def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_documents(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_documents( + db_session_with_containers: Session, +): dataset = 
DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) with pytest.raises(NotFound, match="Document not found"): @@ -307,7 +314,9 @@ def test_get_upload_files_by_document_id_for_zip_download_raises_for_missing_doc ) -def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_access(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_access( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -329,7 +338,9 @@ def test_get_upload_files_by_document_id_for_zip_download_rejects_cross_tenant_a ) -def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload_files(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload_files( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -345,7 +356,9 @@ def test_get_upload_files_by_document_id_for_zip_download_rejects_missing_upload ) -def test_get_upload_files_by_document_id_for_zip_download_returns_document_keyed_mapping(db_session_with_containers): +def test_get_upload_files_by_document_id_for_zip_download_returns_document_keyed_mapping( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file_a = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -395,7 +408,7 @@ def test_prepare_document_batch_download_zip_raises_not_found_for_missing_datase def test_prepare_document_batch_download_zip_translates_permission_error_to_forbidden( - db_session_with_containers, + db_session_with_containers: Session, 
current_user_mock, ): dataset = DocumentServiceIntegrationFactory.create_dataset( @@ -418,7 +431,7 @@ def test_prepare_document_batch_download_zip_translates_permission_error_to_forb def test_prepare_document_batch_download_zip_returns_upload_files_in_requested_order( - db_session_with_containers, + db_session_with_containers: Session, current_user_mock, ): dataset = DocumentServiceIntegrationFactory.create_dataset( @@ -461,7 +474,7 @@ def test_prepare_document_batch_download_zip_returns_upload_files_in_requested_o assert download_name.endswith(".zip") -def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_containers): +def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) enabled_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -480,7 +493,9 @@ def test_get_document_by_dataset_id_returns_enabled_documents(db_session_with_co assert [document.id for document in result] == [enabled_document.id] -def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchived_documents(db_session_with_containers): +def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchived_documents( + db_session_with_containers: Session, +): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) available_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -501,7 +516,7 @@ def test_get_working_documents_by_dataset_id_returns_completed_enabled_unarchive assert [document.id for document in result] == [available_document.id] -def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db_session_with_containers): +def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db_session_with_containers: Session): dataset = 
DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) error_document = DocumentServiceIntegrationFactory.create_document( db_session_with_containers, @@ -526,7 +541,7 @@ def test_get_error_documents_by_dataset_id_returns_error_and_paused_documents(db assert {document.id for document in result} == {error_document.id, paused_document.id} -def test_get_batch_documents_filters_by_current_user_tenant(db_session_with_containers): +def test_get_batch_documents_filters_by_current_user_tenant(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) batch = f"batch-{uuid4()}" matching_document = DocumentServiceIntegrationFactory.create_document( @@ -549,7 +564,7 @@ def test_get_batch_documents_filters_by_current_user_tenant(db_session_with_cont assert [document.id for document in result] == [matching_document.id] -def test_get_document_file_detail_returns_upload_file(db_session_with_containers): +def test_get_document_file_detail_returns_upload_file(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -563,7 +578,7 @@ def test_get_document_file_detail_returns_upload_file(db_session_with_containers assert result.id == upload_file.id -def test_delete_document_emits_signal_and_commits(db_session_with_containers): +def test_delete_document_emits_signal_and_commits(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) upload_file = DocumentServiceIntegrationFactory.create_upload_file( db_session_with_containers, @@ -588,7 +603,7 @@ def test_delete_document_emits_signal_and_commits(db_session_with_containers): ) -def test_delete_documents_ignores_empty_input(db_session_with_containers): +def 
test_delete_documents_ignores_empty_input(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) with patch("services.dataset_service.batch_clean_document_task.delay") as delay: @@ -597,7 +612,7 @@ def test_delete_documents_ignores_empty_input(db_session_with_containers): delay.assert_not_called() -def test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_with_containers): +def test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) dataset.chunk_structure = IndexStructureType.PARAGRAPH_INDEX db_session_with_containers.commit() @@ -637,14 +652,14 @@ def test_delete_documents_deletes_rows_and_dispatches_cleanup_task(db_session_wi assert set(args[3]) == {upload_file_a.id, upload_file_b.id} -def test_get_documents_position_returns_next_position_when_documents_exist(db_session_with_containers): +def test_get_documents_position_returns_next_position_when_documents_exist(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) DocumentServiceIntegrationFactory.create_document(db_session_with_containers, dataset=dataset, position=3) assert DocumentService.get_documents_position(dataset.id) == 4 -def test_get_documents_position_defaults_to_one_when_dataset_is_empty(db_session_with_containers): +def test_get_documents_position_defaults_to_one_when_dataset_is_empty(db_session_with_containers: Session): dataset = DocumentServiceIntegrationFactory.create_dataset(db_session_with_containers) assert DocumentService.get_documents_position(dataset.id) == 1 diff --git a/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py b/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py index c0047df810..383a5f6374 100644 --- 
a/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py +++ b/api/tests/test_containers_integration_tests/services/test_document_service_display_status.py @@ -2,6 +2,7 @@ import datetime from uuid import uuid4 from sqlalchemy import select +from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType from models.dataset import Dataset, Document @@ -58,7 +59,7 @@ def _create_document( return document -def test_build_display_status_filters_available(db_session_with_containers): +def test_build_display_status_filters_available(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) available_doc = _create_document( db_session_with_containers, @@ -97,7 +98,7 @@ def test_build_display_status_filters_available(db_session_with_containers): assert [row.id for row in rows] == [available_doc.id] -def test_apply_display_status_filter_applies_when_status_present(db_session_with_containers): +def test_apply_display_status_filter_applies_when_status_present(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) waiting_doc = _create_document( db_session_with_containers, @@ -121,7 +122,7 @@ def test_apply_display_status_filter_applies_when_status_present(db_session_with assert [row.id for row in rows] == [waiting_doc.id] -def test_apply_display_status_filter_returns_same_when_invalid(db_session_with_containers): +def test_apply_display_status_filter_returns_same_when_invalid(db_session_with_containers: Session): dataset = _create_dataset(db_session_with_containers) doc1 = _create_document( db_session_with_containers, diff --git a/api/tests/test_containers_integration_tests/services/test_end_user_service.py b/api/tests/test_containers_integration_tests/services/test_end_user_service.py index 074d448aab..3f611d92f7 100644 --- a/api/tests/test_containers_integration_tests/services/test_end_user_service.py +++ 
b/api/tests/test_containers_integration_tests/services/test_end_user_service.py @@ -7,6 +7,7 @@ import pytest from sqlalchemy.orm import Session from core.app.entities.app_invoke_entities import InvokeFrom +from models import TenantAccountRole from models.account import Account, Tenant, TenantAccountJoin from models.model import App, DefaultEndUserSessionID, EndUser from services.end_user_service import EndUserService @@ -16,7 +17,7 @@ class TestEndUserServiceFactory: """Factory class for creating test data and mock objects for end user service tests.""" @staticmethod - def create_app_and_account(db_session_with_containers): + def create_app_and_account(db_session_with_containers: Session): tenant = Tenant(name=f"Tenant {uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() @@ -35,7 +36,7 @@ class TestEndUserServiceFactory: tenant_join = TenantAccountJoin( tenant_id=tenant.id, account_id=account.id, - role="owner", + role=TenantAccountRole.OWNER, current=True, ) db_session_with_containers.add(tenant_join) diff --git a/api/tests/test_containers_integration_tests/services/test_feature_service.py b/api/tests/test_containers_integration_tests/services/test_feature_service.py index f78aeaf984..a678e37b41 100644 --- a/api/tests/test_containers_integration_tests/services/test_feature_service.py +++ b/api/tests/test_containers_integration_tests/services/test_feature_service.py @@ -644,7 +644,7 @@ class TestFeatureService: assert result.max_plugin_package_size == 15728640 # Verify default license status - assert result.license.status.value == "none" + assert result.license.status == "none" assert result.license.expired_at == "" assert result.license.workspaces.enabled is False diff --git a/api/tests/test_containers_integration_tests/services/test_feedback_service.py b/api/tests/test_containers_integration_tests/services/test_feedback_service.py index 3dcd6586e2..a4663450d4 100644 --- 
a/api/tests/test_containers_integration_tests/services/test_feedback_service.py +++ b/api/tests/test_containers_integration_tests/services/test_feedback_service.py @@ -23,7 +23,7 @@ class TestFeedbackService: """Test FeedbackService methods.""" @pytest.fixture - def mock_db_session(self, monkeypatch): + def mock_db_session(self, monkeypatch: pytest.MonkeyPatch): """Mock database session.""" mock_session = mock.Mock() monkeypatch.setattr(db, "session", mock_session) diff --git a/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py b/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py index ce63e7a71a..bfc2af6509 100644 --- a/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py +++ b/api/tests/test_containers_integration_tests/services/test_human_input_delivery_test_service.py @@ -122,7 +122,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestUnsupportedError): handler.send_test(context=MagicMock(), method=MagicMock()) - def test_send_test_feature_disabled(self, monkeypatch): + def test_send_test_feature_disabled(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -137,7 +137,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="Email delivery is not available"): handler.send_test(context=context, method=method) - def test_send_test_mail_not_inited(self, monkeypatch): + def test_send_test_mail_not_inited(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -154,7 +154,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="Mail client is not initialized."): handler.send_test(context=context, method=method) - def test_send_test_no_recipients(self, monkeypatch): + def test_send_test_no_recipients(self, monkeypatch: pytest.MonkeyPatch): 
monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -173,7 +173,7 @@ class TestEmailDeliveryTestHandler: with pytest.raises(DeliveryTestError, match="No recipients configured"): handler.send_test(context=context, method=method) - def test_send_test_success(self, monkeypatch): + def test_send_test_success(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", @@ -209,7 +209,7 @@ class TestEmailDeliveryTestHandler: assert kwargs["to"] == "test@example.com" assert "RENDERED_Subj" in kwargs["subject"] - def test_send_test_sanitizes_subject(self, monkeypatch): + def test_send_test_sanitizes_subject(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( service_module.FeatureService, "get_features", diff --git a/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py b/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py index 44e5a82868..52ebc0131f 100644 --- a/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py +++ b/api/tests/test_containers_integration_tests/services/test_message_service_execution_extra_content.py @@ -1,6 +1,7 @@ from __future__ import annotations import pytest +from sqlalchemy.orm import Session from services.message_service import MessageService from tests.test_containers_integration_tests.helpers.execution_extra_content import ( @@ -9,7 +10,7 @@ from tests.test_containers_integration_tests.helpers.execution_extra_content imp @pytest.mark.usefixtures("flask_req_ctx_with_containers") -def test_pagination_returns_extra_contents(db_session_with_containers): +def test_pagination_returns_extra_contents(db_session_with_containers: Session): fixture = create_human_input_message_fixture(db_session_with_containers) pagination = MessageService.pagination_by_first_id( diff --git 
a/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py b/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py index 80289c448a..a8d295e6a9 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_create_segment_to_index_task.py @@ -16,7 +16,7 @@ from sqlalchemy.orm import Session from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType from extensions.ext_redis import redis_client -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Dataset, Document, DocumentSegment from models.enums import DataSourceType, DocumentCreatedFrom, IndexingStatus, SegmentStatus from tasks.create_segment_to_index_task import create_segment_to_index_task @@ -73,7 +73,7 @@ class TestCreateSegmentToIndexTask: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) @@ -82,7 +82,7 @@ class TestCreateSegmentToIndexTask: # Create tenant tenant = Tenant( name=fake.company(), - status="normal", + status=TenantStatus.NORMAL, plan="basic", ) db_session_with_containers.add(tenant) diff --git a/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py b/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py index a5a3cd10b5..5287cd06db 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_dataset_indexing_task.py @@ -12,7 +12,7 @@ from sqlalchemy.orm import Session from core.indexing_runner import DocumentIsPausedError from core.rag.index_processor.constant.index_type import IndexTechniqueType from 
enums.cloud_plan import CloudPlan -from models import Account, Tenant, TenantAccountJoin, TenantAccountRole +from models import Account, AccountStatus, Tenant, TenantAccountJoin, TenantAccountRole, TenantStatus from models.dataset import Dataset, Document from models.enums import DataSourceType, DocumentCreatedFrom, IndexingStatus from tasks.document_indexing_task import ( @@ -54,7 +54,7 @@ class _TrackedSessionContext: @pytest.fixture(autouse=True) -def _ensure_testcontainers_db(db_session_with_containers): +def _ensure_testcontainers_db(db_session_with_containers: Session): """Ensure this suite always runs on testcontainers infrastructure.""" return db_session_with_containers @@ -121,12 +121,12 @@ class TestDatasetIndexingTaskIntegration: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) db_session_with_containers.flush() - tenant = Tenant(name=fake.company(), status="normal") + tenant = Tenant(name=fake.company(), status=TenantStatus.NORMAL) db_session_with_containers.add(tenant) db_session_with_containers.flush() diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py index ff72232d12..c4895839c9 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_account_deletion_task.py @@ -5,6 +5,7 @@ from faker import Faker from sqlalchemy.orm import Session from libs.email_i18n import EmailType +from models import TenantStatus from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole from tasks.mail_account_deletion_task import send_account_deletion_verification_code, send_deletion_success_task @@ -55,7 +56,7 @@ class TestMailAccountDeletionTask: # Create tenant tenant = Tenant( name=fake.company(), - 
status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) db_session_with_containers.commit() diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py index 8e9da6aaaa..0eec166fe2 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_email_code_login_task.py @@ -18,6 +18,7 @@ from sqlalchemy import delete from sqlalchemy.orm import Session from libs.email_i18n import EmailType +from models import AccountStatus, TenantStatus from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole from tasks.mail_email_code_login import send_email_code_login_mail_task @@ -91,7 +92,7 @@ class TestSendEmailCodeLoginMailTask: email=fake.email(), name=fake.name(), interface_language="en-US", - status="active", + status=AccountStatus.ACTIVE, ) db_session_with_containers.add(account) @@ -120,7 +121,7 @@ class TestSendEmailCodeLoginMailTask: tenant = Tenant( name=fake.company(), plan="basic", - status="normal", + status=TenantStatus.NORMAL, ) db_session_with_containers.add(tenant) diff --git a/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py b/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py index f505361727..a452bee9f8 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_mail_human_input_delivery_task.py @@ -31,7 +31,7 @@ from tasks.mail_human_input_delivery_task import dispatch_human_input_email_task @pytest.fixture(autouse=True) -def cleanup_database(db_session_with_containers): +def cleanup_database(db_session_with_containers: Session): db_session_with_containers.execute(delete(HumanInputFormRecipient)) 
db_session_with_containers.execute(delete(HumanInputDelivery)) db_session_with_containers.execute(delete(HumanInputForm)) @@ -43,7 +43,7 @@ def cleanup_database(db_session_with_containers): db_session_with_containers.commit() -def _create_workspace_member(db_session_with_containers): +def _create_workspace_member(db_session_with_containers: Session): account = Account( email="owner@example.com", name="Owner", diff --git a/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py b/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py index 03c02ea341..204f533978 100644 --- a/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py +++ b/api/tests/test_containers_integration_tests/tasks/test_remove_app_and_related_data_task.py @@ -21,7 +21,7 @@ from tasks.remove_app_and_related_data_task import ( @pytest.fixture(autouse=True) -def cleanup_database(db_session_with_containers): +def cleanup_database(db_session_with_containers: Session): db_session_with_containers.execute(delete(WorkflowDraftVariable)) db_session_with_containers.execute(delete(WorkflowDraftVariableFile)) db_session_with_containers.execute(delete(UploadFile)) @@ -30,7 +30,7 @@ def cleanup_database(db_session_with_containers): db_session_with_containers.commit() -def _create_tenant_and_app(db_session_with_containers): +def _create_tenant_and_app(db_session_with_containers: Session): tenant = Tenant(name=f"test_tenant_{uuid.uuid4()}") db_session_with_containers.add(tenant) db_session_with_containers.flush() diff --git a/api/tests/unit_tests/controllers/common/test_helpers.py b/api/tests/unit_tests/controllers/common/test_helpers.py index 59c463177c..376a7a90c5 100644 --- a/api/tests/unit_tests/controllers/common/test_helpers.py +++ b/api/tests/unit_tests/controllers/common/test_helpers.py @@ -57,7 +57,7 @@ class TestGuessFileInfoFromResponse: (False, "bin"), ], ) - def 
test_generated_filename_when_missing(self, monkeypatch, magic_available, expected_ext): + def test_generated_filename_when_missing(self, monkeypatch: pytest.MonkeyPatch, magic_available, expected_ext): if magic_available: if helpers.magic is None: pytest.skip("python-magic is not installed, cannot run 'magic_available=True' test variant") @@ -155,7 +155,7 @@ class TestMagicImportWarnings: ) def test_magic_import_warning_per_platform( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, platform_name, expected_message, ): diff --git a/api/tests/unit_tests/controllers/common/test_schema.py b/api/tests/unit_tests/controllers/common/test_schema.py index fbca2539a5..6cf36e3bce 100644 --- a/api/tests/unit_tests/controllers/common/test_schema.py +++ b/api/tests/unit_tests/controllers/common/test_schema.py @@ -101,7 +101,7 @@ def test_register_schema_models_registers_multiple_models(): assert called_names == ["UserModel", "ProductModel"] -def test_register_schema_models_calls_register_schema_model(monkeypatch): +def test_register_schema_models_calls_register_schema_model(monkeypatch: pytest.MonkeyPatch): from controllers.common.schema import register_schema_models namespace = MagicMock(spec=Namespace) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py b/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py index 412edb9dfe..66d257ee66 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_datasets_segments.py @@ -68,7 +68,7 @@ def _segment(): ) -def test_get_segment_with_summary(monkeypatch): +def test_get_segment_with_summary(monkeypatch: pytest.MonkeyPatch): segment = _segment() summary = SimpleNamespace(summary_content="summary") diff --git a/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py b/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py index 09ed2aaf69..4fa5d21493 
100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_hit_testing.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound from controllers.console import console_ns @@ -35,7 +36,7 @@ def dataset(): @pytest.fixture(autouse=True) -def bypass_decorators(mocker): +def bypass_decorators(mocker: MockerFixture): """Bypass all decorators on the API method.""" mocker.patch( "controllers.console.datasets.hit_testing.setup_required", @@ -56,7 +57,7 @@ def bypass_decorators(mocker): class TestHitTestingApi: - def test_hit_testing_success(self, app, dataset, dataset_id): + def test_hit_testing_success(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -99,7 +100,7 @@ class TestHitTestingApi: assert "records" in result assert result["records"] == [] - def test_hit_testing_success_with_optional_record_fields(self, app, dataset, dataset_id): + def test_hit_testing_success_with_optional_record_fields(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -150,7 +151,7 @@ class TestHitTestingApi: assert result["query"] == payload["query"] assert result["records"] == records - def test_hit_testing_dataset_not_found(self, app, dataset_id): + def test_hit_testing_dataset_not_found(self, app: Flask, dataset_id): api = HitTestingApi() method = unwrap(api.post) @@ -175,7 +176,7 @@ class TestHitTestingApi: with pytest.raises(NotFound, match="Dataset not found"): method(api, dataset_id) - def test_hit_testing_invalid_args(self, app, dataset, dataset_id): + def test_hit_testing_invalid_args(self, app: Flask, dataset, dataset_id): api = HitTestingApi() method = unwrap(api.post) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_metadata.py 
b/api/tests/unit_tests/controllers/console/datasets/test_metadata.py index 0105aacd65..4042190ff6 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_metadata.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_metadata.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound from controllers.console import console_ns @@ -60,7 +61,7 @@ def metadata_id(): @pytest.fixture(autouse=True) -def bypass_decorators(mocker): +def bypass_decorators(mocker: MockerFixture): """Bypass setup/login/license decorators.""" mocker.patch( "controllers.console.datasets.metadata.setup_required", diff --git a/api/tests/unit_tests/controllers/console/datasets/test_website.py b/api/tests/unit_tests/controllers/console/datasets/test_website.py index 9f0da6e76f..9991a0d345 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_website.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_website.py @@ -2,6 +2,7 @@ from unittest.mock import Mock, PropertyMock, patch import pytest from flask import Flask +from pytest_mock import MockerFixture from controllers.console import console_ns from controllers.console.datasets.error import WebsiteCrawlError @@ -31,7 +32,7 @@ def app(): @pytest.fixture(autouse=True) -def bypass_auth_and_setup(mocker): +def bypass_auth_and_setup(mocker: MockerFixture): """Bypass setup/login/account decorators.""" mocker.patch( "controllers.console.datasets.website.login_required", @@ -48,7 +49,7 @@ def bypass_auth_and_setup(mocker): class TestWebsiteCrawlApi: - def test_crawl_success(self, app, mocker): + def test_crawl_success(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -85,7 +86,7 @@ class TestWebsiteCrawlApi: assert status == 200 assert result["job_id"] == "job-1" - def test_crawl_invalid_payload(self, app, mocker): + def 
test_crawl_invalid_payload(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -113,7 +114,7 @@ class TestWebsiteCrawlApi: with pytest.raises(WebsiteCrawlError, match="invalid payload"): method(api) - def test_crawl_service_error(self, app, mocker): + def test_crawl_service_error(self, app, mocker: MockerFixture): api = WebsiteCrawlApi() method = unwrap(api.post) @@ -150,7 +151,7 @@ class TestWebsiteCrawlApi: class TestWebsiteCrawlStatusApi: - def test_get_status_success(self, app, mocker): + def test_get_status_success(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) @@ -181,7 +182,7 @@ class TestWebsiteCrawlStatusApi: assert status == 200 assert result["status"] == "completed" - def test_get_status_invalid_provider(self, app, mocker): + def test_get_status_invalid_provider(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) @@ -203,7 +204,7 @@ class TestWebsiteCrawlStatusApi: with pytest.raises(WebsiteCrawlError, match="invalid provider"): method(api, job_id) - def test_get_status_service_error(self, app, mocker): + def test_get_status_service_error(self, app, mocker: MockerFixture): api = WebsiteCrawlStatusApi() method = unwrap(api.get) diff --git a/api/tests/unit_tests/controllers/console/datasets/test_wraps.py b/api/tests/unit_tests/controllers/console/datasets/test_wraps.py index e358435de4..2cfa938af8 100644 --- a/api/tests/unit_tests/controllers/console/datasets/test_wraps.py +++ b/api/tests/unit_tests/controllers/console/datasets/test_wraps.py @@ -1,6 +1,7 @@ from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from controllers.console.datasets.error import PipelineNotFoundError from controllers.console.datasets.wraps import get_rag_pipeline @@ -16,7 +17,7 @@ class TestGetRagPipeline: with pytest.raises(ValueError, match="missing pipeline_id"): dummy_view() - def test_pipeline_not_found(self, mocker): + def 
test_pipeline_not_found(self, mocker: MockerFixture): @get_rag_pipeline def dummy_view(**kwargs): return "ok" @@ -34,7 +35,7 @@ class TestGetRagPipeline: with pytest.raises(PipelineNotFoundError): dummy_view(pipeline_id="pipeline-1") - def test_pipeline_found_and_injected(self, mocker): + def test_pipeline_found_and_injected(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) pipeline.id = "pipeline-1" pipeline.tenant_id = "tenant-1" @@ -57,7 +58,7 @@ class TestGetRagPipeline: assert result is pipeline - def test_pipeline_id_removed_from_kwargs(self, mocker): + def test_pipeline_id_removed_from_kwargs(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) @get_rag_pipeline @@ -79,7 +80,7 @@ class TestGetRagPipeline: assert result == "ok" - def test_pipeline_id_cast_to_string(self, mocker): + def test_pipeline_id_cast_to_string(self, mocker: MockerFixture): pipeline = Mock(spec=Pipeline) @get_rag_pipeline diff --git a/api/tests/unit_tests/controllers/console/test_admin.py b/api/tests/unit_tests/controllers/console/test_admin.py index 16197fcd0c..27f332ac51 100644 --- a/api/tests/unit_tests/controllers/console/test_admin.py +++ b/api/tests/unit_tests/controllers/console/test_admin.py @@ -4,6 +4,7 @@ import uuid from unittest.mock import Mock, PropertyMock, patch import pytest +from pytest_mock import MockerFixture from werkzeug.exceptions import NotFound, Unauthorized from controllers.console.admin import ( @@ -18,7 +19,7 @@ from models.model import App, InstalledApp, RecommendedApp @pytest.fixture(autouse=True) -def bypass_only_edition_cloud(mocker): +def bypass_only_edition_cloud(mocker: MockerFixture): """ Bypass only_edition_cloud decorator by setting EDITION to "CLOUD". """ @@ -29,7 +30,7 @@ def bypass_only_edition_cloud(mocker): @pytest.fixture -def mock_admin_auth(mocker): +def mock_admin_auth(mocker: MockerFixture): """ Provide valid admin authentication for controller tests. 
""" @@ -44,7 +45,7 @@ def mock_admin_auth(mocker): @pytest.fixture -def mock_console_payload(mocker): +def mock_console_payload(mocker: MockerFixture): payload = { "app_id": str(uuid.uuid4()), "language": "en-US", @@ -62,7 +63,7 @@ def mock_console_payload(mocker): @pytest.fixture -def mock_banner_payload(mocker): +def mock_banner_payload(mocker: MockerFixture): mocker.patch( "flask_restx.namespace.Namespace.payload", new_callable=PropertyMock, @@ -78,7 +79,7 @@ def mock_banner_payload(mocker): @pytest.fixture -def mock_session_factory(mocker): +def mock_session_factory(mocker: MockerFixture): mock_session = Mock() mock_session.execute = Mock() mock_session.add = Mock() @@ -97,7 +98,7 @@ class TestDeleteExploreBannerApi: def setup_method(self): self.api = DeleteExploreBannerApi() - def test_delete_banner_not_found(self, mocker, mock_admin_auth): + def test_delete_banner_not_found(self, mocker: MockerFixture, mock_admin_auth): mocker.patch( "controllers.console.admin.db.session.execute", return_value=Mock(scalar_one_or_none=lambda: None), @@ -106,7 +107,7 @@ class TestDeleteExploreBannerApi: with pytest.raises(NotFound, match="is not found"): self.api.delete(uuid.uuid4()) - def test_delete_banner_success(self, mocker, mock_admin_auth): + def test_delete_banner_success(self, mocker: MockerFixture, mock_admin_auth): mock_banner = Mock() mocker.patch( @@ -126,7 +127,7 @@ class TestInsertExploreBannerApi: def setup_method(self): self.api = InsertExploreBannerApi() - def test_insert_banner_success(self, mocker, mock_admin_auth, mock_banner_payload): + def test_insert_banner_success(self, mocker: MockerFixture, mock_admin_auth, mock_banner_payload): mocker.patch("controllers.console.admin.db.session.add") mocker.patch("controllers.console.admin.db.session.commit") @@ -168,7 +169,7 @@ class TestInsertExploreAppApiDelete: def setup_method(self): self.api = InsertExploreAppApi() - def test_delete_when_not_in_explore(self, mocker, mock_admin_auth): + def 
test_delete_when_not_in_explore(self, mocker: MockerFixture, mock_admin_auth): mocker.patch( "controllers.console.admin.session_factory.create_session", return_value=Mock( @@ -183,7 +184,7 @@ class TestInsertExploreAppApiDelete: assert status == 204 assert response["result"] == "success" - def test_delete_when_in_explore_with_trial_app(self, mocker, mock_admin_auth): + def test_delete_when_in_explore_with_trial_app(self, mocker: MockerFixture, mock_admin_auth): """Test deleting an app from explore that has a trial app.""" app_id = uuid.uuid4() @@ -225,7 +226,7 @@ class TestInsertExploreAppApiDelete: assert response["result"] == "success" assert mock_app.is_public is False - def test_delete_with_installed_apps(self, mocker, mock_admin_auth): + def test_delete_with_installed_apps(self, mocker: MockerFixture, mock_admin_auth): """Test deleting an app that has installed apps in other tenants.""" app_id = uuid.uuid4() @@ -270,7 +271,7 @@ class TestInsertExploreAppListApi: def setup_method(self): self.api = InsertExploreAppListApi() - def test_app_not_found(self, mocker, mock_admin_auth, mock_console_payload): + def test_app_not_found(self, mocker: MockerFixture, mock_admin_auth, mock_console_payload): mocker.patch( "controllers.console.admin.db.session.execute", return_value=Mock(scalar_one_or_none=lambda: None), @@ -281,7 +282,7 @@ class TestInsertExploreAppListApi: def test_create_recommended_app( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, ): @@ -318,7 +319,9 @@ class TestInsertExploreAppListApi: assert response["result"] == "success" assert mock_app.is_public is True - def test_update_recommended_app(self, mocker, mock_admin_auth, mock_console_payload, mock_session_factory): + def test_update_recommended_app( + self, mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory + ): mock_app = Mock(spec=App) mock_app.id = "app-id" mock_app.site = None @@ -344,7 +347,7 @@ class TestInsertExploreAppListApi: def 
test_site_data_overrides_payload( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -381,7 +384,7 @@ class TestInsertExploreAppListApi: def test_create_trial_app_when_can_trial_enabled( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -413,7 +416,7 @@ class TestInsertExploreAppListApi: def test_update_recommended_app_with_trial( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, @@ -450,7 +453,7 @@ class TestInsertExploreAppListApi: def test_update_recommended_app_without_trial( self, - mocker, + mocker: MockerFixture, mock_admin_auth, mock_console_payload, mock_session_factory, diff --git a/api/tests/unit_tests/controllers/console/test_feature.py b/api/tests/unit_tests/controllers/console/test_feature.py index d8debc1f2c..1711aede61 100644 --- a/api/tests/unit_tests/controllers/console/test_feature.py +++ b/api/tests/unit_tests/controllers/console/test_feature.py @@ -1,3 +1,4 @@ +from pytest_mock import MockerFixture from werkzeug.exceptions import Unauthorized @@ -11,7 +12,7 @@ def unwrap(func): class TestFeatureApi: - def test_get_tenant_features_success(self, mocker): + def test_get_tenant_features_success(self, mocker: MockerFixture): from controllers.console.feature import FeatureApi mocker.patch( @@ -32,7 +33,7 @@ class TestFeatureApi: class TestSystemFeatureApi: - def test_get_system_features_authenticated(self, mocker): + def test_get_system_features_authenticated(self, mocker: MockerFixture): """ current_user.is_authenticated == True """ @@ -56,7 +57,7 @@ class TestSystemFeatureApi: assert result == {"features": {"sys_feature": True}} - def test_get_system_features_unauthenticated(self, mocker): + def test_get_system_features_unauthenticated(self, mocker: MockerFixture): """ current_user.is_authenticated raises Unauthorized """ diff --git 
a/api/tests/unit_tests/controllers/console/workspace/test_models.py b/api/tests/unit_tests/controllers/console/workspace/test_models.py index 4246e3c04c..3c4acbab44 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_models.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_models.py @@ -32,7 +32,7 @@ class TestDefaultModelApi: with ( app.test_request_context( "/", - query_string={"model_type": ModelType.LLM.value}, + query_string={"model_type": ModelType.LLM}, ), patch( "controllers.console.workspace.models.current_account_with_tenant", @@ -53,7 +53,7 @@ class TestDefaultModelApi: payload = { "model_settings": [ { - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "provider": "openai", "model": "gpt-4", } @@ -77,7 +77,7 @@ class TestDefaultModelApi: method = unwrap(api.get) with ( - app.test_request_context("/", query_string={"model_type": ModelType.LLM.value}), + app.test_request_context("/", query_string={"model_type": ModelType.LLM}), patch("controllers.console.workspace.models.current_account_with_tenant", return_value=(MagicMock(), "t1")), patch("controllers.console.workspace.models.ModelProviderService") as service, ): @@ -113,7 +113,7 @@ class TestModelProviderModelApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "load_balancing": { "configs": [{"weight": 1}], "enabled": True, @@ -139,7 +139,7 @@ class TestModelProviderModelApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -180,7 +180,7 @@ class TestModelProviderModelCredentialApi: "/", query_string={ "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, }, ), patch( @@ -208,7 +208,7 @@ class TestModelProviderModelCredentialApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {"key": "val"}, } @@ -229,7 +229,7 @@ class 
TestModelProviderModelCredentialApi: method = unwrap(api.get) with ( - app.test_request_context("/", query_string={"model": "gpt", "model_type": ModelType.LLM.value}), + app.test_request_context("/", query_string={"model": "gpt", "model_type": ModelType.LLM}), patch("controllers.console.workspace.models.current_account_with_tenant", return_value=(MagicMock(), "t1")), patch("controllers.console.workspace.models.ModelProviderService") as service, patch("controllers.console.workspace.models.ModelLoadBalancingService") as lb, @@ -248,7 +248,7 @@ class TestModelProviderModelCredentialApi: payload = { "model": "gpt", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credential_id": "123e4567-e89b-12d3-a456-426614174000", } @@ -269,7 +269,7 @@ class TestModelProviderModelCredentialSwitchApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credential_id": "abc", } @@ -293,7 +293,7 @@ class TestModelEnableDisableApis: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -314,7 +314,7 @@ class TestModelEnableDisableApis: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, } with ( @@ -337,7 +337,7 @@ class TestModelProviderModelValidateApi: payload = { "model": "gpt-4", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {"key": "val"}, } @@ -360,7 +360,7 @@ class TestModelProviderModelValidateApi: payload = { "model": model_name, - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "credentials": {}, } @@ -412,7 +412,7 @@ class TestParameterAndAvailableModels: ): service_mock.return_value.get_models_by_model_type.return_value = [] - result = method(api, ModelType.LLM.value) + result = method(api, ModelType.LLM) assert "data" in result @@ -442,6 +442,6 @@ class TestParameterAndAvailableModels: ): service.return_value.get_models_by_model_type.return_value = [] 
- result = method(api, ModelType.LLM.value) + result = method(api, ModelType.LLM) assert result["data"] == [] diff --git a/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py b/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py index d1b09c3a58..598677faff 100644 --- a/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py +++ b/api/tests/unit_tests/controllers/inner_api/plugin/test_plugin_wraps.py @@ -189,7 +189,7 @@ class TestGetUserTenant: """Test get_user_tenant decorator""" @patch("controllers.inner_api.plugin.wraps.Tenant") - def test_should_inject_tenant_and_user_models(self, mock_tenant_class, app: Flask, monkeypatch): + def test_should_inject_tenant_and_user_models(self, mock_tenant_class, app: Flask, monkeypatch: pytest.MonkeyPatch): """Test that decorator injects tenant_model and user_model into kwargs""" # Arrange @@ -244,7 +244,9 @@ class TestGetUserTenant: protected_view() @patch("controllers.inner_api.plugin.wraps.Tenant") - def test_should_use_default_session_id_when_user_id_empty(self, mock_tenant_class, app: Flask, monkeypatch): + def test_should_use_default_session_id_when_user_id_empty( + self, mock_tenant_class, app: Flask, monkeypatch: pytest.MonkeyPatch + ): """Test that default session ID is used when user_id is empty string""" # Arrange diff --git a/api/tests/unit_tests/controllers/service_api/app/test_conversation.py b/api/tests/unit_tests/controllers/service_api/app/test_conversation.py index 6dc8f54d42..74c13d50f6 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_conversation.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_conversation.py @@ -340,7 +340,7 @@ class TestConversationAppModeValidation: @pytest.mark.parametrize( "mode", [ - AppMode.CHAT.value, + AppMode.CHAT, AppMode.AGENT_CHAT.value, AppMode.ADVANCED_CHAT.value, ], @@ -365,7 +365,7 @@ class TestConversationAppModeValidation: app raises NotChatAppError. 
""" app = Mock(spec=App) - app.mode = AppMode.COMPLETION.value + app.mode = AppMode.COMPLETION app_mode = AppMode.value_of(app.mode) assert app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT} @@ -498,7 +498,7 @@ class TestConversationApiController: def test_list_not_chat(self, app) -> None: api = ConversationApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations", method="GET"): @@ -531,7 +531,7 @@ class TestConversationApiController: api = ConversationApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -546,7 +546,7 @@ class TestConversationDetailApiController: def test_delete_not_chat(self, app) -> None: api = ConversationDetailApi() handler = _unwrap(api.delete) - app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations/1", method="DELETE"): @@ -562,7 +562,7 @@ class TestConversationDetailApiController: api = ConversationDetailApi() handler = _unwrap(api.delete) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context("/conversations/1", method="DELETE"): @@ -580,7 +580,7 @@ class TestConversationRenameApiController: api = ConversationRenameApi() handler = _unwrap(api.post) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -596,7 +596,7 @@ class TestConversationVariablesApiController: def test_not_chat(self, app) -> None: api = ConversationVariablesApi() handler = _unwrap(api.get) 
- app_model = SimpleNamespace(mode=AppMode.COMPLETION.value) + app_model = SimpleNamespace(mode=AppMode.COMPLETION) end_user = SimpleNamespace() with app.test_request_context("/conversations/1/variables", method="GET"): @@ -612,7 +612,7 @@ class TestConversationVariablesApiController: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -645,7 +645,7 @@ class TestConversationVariablesApiController: api = ConversationVariablesApi() handler = _unwrap(api.get) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -671,7 +671,7 @@ class TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -697,7 +697,7 @@ class TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( @@ -731,7 +731,7 @@ class TestConversationVariableDetailApiController: api = ConversationVariableDetailApi() handler = _unwrap(api.put) - app_model = SimpleNamespace(mode=AppMode.CHAT.value) + app_model = SimpleNamespace(mode=AppMode.CHAT) end_user = SimpleNamespace() with app.test_request_context( diff --git a/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py b/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py index 3cc444e467..9c310a4f45 100644 --- a/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py +++ 
b/api/tests/unit_tests/controllers/service_api/end_user/test_end_user.py @@ -3,6 +3,7 @@ from unittest.mock import Mock from uuid import UUID, uuid4 import pytest +from pytest_mock import MockerFixture from controllers.service_api.end_user.end_user import EndUserApi from controllers.service_api.end_user.error import EndUserNotFoundError @@ -21,7 +22,9 @@ class TestEndUserApi: app.tenant_id = str(uuid4()) return app - def test_get_end_user_returns_all_attributes(self, mocker, resource: EndUserApi, app_model: App) -> None: + def test_get_end_user_returns_all_attributes( + self, mocker: MockerFixture, resource: EndUserApi, app_model: App + ) -> None: end_user = Mock(spec=EndUser) end_user.id = str(uuid4()) end_user.tenant_id = app_model.tenant_id @@ -54,7 +57,7 @@ class TestEndUserApi: assert result["created_at"].startswith("2024-01-01T00:00:00") assert result["updated_at"].startswith("2024-01-02T00:00:00") - def test_get_end_user_not_found(self, mocker, resource: EndUserApi, app_model: App) -> None: + def test_get_end_user_not_found(self, mocker: MockerFixture, resource: EndUserApi, app_model: App) -> None: mocker.patch("controllers.service_api.end_user.end_user.EndUserService.get_end_user_by_id", return_value=None) with pytest.raises(EndUserNotFoundError): diff --git a/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py b/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py index 9073ae1044..c1a4da8cd3 100644 --- a/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py +++ b/api/tests/unit_tests/core/agent/output_parser/test_cot_output_parser.py @@ -12,12 +12,13 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.output_parser.cot_output_parser import CotAgentOutputParser @pytest.fixture -def mock_action_class(mocker): +def mock_action_class(mocker: MockerFixture): mock_action = MagicMock() mocker.patch( 
"core.agent.output_parser.cot_output_parser.AgentScratchpadUnit.Action", diff --git a/api/tests/unit_tests/core/agent/strategy/test_plugin.py b/api/tests/unit_tests/core/agent/strategy/test_plugin.py index e0894f1e90..0fea04845d 100644 --- a/api/tests/unit_tests/core/agent/strategy/test_plugin.py +++ b/api/tests/unit_tests/core/agent/strategy/test_plugin.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.strategy.plugin import PluginAgentStrategy @@ -213,7 +214,9 @@ class TestInvoke: (None, None, "msg"), ], ) - def test_invoke_optional_arguments(self, strategy, mocker, conversation_id, app_id, message_id) -> None: + def test_invoke_optional_arguments( + self, strategy, mocker: MockerFixture, conversation_id, app_id, message_id + ) -> None: mock_manager = MagicMock() mock_manager.invoke = MagicMock(return_value=iter([])) diff --git a/api/tests/unit_tests/core/agent/test_base_agent_runner.py b/api/tests/unit_tests/core/agent/test_base_agent_runner.py index db4b293b16..d5fb853ee3 100644 --- a/api/tests/unit_tests/core/agent/test_base_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_base_agent_runner.py @@ -3,6 +3,7 @@ from decimal import Decimal from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.agent.base_agent_runner as module from core.agent.base_agent_runner import BaseAgentRunner @@ -13,7 +14,7 @@ from core.agent.base_agent_runner import BaseAgentRunner @pytest.fixture -def mock_db_session(mocker): +def mock_db_session(mocker: MockerFixture): session = mocker.MagicMock() mocker.patch.object(module.db, "session", session) return session @@ -41,13 +42,13 @@ def runner(mocker, mock_db_session): class TestRepack: - def test_sets_empty_if_none(self, runner, mocker): + def test_sets_empty_if_none(self, runner, mocker: MockerFixture): entity = mocker.MagicMock() entity.app_config.prompt_template.simple_prompt_template = None result = 
runner._repack_app_generate_entity(entity) assert result.app_config.prompt_template.simple_prompt_template == "" - def test_keeps_existing(self, runner, mocker): + def test_keeps_existing(self, runner, mocker: MockerFixture): entity = mocker.MagicMock() entity.app_config.prompt_template.simple_prompt_template = "abc" result = runner._repack_app_generate_entity(entity) @@ -60,7 +61,7 @@ class TestRepack: class TestUpdatePromptTool: - def build_param(self, mocker, **kwargs): + def build_param(self, mocker: MockerFixture, **kwargs): p = mocker.MagicMock() p.form = kwargs.get("form") @@ -75,7 +76,7 @@ class TestUpdatePromptTool: p.required = kwargs.get("required", False) return p - def test_skip_non_llm(self, runner, mocker): + def test_skip_non_llm(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param(mocker, form="NOT_LLM") tool.get_runtime_parameters.return_value = [param] @@ -86,7 +87,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"] == {} - def test_enum_and_required(self, runner, mocker): + def test_enum_and_required(self, runner, mocker: MockerFixture): option = mocker.MagicMock(value="opt1") param = self.build_param( mocker, @@ -104,7 +105,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert "p1" in result.parameters["required"] - def test_skip_file_type_param(self, runner, mocker): + def test_skip_file_type_param(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param(mocker, form=module.ToolParameter.ToolParameterForm.LLM) param.type = module.ToolParameter.ToolParameterType.FILE @@ -116,7 +117,7 @@ class TestUpdatePromptTool: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"] == {} - def test_duplicate_required_not_duplicated(self, runner, mocker): + def test_duplicate_required_not_duplicated(self, runner, 
mocker: MockerFixture): tool = mocker.MagicMock() param = self.build_param( @@ -141,7 +142,7 @@ class TestUpdatePromptTool: class TestCreateAgentThought: - def test_with_files(self, runner, mock_db_session, mocker): + def test_with_files(self, runner, mock_db_session, mocker: MockerFixture): mock_thought = mocker.MagicMock(id=10) mocker.patch.object(module, "MessageAgentThought", return_value=mock_thought) @@ -149,7 +150,7 @@ class TestCreateAgentThought: assert result == "10" assert runner.agent_thought_count == 1 - def test_without_files(self, runner, mock_db_session, mocker): + def test_without_files(self, runner, mock_db_session, mocker: MockerFixture): mock_thought = mocker.MagicMock(id=11) mocker.patch.object(module, "MessageAgentThought", return_value=mock_thought) @@ -163,7 +164,7 @@ class TestCreateAgentThought: class TestSaveAgentThought: - def setup_agent(self, mocker): + def setup_agent(self, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1;tool2" agent.tool_labels = {} @@ -175,7 +176,7 @@ class TestSaveAgentThought: with pytest.raises(ValueError): runner.save_agent_thought("id", None, None, None, None, None, None, [], None) - def test_full_update(self, runner, mock_db_session, mocker): + def test_full_update(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -210,7 +211,7 @@ class TestSaveAgentThought: assert agent.tokens == 3 assert "tool1" in json.loads(agent.tool_labels_str) - def test_label_fallback_when_none(self, runner, mock_db_session, mocker): + def test_label_fallback_when_none(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) agent.tool = "unknown_tool" mock_db_session.scalar.return_value = agent @@ -220,7 +221,7 @@ class TestSaveAgentThought: labels = json.loads(agent.tool_labels_str) assert "unknown_tool" in labels - def test_json_failure_paths(self, runner, mock_db_session, mocker): + def 
test_json_failure_paths(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -241,13 +242,13 @@ class TestSaveAgentThought: assert mock_db_session.commit.called - def test_messages_ids_none(self, runner, mock_db_session, mocker): + def test_messages_ids_none(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent runner.save_agent_thought("id", None, None, None, None, None, None, None, None) assert mock_db_session.commit.called - def test_success_dict_serialization(self, runner, mock_db_session, mocker): + def test_success_dict_serialization(self, runner, mock_db_session, mocker: MockerFixture): agent = self.setup_agent(mocker) mock_db_session.scalar.return_value = agent @@ -273,19 +274,19 @@ class TestSaveAgentThought: class TestOrganizeUserPrompt: - def test_no_files(self, runner, mock_db_session, mocker): + def test_no_files(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [] msg = mocker.MagicMock(id="1", query="hello", app_model_config=None) result = runner.organize_agent_user_prompt(msg) assert result.content == "hello" - def test_with_files_no_config(self, runner, mock_db_session, mocker): + def test_with_files_no_config(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] msg = mocker.MagicMock(id="1", query="hello", app_model_config=None) result = runner.organize_agent_user_prompt(msg) assert result.content == "hello" - def test_image_detail_low_fallback(self, runner, mock_db_session, mocker): + def test_image_detail_low_fallback(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] file_config = mocker.MagicMock() file_config.image_config = mocker.MagicMock(detail=None) @@ -305,27 
+306,27 @@ class TestOrganizeUserPrompt: class TestOrganizeHistory: - def test_empty(self, runner, mock_db_session, mocker): + def test_empty(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.execute.return_value.scalars.return_value.all.return_value = [] mocker.patch.object(module, "extract_thread_messages", return_value=[]) result = runner.organize_agent_history([]) assert result == [] - def test_with_answer_only(self, runner, mock_db_session, mocker): + def test_with_answer_only(self, runner, mock_db_session, mocker: MockerFixture): msg = mocker.MagicMock(id="m1", answer="ans", agent_thoughts=[], app_model_config=None) mock_db_session.execute.return_value.scalars.return_value.all.return_value = [msg] mocker.patch.object(module, "extract_thread_messages", return_value=[msg]) result = runner.organize_agent_history([]) assert any(isinstance(x, module.AssistantPromptMessage) for x in result) - def test_skip_current_message(self, runner, mock_db_session, mocker): + def test_skip_current_message(self, runner, mock_db_session, mocker: MockerFixture): msg = mocker.MagicMock(id="msg_current", agent_thoughts=[], answer="ans", app_model_config=None) mock_db_session.execute.return_value.scalars.return_value.all.return_value = [msg] mocker.patch.object(module, "extract_thread_messages", return_value=[msg]) result = runner.organize_agent_history([]) assert result == [] - def test_with_tool_calls_invalid_json(self, runner, mock_db_session, mocker): + def test_with_tool_calls_invalid_json(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input="invalid", @@ -341,7 +342,7 @@ class TestOrganizeHistory: result = runner.organize_agent_history([]) assert isinstance(result, list) - def test_empty_tool_name_split(self, runner, mock_db_session, mocker): + def test_empty_tool_name_split(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock(tool=";", thought="thinking") msg = 
mocker.MagicMock(id="m5", agent_thoughts=[thought], answer=None, app_model_config=None) @@ -350,7 +351,7 @@ class TestOrganizeHistory: result = runner.organize_agent_history([]) assert isinstance(result, list) - def test_valid_json_tool_flow(self, runner, mock_db_session, mocker): + def test_valid_json_tool_flow(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input=json.dumps({"tool1": {"x": 1}}), @@ -379,7 +380,7 @@ class TestOrganizeHistory: class TestConvertToolToPromptMessageTool: - def test_basic_conversion(self, runner, mocker): + def test_basic_conversion(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") runtime_param = mocker.MagicMock() @@ -404,7 +405,7 @@ class TestConvertToolToPromptMessageTool: prompt_tool, entity = runner._convert_tool_to_prompt_message_tool(tool) assert entity == tool_entity - def test_full_conversion_multiple_params(self, runner, mocker): + def test_full_conversion_multiple_params(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") # LLM param with input_schema override @@ -441,7 +442,7 @@ class TestConvertToolToPromptMessageTool: class TestInitPromptToolsExtended: - def test_agent_tool_branch(self, runner, mocker): + def test_agent_tool_branch(self, runner, mocker: MockerFixture): agent_tool = mocker.MagicMock(tool_name="agent_tool") runner.app_config.agent = mocker.MagicMock(tools=[agent_tool]) mocker.patch.object(runner, "_convert_tool_to_prompt_message_tool", return_value=(MagicMock(), "entity")) @@ -449,7 +450,7 @@ class TestInitPromptToolsExtended: tools, prompts = runner._init_prompt_tools() assert "agent_tool" in tools - def test_exception_in_conversion(self, runner, mocker): + def test_exception_in_conversion(self, runner, mocker: MockerFixture): agent_tool = mocker.MagicMock(tool_name="bad_tool") runner.app_config.agent = mocker.MagicMock(tools=[agent_tool]) mocker.patch.object(runner, 
"_convert_tool_to_prompt_message_tool", side_effect=Exception) @@ -464,7 +465,7 @@ class TestInitPromptToolsExtended: class TestAdditionalCoverage: - def test_update_prompt_with_input_schema(self, runner, mocker): + def test_update_prompt_with_input_schema(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() param = mocker.MagicMock() @@ -487,7 +488,7 @@ class TestAdditionalCoverage: result = runner.update_prompt_message_tool(tool, prompt_tool) assert result.parameters["properties"]["p1"]["type"] == "number" - def test_save_agent_thought_existing_labels(self, runner, mock_db_session, mocker): + def test_save_agent_thought_existing_labels(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {"tool1": {"en_US": "existing"}} @@ -498,7 +499,7 @@ class TestAdditionalCoverage: labels = json.loads(agent.tool_labels_str) assert labels["tool1"]["en_US"] == "existing" - def test_save_agent_thought_tool_meta_string(self, runner, mock_db_session, mocker): + def test_save_agent_thought_tool_meta_string(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {} @@ -508,7 +509,7 @@ class TestAdditionalCoverage: runner.save_agent_thought("id", None, None, None, None, "meta_string", None, [], None) assert agent.tool_meta_str == "meta_string" - def test_convert_dataset_retriever_tool(self, runner, mocker): + def test_convert_dataset_retriever_tool(self, runner, mocker: MockerFixture): ds_tool = mocker.MagicMock() ds_tool.entity.identity.name = "ds" ds_tool.entity.description.llm = "desc" @@ -525,7 +526,7 @@ class TestAdditionalCoverage: prompt = runner._convert_dataset_retriever_tool_to_prompt_message_tool(ds_tool) assert prompt is not None - def test_organize_user_prompt_with_file_objects(self, runner, mock_db_session, mocker): + def test_organize_user_prompt_with_file_objects(self, runner, mock_db_session, mocker: MockerFixture): 
mock_db_session.scalars.return_value.all.return_value = [mocker.MagicMock()] file_config = mocker.MagicMock() @@ -544,7 +545,7 @@ class TestAdditionalCoverage: result = runner.organize_agent_user_prompt(msg) assert result is not None - def test_organize_history_without_tool_names(self, runner, mock_db_session, mocker): + def test_organize_history_without_tool_names(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock(tool=None, thought="thinking") msg = mocker.MagicMock(id="m3", agent_thoughts=[thought], answer=None, app_model_config=None) @@ -554,7 +555,7 @@ class TestAdditionalCoverage: result = runner.organize_agent_history([]) assert isinstance(result, list) - def test_organize_history_multiple_tools_split(self, runner, mock_db_session, mocker): + def test_organize_history_multiple_tools_split(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1;tool2", tool_input=json.dumps({"tool1": {}, "tool2": {}}), @@ -572,7 +573,7 @@ class TestAdditionalCoverage: # ================= Additional Surgical Coverage ================= - def test_convert_tool_select_enum_branch(self, runner, mocker): + def test_convert_tool_select_enum_branch(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") param = mocker.MagicMock() @@ -599,7 +600,7 @@ class TestAdditionalCoverage: class TestConvertDatasetRetrieverTool: - def test_required_param_added(self, runner, mocker): + def test_required_param_added(self, runner, mocker: MockerFixture): ds_tool = mocker.MagicMock() ds_tool.entity.identity.name = "ds" ds_tool.entity.description.llm = "desc" @@ -619,7 +620,7 @@ class TestConvertDatasetRetrieverTool: class TestBaseAgentRunnerInit: - def test_init_sets_stream_tool_call_and_files(self, mocker): + def test_init_sets_stream_tool_call_and_files(self, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = 2 mocker.patch.object(module.db, "session", session) @@ -662,7 
+663,7 @@ class TestBaseAgentRunnerInit: class TestBaseAgentRunnerCoverage: - def test_convert_tool_skips_non_llm_param(self, runner, mocker): + def test_convert_tool_skips_non_llm_param(self, runner, mocker: MockerFixture): tool = mocker.MagicMock(tool_name="tool1") param = mocker.MagicMock() @@ -680,7 +681,7 @@ class TestBaseAgentRunnerCoverage: assert prompt_tool.parameters["properties"] == {} - def test_init_prompt_tools_adds_dataset_tools(self, runner, mocker): + def test_init_prompt_tools_adds_dataset_tools(self, runner, mocker: MockerFixture): dataset_tool = mocker.MagicMock() dataset_tool.entity.identity.name = "ds" runner.dataset_tools = [dataset_tool] @@ -692,7 +693,7 @@ class TestBaseAgentRunnerCoverage: assert tools["ds"] == dataset_tool assert len(prompt_tools) == 1 - def test_update_prompt_message_tool_select_enum(self, runner, mocker): + def test_update_prompt_message_tool_select_enum(self, runner, mocker: MockerFixture): tool = mocker.MagicMock() option1 = mocker.MagicMock(value="A") @@ -716,7 +717,7 @@ class TestBaseAgentRunnerCoverage: assert result.parameters["properties"]["select_param"]["enum"] == ["A", "B"] - def test_save_agent_thought_json_dumps_fallbacks(self, runner, mock_db_session, mocker): + def test_save_agent_thought_json_dumps_fallbacks(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1" agent.tool_labels = {} @@ -754,7 +755,7 @@ class TestBaseAgentRunnerCoverage: assert isinstance(agent.observation, str) assert isinstance(agent.tool_meta_str, str) - def test_save_agent_thought_skips_empty_tool_name(self, runner, mock_db_session, mocker): + def test_save_agent_thought_skips_empty_tool_name(self, runner, mock_db_session, mocker: MockerFixture): agent = mocker.MagicMock() agent.tool = "tool1;;" agent.tool_labels = {} @@ -768,7 +769,7 @@ class TestBaseAgentRunnerCoverage: labels = json.loads(agent.tool_labels_str) assert "" not in labels - def 
test_organize_history_includes_system_prompt(self, runner, mock_db_session, mocker): + def test_organize_history_includes_system_prompt(self, runner, mock_db_session, mocker: MockerFixture): mock_db_session.execute.return_value.scalars.return_value.all.return_value = [] mocker.patch.object(module, "extract_thread_messages", return_value=[]) @@ -778,7 +779,7 @@ class TestBaseAgentRunnerCoverage: assert system_message in result - def test_organize_history_tool_inputs_and_observation_none(self, runner, mock_db_session, mocker): + def test_organize_history_tool_inputs_and_observation_none(self, runner, mock_db_session, mocker: MockerFixture): thought = mocker.MagicMock( tool="tool1", tool_input=None, diff --git a/api/tests/unit_tests/core/agent/test_cot_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_agent_runner.py index cde8820e00..314305d371 100644 --- a/api/tests/unit_tests/core/agent/test_cot_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_agent_runner.py @@ -2,6 +2,7 @@ import json from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.cot_agent_runner import CotAgentRunner from core.agent.entities import AgentScratchpadUnit @@ -25,7 +26,7 @@ class DummyRunner(CotAgentRunner): @pytest.fixture -def runner(mocker): +def runner(mocker: MockerFixture): # Prevent BaseAgentRunner __init__ from hitting database mocker.patch( "core.agent.base_agent_runner.BaseAgentRunner.organize_agent_history", @@ -165,7 +166,7 @@ class TestHandleInvokeAction: response, meta = runner._handle_invoke_action(action, {}, []) assert "there is not a tool named" in response - def test_tool_with_json_string_args(self, runner, mocker): + def test_tool_with_json_string_args(self, runner, mocker: MockerFixture): action = AgentScratchpadUnit.Action(action_name="tool", action_input=json.dumps({"a": 1})) tool_instance = MagicMock() tool_instances = {"tool": tool_instance} @@ -180,7 +181,7 @@ class TestHandleInvokeAction: 
class TestOrganizeHistoricPromptMessages: - def test_empty_history(self, runner, mocker): + def test_empty_history(self, runner, mocker: MockerFixture): mocker.patch( "core.agent.cot_agent_runner.AgentHistoryPromptTransform.get_prompt", return_value=[], @@ -190,7 +191,7 @@ class TestOrganizeHistoricPromptMessages: class TestRun: - def test_run_handles_empty_parser_output(self, runner, mocker): + def test_run_handles_empty_parser_output(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -202,7 +203,7 @@ class TestRun: results = list(runner.run(message, "query", {})) assert isinstance(results, list) - def test_run_with_action_and_tool_invocation(self, runner, mocker): + def test_run_with_action_and_tool_invocation(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -223,7 +224,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {"tool": MagicMock()})) - def test_run_respects_max_iteration_boundary(self, runner, mocker): + def test_run_respects_max_iteration_boundary(self, runner, mocker: MockerFixture): runner.app_config.agent.max_iteration = 1 message = MagicMock() message.id = "msg-id" @@ -245,7 +246,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {"tool": MagicMock()})) - def test_run_basic_flow(self, runner, mocker): + def test_run_basic_flow(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -257,7 +258,7 @@ class TestRun: results = list(runner.run(message, "query", {"name": "John"})) assert results - def test_run_max_iteration_error(self, runner, mocker): + def test_run_max_iteration_error(self, runner, mocker: MockerFixture): runner.app_config.agent.max_iteration = 0 message = MagicMock() message.id = "msg-id" @@ -272,7 +273,7 @@ class TestRun: with pytest.raises(AgentMaxIterationError): list(runner.run(message, "query", {})) - def 
test_run_increase_usage_aggregation(self, runner, mocker): + def test_run_increase_usage_aggregation(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" runner.app_config.agent.max_iteration = 2 @@ -329,7 +330,7 @@ class TestRun: assert final_usage.completion_price == 2 assert final_usage.total_price == 4 - def test_run_when_no_action_branch(self, runner, mocker): + def test_run_when_no_action_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -341,7 +342,7 @@ class TestRun: results = list(runner.run(message, "query", {})) assert results[-1].delta.message.content == "" - def test_run_usage_missing_key_branch(self, runner, mocker): + def test_run_usage_missing_key_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -354,7 +355,7 @@ class TestRun: list(runner.run(message, "query", {})) - def test_run_prompt_tool_update_branch(self, runner, mocker): + def test_run_prompt_tool_update_branch(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -410,7 +411,7 @@ class TestRun: class TestInitReactState: - def test_init_react_state_resets_state(self, runner, mocker): + def test_init_react_state_resets_state(self, runner, mocker: MockerFixture): mocker.patch.object(runner, "_organize_historic_prompt_messages", return_value=["historic"]) runner._agent_scratchpad = ["old"] runner._query = "old" @@ -423,7 +424,7 @@ class TestInitReactState: class TestHandleInvokeActionExtended: - def test_tool_with_invalid_json_string_args(self, runner, mocker): + def test_tool_with_invalid_json_string_args(self, runner, mocker: MockerFixture): action = AgentScratchpadUnit.Action(action_name="tool", action_input="not-json") tool_instance = MagicMock() tool_instances = {"tool": tool_instance} @@ -457,7 +458,7 @@ class TestFillInputsEdgeCases: class TestOrganizeHistoricPromptMessagesExtended: - def test_user_message_flushes_scratchpad(self, runner, 
mocker): + def test_user_message_flushes_scratchpad(self, runner, mocker: MockerFixture): from graphon.model_runtime.entities.message_entities import UserPromptMessage user_message = UserPromptMessage(content="Hi") @@ -480,7 +481,7 @@ class TestOrganizeHistoricPromptMessagesExtended: with pytest.raises(NotImplementedError): runner._organize_historic_prompt_messages([]) - def test_agent_history_transform_invocation(self, runner, mocker): + def test_agent_history_transform_invocation(self, runner, mocker: MockerFixture): mock_transform = MagicMock() mock_transform.get_prompt.return_value = [] @@ -495,7 +496,7 @@ class TestOrganizeHistoricPromptMessagesExtended: class TestRunAdditionalBranches: - def test_run_with_no_action_final_answer_empty(self, runner, mocker): + def test_run_with_no_action_final_answer_empty(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -507,7 +508,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert any(hasattr(r, "delta") for r in results) - def test_run_with_final_answer_action_string(self, runner, mocker): + def test_run_with_final_answer_action_string(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -521,7 +522,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert results[-1].delta.message.content == "done" - def test_run_with_final_answer_action_dict(self, runner, mocker): + def test_run_with_final_answer_action_dict(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" @@ -535,7 +536,7 @@ class TestRunAdditionalBranches: results = list(runner.run(message, "query", {})) assert json.loads(results[-1].delta.message.content) == {"a": 1} - def test_run_with_string_final_answer(self, runner, mocker): + def test_run_with_string_final_answer(self, runner, mocker: MockerFixture): message = MagicMock() message.id = "msg-id" diff --git 
a/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py index ea8cc8aa86..8e7093fd12 100644 --- a/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_chat_agent_runner.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, patch import pytest +from pytest_mock import MockerFixture from core.agent.cot_chat_agent_runner import CotChatAgentRunner from graphon.model_runtime.entities.message_entities import TextPromptMessageContent @@ -55,7 +56,7 @@ def runner(): class TestOrganizeSystemPrompt: - def test_organize_system_prompt_success(self, runner, mocker): + def test_organize_system_prompt_success(self, runner, mocker: MockerFixture): first_prompt = "Instruction: {{instruction}}, Tools: {{tools}}, Names: {{tool_names}}" runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt(first_prompt))) @@ -154,7 +155,7 @@ class TestOrganizeUserQuery: class TestOrganizePromptMessages: - def test_no_scratchpad(self, runner, mocker): + def test_no_scratchpad(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -164,7 +165,7 @@ class TestOrganizePromptMessages: assert "query" in result runner._organize_historic_prompt_messages.assert_called_once() - def test_with_final_scratchpad(self, runner, mocker): + def test_with_final_scratchpad(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -177,7 +178,7 @@ class TestOrganizePromptMessages: combined = "".join([m.content for m in assistant_msgs if isinstance(m.content, str)]) assert "Final Answer: done" 
in combined - def test_with_thought_action_observation(self, runner, mocker): + def test_with_thought_action_observation(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) @@ -197,7 +198,7 @@ class TestOrganizePromptMessages: assert "Action: action" in combined assert "Observation: observe" in combined - def test_multiple_units_mixed(self, runner, mocker): + def test_multiple_units_mixed(self, runner, mocker: MockerFixture): runner.app_config = DummyAppConfig(DummyAgentConfig(DummyPrompt("{{instruction}}"))) runner._organize_system_prompt = MagicMock(return_value="system") runner._organize_user_query = MagicMock(return_value=["query"]) diff --git a/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py b/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py index 2f5873d865..0d949c357d 100644 --- a/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_cot_completion_agent_runner.py @@ -1,6 +1,7 @@ import json import pytest +from pytest_mock import MockerFixture from core.agent.cot_completion_agent_runner import CotCompletionAgentRunner from graphon.model_runtime.entities.message_entities import ( @@ -74,7 +75,7 @@ class TestOrganizeInstructionPrompt: class TestOrganizeHistoricPrompt: - def test_with_user_and_assistant_string(self, runner, mocker): + def test_with_user_and_assistant_string(self, runner, mocker: MockerFixture): user_msg = UserPromptMessage(content="Hello") assistant_msg = AssistantPromptMessage(content="Hi there") @@ -89,7 +90,7 @@ class TestOrganizeHistoricPrompt: assert "Question: Hello" in result assert "Hi there" in result - def test_assistant_list_with_text_content(self, runner, mocker): + def test_assistant_list_with_text_content(self, runner, mocker: 
MockerFixture): text_content = TextPromptMessageContent(data="Partial answer") assistant_msg = AssistantPromptMessage(content=[text_content]) @@ -103,7 +104,7 @@ class TestOrganizeHistoricPrompt: assert "Partial answer" in result - def test_assistant_list_with_non_text_content_ignored(self, runner, mocker): + def test_assistant_list_with_non_text_content_ignored(self, runner, mocker: MockerFixture): non_text_content = ImagePromptMessageContent(format="url", mime_type="image/png") assistant_msg = AssistantPromptMessage(content=[non_text_content]) @@ -116,7 +117,7 @@ class TestOrganizeHistoricPrompt: result = runner._organize_historic_prompt() assert result == "" - def test_empty_history(self, runner, mocker): + def test_empty_history(self, runner, mocker: MockerFixture): mocker.patch.object( runner, "_organize_historic_prompt_messages", @@ -136,7 +137,7 @@ class TestOrganizePromptMessages: def test_full_flow_with_scratchpad( self, runner, - mocker, + mocker: MockerFixture, dummy_app_config_factory, dummy_agent_config_factory, dummy_prompt_entity_factory, @@ -171,7 +172,12 @@ class TestOrganizePromptMessages: assert "Question: What is Python?" 
in content def test_no_scratchpad( - self, runner, mocker, dummy_app_config_factory, dummy_agent_config_factory, dummy_prompt_entity_factory + self, + runner, + mocker: MockerFixture, + dummy_app_config_factory, + dummy_agent_config_factory, + dummy_prompt_entity_factory, ): template = "SYS {{historic_messages}} {{agent_scratchpad}} {{query}}" @@ -198,7 +204,7 @@ class TestOrganizePromptMessages: def test_partial_scratchpad_units( self, runner, - mocker, + mocker: MockerFixture, thought, action, observation, diff --git a/api/tests/unit_tests/core/agent/test_fc_agent_runner.py b/api/tests/unit_tests/core/agent/test_fc_agent_runner.py index 17ab5babcb..3a4347e723 100644 --- a/api/tests/unit_tests/core/agent/test_fc_agent_runner.py +++ b/api/tests/unit_tests/core/agent/test_fc_agent_runner.py @@ -3,6 +3,7 @@ from typing import Any from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.agent.errors import AgentMaxIterationError from core.agent.fc_agent_runner import FunctionCallAgentRunner @@ -68,7 +69,7 @@ class DummyResult: @pytest.fixture -def runner(mocker): +def runner(mocker: MockerFixture): # Completely bypass BaseAgentRunner __init__ to avoid DB / Flask context mocker.patch( "core.agent.base_agent_runner.BaseAgentRunner.__init__", @@ -230,7 +231,7 @@ class TestOrganizeUserQuery: result = runner._organize_user_query(None, []) assert len(result) == 1 - def test_with_files_uses_image_detail_config(self, runner, mocker): + def test_with_files_uses_image_detail_config(self, runner, mocker: MockerFixture): file_content = TextPromptMessageContent(data="file-content") mock_to_prompt = mocker.patch( "core.agent.fc_agent_runner.file_manager.to_prompt_message_content", @@ -352,7 +353,7 @@ class TestRunMethod: assert len(outputs) == 1 assert runner.save_agent_thought.call_args.kwargs["thought"] == "hi" - def test_run_streaming_tool_call_inputs_type_error(self, runner, mocker): + def 
test_run_streaming_tool_call_inputs_type_error(self, runner, mocker: MockerFixture): message = MagicMock(id="m1") runner.stream_tool_call = True @@ -398,7 +399,7 @@ class TestRunMethod: outputs = list(runner.run(message, "query")) assert len(outputs) >= 1 - def test_run_with_tool_instance_and_files(self, runner, mocker): + def test_run_with_tool_instance_and_files(self, runner, mocker: MockerFixture): message = MagicMock(id="m1") tool_call = MagicMock() diff --git a/api/tests/unit_tests/core/agent/test_plugin_entities.py b/api/tests/unit_tests/core/agent/test_plugin_entities.py index 9955190aca..aa3098a2a1 100644 --- a/api/tests/unit_tests/core/agent/test_plugin_entities.py +++ b/api/tests/unit_tests/core/agent/test_plugin_entities.py @@ -9,6 +9,7 @@ mocking; ensure entity invariants and validation rules remain stable. import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture from core.agent.plugin_entities import ( AgentFeature, @@ -28,12 +29,12 @@ from core.tools.entities.tool_entities import ToolIdentity, ToolProviderIdentity @pytest.fixture -def mock_identity(mocker): +def mock_identity(mocker: MockerFixture): return mocker.MagicMock(spec=AgentStrategyIdentity) @pytest.fixture -def mock_provider_identity(mocker): +def mock_provider_identity(mocker: MockerFixture): return mocker.MagicMock(spec=AgentStrategyProviderIdentity) @@ -47,7 +48,7 @@ class TestAgentStrategyParameterType: "enum_member", list(AgentStrategyParameter.AgentStrategyParameterType), ) - def test_as_normal_type_calls_external_function(self, mocker, enum_member) -> None: + def test_as_normal_type_calls_external_function(self, mocker: MockerFixture, enum_member) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.as_normal_type", return_value="normalized", @@ -58,7 +59,7 @@ class TestAgentStrategyParameterType: mock_func.assert_called_once_with(enum_member) assert result == "normalized" - def test_as_normal_type_propagates_exception(self, mocker) -> None: 
+ def test_as_normal_type_propagates_exception(self, mocker: MockerFixture) -> None: enum_member = AgentStrategyParameter.AgentStrategyParameterType.STRING mocker.patch( "core.agent.plugin_entities.as_normal_type", @@ -79,7 +80,7 @@ class TestAgentStrategyParameterType: (AgentStrategyParameter.AgentStrategyParameterType.FILES, []), ], ) - def test_cast_value_calls_external_function(self, mocker, enum_member, value) -> None: + def test_cast_value_calls_external_function(self, mocker: MockerFixture, enum_member, value) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.cast_parameter_value", return_value="casted", @@ -90,7 +91,7 @@ class TestAgentStrategyParameterType: mock_func.assert_called_once_with(enum_member, value) assert result == "casted" - def test_cast_value_propagates_exception(self, mocker) -> None: + def test_cast_value_propagates_exception(self, mocker: MockerFixture) -> None: enum_member = AgentStrategyParameter.AgentStrategyParameterType.STRING mocker.patch( "core.agent.plugin_entities.cast_parameter_value", @@ -136,7 +137,7 @@ class TestAgentStrategyParameter: assert any(error["loc"] == ("type",) for error in exc_info.value.errors()) - def test_init_frontend_parameter_calls_external(self, mocker) -> None: + def test_init_frontend_parameter_calls_external(self, mocker: MockerFixture) -> None: mock_func = mocker.patch( "core.agent.plugin_entities.init_frontend_parameter", return_value="frontend", @@ -153,7 +154,7 @@ class TestAgentStrategyParameter: mock_func.assert_called_once_with(param, param.type, "value") assert result == "frontend" - def test_init_frontend_parameter_propagates_exception(self, mocker) -> None: + def test_init_frontend_parameter_propagates_exception(self, mocker: MockerFixture) -> None: mocker.patch( "core.agent.plugin_entities.init_frontend_parameter", side_effect=RuntimeError("error"), diff --git a/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py 
b/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py index 1c5b6ed944..6dbf301f65 100644 --- a/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py +++ b/api/tests/unit_tests/core/app/app_config/common/test_parameters_mapping.py @@ -10,7 +10,7 @@ class TestGetParametersFromFeatureDict: """Test suite for get_parameters_from_feature_dict""" @pytest.fixture - def mock_config(self, monkeypatch): + def mock_config(self, monkeypatch: pytest.MonkeyPatch): """Mock dify_config values""" mock = MagicMock() mock.UPLOAD_IMAGE_FILE_SIZE_LIMIT = 1 @@ -23,7 +23,7 @@ class TestGetParametersFromFeatureDict: return mock @pytest.fixture - def mock_default_file_limits(self, monkeypatch): + def mock_default_file_limits(self, monkeypatch: pytest.MonkeyPatch): """Mock DEFAULT_FILE_NUMBER_LIMITS constant""" monkeypatch.setattr(parameters_mapping, "DEFAULT_FILE_NUMBER_LIMITS", 99) return 99 diff --git a/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py b/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py index 013ed0cbc4..bd4ca5ff85 100644 --- a/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py +++ b/api/tests/unit_tests/core/app/app_config/common/test_sensitive_word_avoidance_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.common.sensitive_word_avoidance.manager import ( SensitiveWordAvoidanceConfigManager, @@ -26,7 +27,7 @@ class TestSensitiveWordAvoidanceConfigManagerConvert: # Assert assert result is None - def test_convert_returns_entity_when_enabled(self, mocker): + def test_convert_returns_entity_when_enabled(self, mocker: MockerFixture): # Arrange mock_entity = MagicMock() mocker.patch( @@ -48,7 +49,7 @@ class TestSensitiveWordAvoidanceConfigManagerConvert: # Assert assert result == mock_entity - def 
test_convert_enabled_without_type_or_config(self, mocker): + def test_convert_enabled_without_type_or_config(self, mocker: MockerFixture): # Arrange mock_entity = MagicMock() patched = mocker.patch( @@ -135,7 +136,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: with pytest.raises(ValueError, match="must be a dict"): SensitiveWordAvoidanceConfigManager.validate_and_set_defaults(tenant_id="tenant1", config=config) - def test_validate_calls_moderation_factory(self, mocker): + def test_validate_calls_moderation_factory(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" @@ -159,7 +160,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: assert result_config["sensitive_word_avoidance"]["enabled"] is True assert fields == ["sensitive_word_avoidance"] - def test_validate_sets_empty_dict_when_config_none(self, mocker): + def test_validate_sets_empty_dict_when_config_none(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" @@ -179,7 +180,7 @@ class TestSensitiveWordAvoidanceConfigManagerValidateAndSetDefaults: # Assert mock_validate.assert_called_once_with(name="mock_type", tenant_id="tenant1", config={}) - def test_validate_only_structure_validate_skips_factory(self, mocker): + def test_validate_only_structure_validate_skips_factory(self, mocker: MockerFixture): # Arrange mock_validate = mocker.patch( "core.app.app_config.common.sensitive_word_avoidance.manager.ModerationFactory.validate_config" diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py index 992b580376..359b04070b 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py +++ 
b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_agent_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.agent.manager import AgentConfigManager @@ -84,7 +85,7 @@ class TestAgentConfigManagerConvert: assert result.strategy.name == "CHAIN_OF_THOUGHT" - def test_convert_skips_disabled_tools(self, mocker, base_config): + def test_convert_skips_disabled_tools(self, mocker: MockerFixture, base_config): # Patch AgentEntity to bypass pydantic validation mock_agent_entity = mocker.patch( "core.app.app_config.easy_ui_based_app.agent.manager.AgentEntity", @@ -128,7 +129,7 @@ class TestAgentConfigManagerConvert: mock_validate.assert_called_once() mock_agent_entity.assert_called_once() - def test_convert_tool_requires_minimum_keys(self, mocker, base_config): + def test_convert_tool_requires_minimum_keys(self, mocker: MockerFixture, base_config): mock_validate = mocker.patch( "core.app.app_config.easy_ui_based_app.agent.manager.AgentToolEntity.model_validate", return_value=MagicMock(), diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py index a688e2a5c5..3a239eac0e 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_dataset_manager.py @@ -2,6 +2,7 @@ import uuid from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.dataset.manager import DatasetConfigManager from core.entities.agent_entities import PlanningStrategy @@ -69,7 +70,7 @@ class TestDatasetConfigManagerConvert: assert result.dataset_ids == [valid_uuid] assert result.retrieve_config.query_variable == "query" - def test_convert_single_with_metadata_configs(self, valid_uuid, 
mocker): + def test_convert_single_with_metadata_configs(self, valid_uuid, mocker: MockerFixture): mock_retrieve_config = MagicMock() mock_entity = MagicMock() mock_entity.dataset_ids = [valid_uuid] @@ -258,7 +259,7 @@ class TestExtractDatasetConfig: with pytest.raises(ValueError): DatasetConfigManager.extract_dataset_config_for_legacy_compatibility("tenant1", AppMode.CHAT, config) - def test_extract_invalid_uuid(self, mocker): + def test_extract_invalid_uuid(self, mocker: MockerFixture): invalid_uuid = "not-a-uuid" config = { "agent_mode": { @@ -270,7 +271,7 @@ class TestExtractDatasetConfig: with pytest.raises(ValueError): DatasetConfigManager.extract_dataset_config_for_legacy_compatibility("tenant1", AppMode.CHAT, config) - def test_extract_dataset_not_exists(self, valid_uuid, mocker): + def test_extract_dataset_not_exists(self, valid_uuid, mocker: MockerFixture): mocker.patch( "core.app.app_config.easy_ui_based_app.dataset.manager.DatasetService.get_dataset", return_value=None, @@ -292,7 +293,7 @@ class TestExtractDatasetConfig: class TestIsDatasetExists: - def test_dataset_exists_true(self, mocker, valid_uuid): + def test_dataset_exists_true(self, mocker: MockerFixture, valid_uuid): mock_dataset = MagicMock() mock_dataset.tenant_id = "tenant1" mocker.patch( @@ -302,14 +303,14 @@ class TestIsDatasetExists: assert DatasetConfigManager.is_dataset_exists("tenant1", valid_uuid) - def test_dataset_exists_false_when_not_found(self, mocker, valid_uuid): + def test_dataset_exists_false_when_not_found(self, mocker: MockerFixture, valid_uuid): mocker.patch( "core.app.app_config.easy_ui_based_app.dataset.manager.DatasetService.get_dataset", return_value=None, ) assert not DatasetConfigManager.is_dataset_exists("tenant1", valid_uuid) - def test_dataset_exists_false_when_tenant_mismatch(self, mocker, valid_uuid): + def test_dataset_exists_false_when_tenant_mismatch(self, mocker: MockerFixture, valid_uuid): mock_dataset = MagicMock() mock_dataset.tenant_id = "other" 
mocker.patch( diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py index 186b4a501d..e5b581b6a0 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_converter.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.model_config.converter import ModelConfigConverter from core.entities.model_entities import ModelStatus @@ -16,7 +17,7 @@ from graphon.model_runtime.entities.model_entities import ModelPropertyKey class TestModelConfigConverter: @pytest.fixture(autouse=True) - def patch_response_entity(self, mocker): + def patch_response_entity(self, mocker: MockerFixture): """ Patch ModelConfigWithCredentialsEntity to bypass Pydantic validation and return a simple namespace object instead. 
@@ -69,7 +70,7 @@ class TestModelConfigConverter: return bundle @pytest.fixture - def patch_provider_manager(self, mocker, mock_provider_bundle): + def patch_provider_manager(self, mocker: MockerFixture, mock_provider_bundle): mock_manager = MagicMock() mock_manager.get_provider_model_bundle.return_value = mock_provider_bundle mocker.patch( @@ -99,7 +100,7 @@ class TestModelConfigConverter: assert result.parameters == {"temperature": 0.7} assert result.stop == ["\n"] - def test_convert_mode_from_schema_valid(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_mode_from_schema_valid(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_app_config.model.mode = None mock_provider_bundle.model_type_instance.get_model_schema.return_value.model_properties = { @@ -116,7 +117,9 @@ class TestModelConfigConverter: result = ModelConfigConverter.convert(mock_app_config) assert result.mode == LLMMode.COMPLETION - def test_convert_mode_from_schema_invalid_fallback(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_mode_from_schema_invalid_fallback( + self, mock_app_config, mock_provider_bundle, mocker: MockerFixture + ): mock_provider_bundle.model_type_instance.get_model_schema.return_value.model_properties = { ModelPropertyKey.MODE: "invalid" } @@ -135,7 +138,7 @@ class TestModelConfigConverter: # Credential Errors # ============================= - def test_convert_credentials_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_credentials_none_raises(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.configuration.get_current_credentials.return_value = None mock_manager = MagicMock() @@ -152,7 +155,7 @@ class TestModelConfigConverter: # Provider Model Errors # ============================= - def test_convert_provider_model_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_provider_model_none_raises(self, 
mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.configuration.get_provider_model.return_value = None mock_manager = MagicMock() @@ -174,7 +177,7 @@ class TestModelConfigConverter: ], ) def test_convert_provider_model_status_errors( - self, mock_app_config, mock_provider_bundle, mocker, status, expected_exception + self, mock_app_config, mock_provider_bundle, mocker: MockerFixture, status, expected_exception ): mock_provider = MagicMock() mock_provider.status = status @@ -194,7 +197,7 @@ class TestModelConfigConverter: # Schema Errors # ============================= - def test_convert_model_schema_none_raises(self, mock_app_config, mock_provider_bundle, mocker): + def test_convert_model_schema_none_raises(self, mock_app_config, mock_provider_bundle, mocker: MockerFixture): mock_provider_bundle.model_type_instance.get_model_schema.return_value = None mock_manager = MagicMock() diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py index 68bca485bb..72e334004e 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_model_config_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture # Target from core.app.app_config.easy_ui_based_app.model_config.manager import ModelConfigManager @@ -107,7 +108,9 @@ class TestModelConfigManager: # validate_and_set_defaults # ========================================================== - def test_validate_and_set_defaults_success(self, mocker, valid_config, provider_entities, valid_model_list): + def test_validate_and_set_defaults_success( + self, mocker: MockerFixture, valid_config, provider_entities, valid_model_list + ): self._patch_model_assembly( mocker, provider_entities=provider_entities, 
@@ -127,35 +130,37 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="object type"): ModelConfigManager.validate_and_set_defaults("tenant1", {"model": "invalid"}) - def test_validate_and_set_defaults_missing_provider(self, mocker, provider_entities): + def test_validate_and_set_defaults_missing_provider(self, mocker: MockerFixture, provider_entities): config = {"model": {"name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.provider is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_invalid_provider(self, mocker, provider_entities): + def test_validate_and_set_defaults_invalid_provider(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "invalid/provider", "name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.provider is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_missing_name(self, mocker, provider_entities): + def test_validate_and_set_defaults_missing_name(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "openai/gpt", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with pytest.raises(ValueError, match="model.name is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_empty_models(self, mocker, provider_entities): + def test_validate_and_set_defaults_empty_models(self, mocker: MockerFixture, provider_entities): config = {"model": {"provider": "openai/gpt", "name": "gpt-4", "completion_params": {}}} self._patch_model_assembly(mocker, provider_entities=provider_entities, model_list=[]) with 
pytest.raises(ValueError, match="must be in the specified model list"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_invalid_model_name(self, mocker, provider_entities, valid_model_list): + def test_validate_and_set_defaults_invalid_model_name( + self, mocker: MockerFixture, provider_entities, valid_model_list + ): config = {"model": {"provider": "openai/gpt", "name": "invalid", "completion_params": {}}} self._patch_model_assembly( mocker, @@ -166,7 +171,7 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="must be in the specified model list"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_default_mode_when_missing(self, mocker, provider_entities): + def test_validate_and_set_defaults_default_mode_when_missing(self, mocker: MockerFixture, provider_entities): model = MagicMock() model.model = "gpt-4" model.model_properties = {} @@ -178,7 +183,9 @@ class TestModelConfigManager: assert updated_config["model"]["mode"] == "completion" - def test_validate_and_set_defaults_missing_completion_params(self, mocker, provider_entities, valid_model_list): + def test_validate_and_set_defaults_missing_completion_params( + self, mocker: MockerFixture, provider_entities, valid_model_list + ): config = {"model": {"provider": "openai/gpt", "name": "gpt-4"}} self._patch_model_assembly( mocker, @@ -189,7 +196,7 @@ class TestModelConfigManager: with pytest.raises(ValueError, match="completion_params is required"): ModelConfigManager.validate_and_set_defaults("tenant1", config) - def test_validate_and_set_defaults_provider_without_slash_converted(self, mocker, valid_model_list): + def test_validate_and_set_defaults_provider_without_slash_converted(self, mocker: MockerFixture, valid_model_list): """ Covers branch where provider does not contain '/' and ModelProviderID conversion is triggered (line 64). 
diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py index fd49072cd5..3fd21ab22b 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_prompt_template_manager.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.prompt_template.manager import ( PromptTemplateConfigManager, @@ -38,7 +39,7 @@ class TestPromptTemplateConfigManagerConvert: with pytest.raises(ValueError, match="prompt_type is required"): PromptTemplateConfigManager.convert({}) - def test_convert_simple_prompt(self, mocker): + def test_convert_simple_prompt(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() @@ -56,7 +57,7 @@ class TestPromptTemplateConfigManagerConvert: assert result == "simple_entity" mock_prompt_entity_cls.assert_called_once_with(prompt_type="simple", simple_prompt_template="hello") - def test_convert_advanced_chat_valid(self, mocker): + def test_convert_advanced_chat_valid(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mock_prompt_entity_cls.return_value = "advanced_entity" @@ -97,7 +98,7 @@ class TestPromptTemplateConfigManagerConvert: {"text": "hi", "role": 123}, ], ) - def test_convert_advanced_invalid_message_fields(self, mocker, message): + def test_convert_advanced_invalid_message_fields(self, mocker: MockerFixture, message): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() @@ -114,7 +115,7 @@ class TestPromptTemplateConfigManagerConvert: with pytest.raises(ValueError): PromptTemplateConfigManager.convert(config) - def 
test_convert_advanced_completion_with_roles(self, mocker): + def test_convert_advanced_completion_with_roles(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mock_prompt_entity_cls.return_value = "advanced_entity" @@ -154,7 +155,7 @@ class TestValidateAndSetDefaults: def setup_method(self): self.valid_model = {"mode": "chat"} - def _patch_prompt_type(self, mocker): + def _patch_prompt_type(self, mocker: MockerFixture): mock_prompt_entity_cls = MagicMock() mock_prompt_entity_cls.PromptType = DummyPromptType() mocker.patch( @@ -163,7 +164,7 @@ class TestValidateAndSetDefaults: ) return mock_prompt_entity_cls - def test_default_prompt_type_set(self, mocker): + def test_default_prompt_type_set(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = {"model": self.valid_model} @@ -173,7 +174,7 @@ class TestValidateAndSetDefaults: assert result["prompt_type"] == "simple" assert isinstance(keys, list) - def test_invalid_prompt_type_raises(self, mocker): + def test_invalid_prompt_type_raises(self, mocker: MockerFixture): class InvalidEnum(DummyPromptType): def __iter__(self): return iter([DummyEnumValue("valid")]) @@ -191,7 +192,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_invalid_chat_prompt_config_type(self, mocker): + def test_invalid_chat_prompt_config_type(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -203,7 +204,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_simple_mode_invalid_pre_prompt_type(self, mocker): + def test_simple_mode_invalid_pre_prompt_type(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -215,7 +216,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): 
PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_requires_one_config(self, mocker): + def test_advanced_requires_one_config(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -228,7 +229,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_invalid_model_mode(self, mocker): + def test_advanced_invalid_model_mode(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -240,7 +241,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_advanced_chat_prompt_length_exceeds(self, mocker): + def test_advanced_chat_prompt_length_exceeds(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { @@ -252,7 +253,7 @@ class TestValidateAndSetDefaults: with pytest.raises(ValueError): PromptTemplateConfigManager.validate_and_set_defaults("chat_app", config) - def test_completion_prefix_defaults_set_when_empty(self, mocker): + def test_completion_prefix_defaults_set_when_empty(self, mocker: MockerFixture): self._patch_prompt_type(mocker) config = { diff --git a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py index d9fe7004ff..b82417cfed 100644 --- a/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py +++ b/api/tests/unit_tests/core/app/app_config/easy_ui_based_app/test_variables_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.app_config.easy_ui_based_app.variables.manager import ( BasicVariablesConfigManager, @@ -15,7 +16,7 @@ class TestBasicVariablesConfigManagerConvert: assert variables == [] assert external == [] - def 
test_convert_external_data_tools_enabled_and_disabled(self, mocker): + def test_convert_external_data_tools_enabled_and_disabled(self, mocker: MockerFixture): config = { "external_data_tools": [ {"enabled": False}, @@ -232,7 +233,7 @@ class TestValidateExternalDataToolsAndSetDefaults: with pytest.raises(ValueError): BasicVariablesConfigManager.validate_external_data_tools_and_set_defaults("tenant", config) - def test_validate_disabled_tool_skipped(self, mocker): + def test_validate_disabled_tool_skipped(self, mocker: MockerFixture): config = {"external_data_tools": [{"enabled": False}]} spy = mocker.patch( @@ -250,7 +251,7 @@ class TestValidateExternalDataToolsAndSetDefaults: with pytest.raises(ValueError): BasicVariablesConfigManager.validate_external_data_tools_and_set_defaults("tenant", config) - def test_validate_enabled_tool_calls_factory(self, mocker): + def test_validate_enabled_tool_calls_factory(self, mocker: MockerFixture): config = {"external_data_tools": [{"enabled": True, "type": "tool", "config": {"a": 1}}]} spy = mocker.patch( @@ -263,7 +264,7 @@ class TestValidateExternalDataToolsAndSetDefaults: class TestValidateAndSetDefaultsIntegration: - def test_validate_and_set_defaults_calls_both(self, mocker): + def test_validate_and_set_defaults_calls_both(self, mocker: MockerFixture): config = {} spy_var = mocker.patch.object( diff --git a/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py b/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py index e99852cf76..e2ab3e2192 100644 --- a/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py +++ b/api/tests/unit_tests/core/app/app_config/test_base_app_config_manager.py @@ -2,6 +2,7 @@ from collections import UserDict from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.app.app_config.base_app_config_manager import BaseAppConfigManager @@ -12,7 +13,7 @@ class TestBaseAppConfigManager: return {"key": 
"value", "another": 123} @pytest.fixture - def mock_app_additional_features(self, mocker): + def mock_app_additional_features(self, mocker: MockerFixture): mock_instance = MagicMock() mocker.patch( "core.app.app_config.base_app_config_manager.AppAdditionalFeatures", @@ -21,7 +22,7 @@ class TestBaseAppConfigManager: return mock_instance @pytest.fixture - def mock_managers(self, mocker): + def mock_managers(self, mocker: MockerFixture): retrieval = mocker.patch( "core.app.app_config.base_app_config_manager.RetrievalResourceConfigManager.convert", return_value="retrieval_result", @@ -72,7 +73,7 @@ class TestBaseAppConfigManager: ) def test_convert_features_all_modes( self, - mocker, + mocker: MockerFixture, mock_config_dict, mock_app_additional_features, mock_managers, @@ -107,7 +108,7 @@ class TestBaseAppConfigManager: mock_managers["speech_to_text"].assert_called_once_with(config=dict(mock_config_dict.items())) mock_managers["text_to_speech"].assert_called_once_with(config=dict(mock_config_dict.items())) - def test_convert_features_empty_config(self, mocker, mock_app_additional_features, mock_managers): + def test_convert_features_empty_config(self, mocker: MockerFixture, mock_app_additional_features, mock_managers): # Arrange empty_config = {} mock_app_mode = MagicMock() @@ -143,7 +144,7 @@ class TestBaseAppConfigManager: with pytest.raises((TypeError, AttributeError)): BaseAppConfigManager.convert_features(invalid_config, "CHAT") - def test_convert_features_manager_exception_propagates(self, mocker, mock_config_dict): + def test_convert_features_manager_exception_propagates(self, mocker: MockerFixture, mock_config_dict): # Arrange mocker.patch( "core.app.app_config.base_app_config_manager.RetrievalResourceConfigManager.convert", @@ -154,7 +155,9 @@ class TestBaseAppConfigManager: with pytest.raises(RuntimeError): BaseAppConfigManager.convert_features(mock_config_dict, "CHAT") - def test_convert_features_mapping_subclass(self, mocker, mock_app_additional_features, 
mock_managers): + def test_convert_features_mapping_subclass( + self, mocker: MockerFixture, mock_app_additional_features, mock_managers + ): # Arrange class CustomMapping(UserDict): pass diff --git a/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py b/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py index fa128aca87..dacd69a578 100644 --- a/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py +++ b/api/tests/unit_tests/core/app/app_config/workflow_ui_based_app/test_workflow_ui_based_app_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.app_config.workflow_ui_based_app.variables.manager import ( WorkflowVariablesConfigManager, @@ -10,19 +11,19 @@ from core.app.app_config.workflow_ui_based_app.variables.manager import ( @pytest.fixture -def mock_workflow(mocker): +def mock_workflow(mocker: MockerFixture): workflow = mocker.MagicMock() workflow.graph_dict = {"nodes": []} return workflow @pytest.fixture -def mock_variable_entity(mocker): +def mock_variable_entity(mocker: MockerFixture): return mocker.patch("core.app.app_config.workflow_ui_based_app.variables.manager.VariableEntity") @pytest.fixture -def mock_rag_entity(mocker): +def mock_rag_entity(mocker: MockerFixture): return mocker.patch("core.app.app_config.workflow_ui_based_app.variables.manager.RagPipelineVariableEntity") diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py index af5d203f12..bc3b06cd1b 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_generator.py @@ -111,7 +111,7 @@ class TestAdvancedChatAppGeneratorInternals: workflow_id="workflow-id", ) - def test_generate_loads_conversation_and_files(self, 
monkeypatch): + def test_generate_loads_conversation_and_files(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() @@ -195,7 +195,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["application_generate_entity"].files == built_files assert build_files_called["called"] is True - def test_resume_delegates_to_generate(self, monkeypatch): + def test_resume_delegates_to_generate(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() application_generate_entity = AdvancedChatAppGenerateEntity.model_construct( task_id="task", @@ -235,7 +235,7 @@ class TestAdvancedChatAppGeneratorInternals: assert result == {"resumed": True} assert captured["graph_runtime_state"] is not None - def test_single_iteration_generate_builds_debug_task(self, monkeypatch): + def test_single_iteration_generate_builds_debug_task(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() captured: dict[str, object] = {} @@ -293,7 +293,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["variable_loader"] is var_loader assert captured["application_generate_entity"].single_iteration_run.node_id == "node-1" - def test_single_loop_generate_builds_debug_task(self, monkeypatch): + def test_single_loop_generate_builds_debug_task(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = self._build_app_config() captured: dict[str, object] = {} @@ -351,7 +351,7 @@ class TestAdvancedChatAppGeneratorInternals: assert captured["variable_loader"] is var_loader assert captured["application_generate_entity"].single_loop_run.node_id == "node-2" - def test_generate_internal_flow_initial_conversation_with_pause_layer(self, monkeypatch): + def test_generate_internal_flow_initial_conversation_with_pause_layer(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 
0 app_config = self._build_app_config() @@ -449,7 +449,7 @@ class TestAdvancedChatAppGeneratorInternals: assert isinstance(captured["conversation"], ConversationSnapshot) assert isinstance(captured["message"], MessageSnapshot) - def test_generate_internal_flow_with_existing_records_skips_init(self, monkeypatch): + def test_generate_internal_flow_with_existing_records_skips_init(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 0 app_config = self._build_app_config() @@ -535,7 +535,7 @@ class TestAdvancedChatAppGeneratorInternals: db_session.refresh.assert_not_called() db_session.close.assert_called_once() - def test_generate_worker_raises_when_workflow_not_found(self, monkeypatch): + def test_generate_worker_raises_when_workflow_not_found(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -594,7 +594,7 @@ class TestAdvancedChatAppGeneratorInternals: graph_runtime_state=None, ) - def test_generate_worker_raises_when_app_not_found_for_internal_call(self, monkeypatch): + def test_generate_worker_raises_when_app_not_found_for_internal_call(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -658,7 +658,7 @@ class TestAdvancedChatAppGeneratorInternals: graph_runtime_state=None, ) - def test_generate_worker_handles_stopped_error(self, monkeypatch): + def test_generate_worker_handles_stopped_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -732,7 +732,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_not_called() - def test_generate_worker_handles_validation_error(self, monkeypatch): + def test_generate_worker_handles_validation_error(self, monkeypatch: pytest.MonkeyPatch): generator 
= AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -816,7 +816,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_called_once() - def test_generate_worker_handles_value_and_unknown_errors(self, monkeypatch): + def test_generate_worker_handles_value_and_unknown_errors(self, monkeypatch: pytest.MonkeyPatch): app_config = self._build_app_config() @contextmanager @@ -897,7 +897,7 @@ class TestAdvancedChatAppGeneratorInternals: queue_manager.publish_error.assert_called_once() - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 @@ -953,7 +953,7 @@ class TestAdvancedChatAppGeneratorInternals: stream=False, ) - def test_handle_response_re_raises_value_error(self, monkeypatch): + def test_handle_response_re_raises_value_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 app_config = self._build_app_config() @@ -1002,7 +1002,7 @@ class TestAdvancedChatAppGeneratorInternals: logger_exception.assert_called_once() - def test_generate_worker_handles_invoke_auth_error(self, monkeypatch): + def test_generate_worker_handles_invoke_auth_error(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() generator._dialogue_count = 1 @@ -1088,7 +1088,7 @@ class TestAdvancedChatAppGeneratorInternals: assert queue_manager.publish_error.called - def test_generate_debugger_enables_retrieve_source(self, monkeypatch): + def test_generate_debugger_enables_retrieve_source(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = WorkflowUIBasedAppConfig( @@ -1167,7 +1167,7 @@ class TestAdvancedChatAppGeneratorInternals: assert app_config.additional_features.show_retrieve_source is True assert 
captured["application_generate_entity"].query == "hello" - def test_generate_service_api_sets_parent_message_id(self, monkeypatch): + def test_generate_service_api_sets_parent_message_id(self, monkeypatch: pytest.MonkeyPatch): generator = AdvancedChatAppGenerator() app_config = WorkflowUIBasedAppConfig( diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py index 64bcfa9a18..1d72d7807d 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py @@ -224,7 +224,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert isinstance(responses[0], ValueError) - def test_handle_workflow_started_event_sets_run_id(self, monkeypatch): + def test_handle_workflow_started_event_sets_run_id(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( variable_pool=build_test_variable_pool(variables=build_system_variables(workflow_execution_id="run-id")), @@ -368,7 +368,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert list(pipeline._handle_loop_next_event(loop_next)) == ["loop_next"] assert list(pipeline._handle_loop_completed_event(loop_done)) == ["loop_done"] - def test_workflow_finish_handlers(self, monkeypatch): + def test_workflow_finish_handlers(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_run_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( @@ -593,7 +593,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert message.answer == "hello" assert message.message_metadata - def test_handle_stop_event_saves_message_for_moderation(self, monkeypatch): + def test_handle_stop_event_saves_message_for_moderation(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() 
pipeline._message_end_to_stream_response = lambda: "end" saved: list[str] = [] @@ -614,7 +614,7 @@ class TestAdvancedChatGenerateTaskPipeline: assert responses == ["end"] assert saved == ["saved"] - def test_handle_message_end_event_applies_output_moderation(self, monkeypatch): + def test_handle_message_end_event_applies_output_moderation(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), diff --git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py index a871e8d93b..d47b70e950 100644 --- a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py +++ b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_config_manager.py @@ -2,6 +2,7 @@ import uuid from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from core.app.app_config.entities import EasyUIBasedAppModelConfigFrom from core.app.apps.agent_chat.app_config_manager import ( @@ -11,7 +12,7 @@ from core.entities.agent_entities import PlanningStrategy class TestAgentChatAppConfigManagerGetAppConfig: - def test_get_app_config_override_config(self, mocker): + def test_get_app_config_override_config(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"ignored": True} @@ -45,7 +46,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: assert result.variables == "variables" assert result.external_data_variables == "external" - def test_get_app_config_conversation_specific(self, mocker): + def test_get_app_config_conversation_specific(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", 
tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -76,7 +77,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: assert result.app_model_config_dict == app_model_config.to_dict.return_value assert result.app_model_config_from.value == "conversation-specific-config" - def test_get_app_config_latest_config(self, mocker): + def test_get_app_config_latest_config(self, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -107,7 +108,7 @@ class TestAgentChatAppConfigManagerGetAppConfig: class TestAgentChatAppConfigManagerConfigValidate: - def test_config_validate_filters_related_keys(self, mocker): + def test_config_validate_filters_related_keys(self, mocker: MockerFixture): config = { "model": {}, "user_input_form": {}, @@ -247,7 +248,7 @@ class TestValidateAgentModeAndSetDefaults: {"agent_mode": {"enabled": True, "tools": [{"dataset": {"enabled": True, "id": "bad"}}]}}, ) - def test_old_tool_dataset_id_not_exists(self, mocker): + def test_old_tool_dataset_id_not_exists(self, mocker: MockerFixture): mocker.patch( "core.app.apps.agent_chat.app_config_manager.DatasetConfigManager.is_dataset_exists", return_value=False, @@ -275,7 +276,7 @@ class TestValidateAgentModeAndSetDefaults: "tenant", {"agent_mode": {"enabled": True, "tools": [tool]}} ) - def test_valid_old_and_new_style_tools(self, mocker): + def test_valid_old_and_new_style_tools(self, mocker: MockerFixture): mocker.patch( "core.app.apps.agent_chat.app_config_manager.DatasetConfigManager.is_dataset_exists", return_value=True, diff --git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py index 80f7f94b1a..6cd62c933a 100644 --- 
a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_generator.py @@ -2,6 +2,7 @@ import contextlib import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture from core.app.apps.agent_chat.app_generator import AgentChatAppGenerator from core.app.apps.exc import GenerateTaskStoppedError @@ -16,7 +17,7 @@ class DummyAccount: @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = AgentChatAppGenerator() mocker.patch( "core.app.apps.agent_chat.app_generator.current_app", @@ -27,19 +28,19 @@ def generator(mocker): class TestAgentChatAppGeneratorGenerate: - def test_generate_rejects_blocking_mode(self, generator, mocker): + def test_generate_rejects_blocking_mode(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): generator.generate(app_model=app_model, user=user, args={}, invoke_from=mocker.MagicMock(), streaming=False) - def test_generate_requires_query(self, generator, mocker): + def test_generate_requires_query(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): generator.generate(app_model=app_model, user=user, args={"inputs": {}}, invoke_from=mocker.MagicMock()) - def test_generate_rejects_non_string_query(self, generator, mocker): + def test_generate_rejects_non_string_query(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") with pytest.raises(ValueError): @@ -50,7 +51,7 @@ class TestAgentChatAppGeneratorGenerate: invoke_from=mocker.MagicMock(), ) - def test_generate_override_requires_debugger(self, generator, mocker): + def test_generate_override_requires_debugger(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock() user = DummyAccount("user") @@ -62,7 +63,7 @@ class 
TestAgentChatAppGeneratorGenerate: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_success_with_debugger_override(self, generator, mocker): + def test_generate_success_with_debugger_override(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -142,7 +143,7 @@ class TestAgentChatAppGeneratorGenerate: assert result == {"result": "ok"} thread_obj.start.assert_called_once() - def test_generate_without_file_config(self, generator, mocker): + def test_generate_without_file_config(self, generator, mocker: MockerFixture): app_model = mocker.MagicMock(id="app1", tenant_id="tenant", mode="agent-chat") app_model_config = mocker.MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "p"}} @@ -213,14 +214,14 @@ class TestAgentChatAppGeneratorGenerate: class TestAgentChatAppGeneratorWorker: @pytest.fixture(autouse=True) - def patch_context(self, mocker): + def patch_context(self, mocker: MockerFixture): @contextlib.contextmanager def ctx_manager(*args, **kwargs): yield mocker.patch("core.app.apps.agent_chat.app_generator.preserve_flask_contexts", ctx_manager) - def test_generate_worker_handles_generate_task_stopped(self, generator, mocker): + def test_generate_worker_handles_generate_task_stopped(self, generator, mocker: MockerFixture): queue_manager = mocker.MagicMock() generator._get_conversation = mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) @@ -250,7 +251,7 @@ class TestAgentChatAppGeneratorWorker: Exception("bad"), ], ) - def test_generate_worker_publishes_errors(self, generator, mocker, error): + def test_generate_worker_publishes_errors(self, generator, mocker: MockerFixture, error): queue_manager = mocker.MagicMock() generator._get_conversation = 
mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) @@ -271,7 +272,7 @@ class TestAgentChatAppGeneratorWorker: assert queue_manager.publish_error.called - def test_generate_worker_logs_value_error_when_debug(self, generator, mocker): + def test_generate_worker_logs_value_error_when_debug(self, generator, mocker: MockerFixture): queue_manager = mocker.MagicMock() generator._get_conversation = mocker.MagicMock(return_value=mocker.MagicMock()) generator._get_message = mocker.MagicMock(return_value=mocker.MagicMock()) diff --git a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py index 4567b35480..0260235b03 100644 --- a/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/agent_chat/test_agent_chat_app_runner.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.agent.entities import AgentEntity from core.app.apps.agent_chat.app_runner import AgentChatAppRunner @@ -13,7 +14,7 @@ def runner(): class TestAgentChatAppRunnerRun: - def test_run_app_not_found(self, runner, mocker): + def test_run_app_not_found(self, runner, mocker: MockerFixture): app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", agent=mocker.MagicMock()) generate_entity = mocker.MagicMock(app_config=app_config, inputs={}, query="q", files=[], stream=True) @@ -22,7 +23,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock()) - def test_run_moderation_error_direct_output(self, runner, mocker): + def test_run_moderation_error_direct_output(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", 
prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -45,7 +46,7 @@ class TestAgentChatAppRunnerRun: runner.direct_output.assert_called_once() - def test_run_annotation_reply_short_circuits(self, runner, mocker): + def test_run_annotation_reply_short_circuits(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -74,7 +75,7 @@ class TestAgentChatAppRunnerRun: queue_manager.publish.assert_called_once() runner.direct_output.assert_called_once() - def test_run_hosting_moderation_short_circuits(self, runner, mocker): + def test_run_hosting_moderation_short_circuits(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock() @@ -98,7 +99,7 @@ class TestAgentChatAppRunnerRun: runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(), mocker.MagicMock()) - def test_run_model_schema_missing(self, runner, mocker): + def test_run_model_schema_missing(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -140,7 +141,7 @@ class TestAgentChatAppRunnerRun: (LLMMode.COMPLETION, "CotCompletionAgentRunner"), ], ) - def test_run_chain_of_thought_modes(self, runner, mocker, mode, expected_runner): + def test_run_chain_of_thought_modes(self, runner, mocker: MockerFixture, mode, expected_runner): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", 
prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -196,7 +197,7 @@ class TestAgentChatAppRunnerRun: runner_instance.run.assert_called_once() runner._handle_invoke_result.assert_called_once() - def test_run_invalid_llm_mode_raises(self, runner, mocker): + def test_run_invalid_llm_mode_raises(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -242,7 +243,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), conversation, message) - def test_run_function_calling_strategy_selected_by_features(self, runner, mocker): + def test_run_function_calling_strategy_selected_by_features(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT) @@ -298,7 +299,7 @@ class TestAgentChatAppRunnerRun: assert app_config.agent.strategy == AgentEntity.Strategy.FUNCTION_CALLING runner_instance.run.assert_called_once() - def test_run_conversation_not_found(self, runner, mocker): + def test_run_conversation_not_found(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.FUNCTION_CALLING) @@ -332,7 +333,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), 
mocker.MagicMock(id="conv"), mocker.MagicMock(id="msg")) - def test_run_message_not_found(self, runner, mocker): + def test_run_message_not_found(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = AgentEntity(provider="p", model="m", strategy=AgentEntity.Strategy.FUNCTION_CALLING) @@ -366,7 +367,7 @@ class TestAgentChatAppRunnerRun: with pytest.raises(ValueError): runner.run(generate_entity, mocker.MagicMock(), mocker.MagicMock(id="conv"), mocker.MagicMock(id="msg")) - def test_run_invalid_agent_strategy_raises(self, runner, mocker): + def test_run_invalid_agent_strategy_raises(self, runner, mocker: MockerFixture): app_record = mocker.MagicMock(id="app1", tenant_id="tenant") app_config = mocker.MagicMock(app_id="app1", tenant_id="tenant", prompt_template=mocker.MagicMock()) app_config.agent = mocker.MagicMock(strategy="invalid", provider="p", model="m") diff --git a/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py b/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py index aa2085177e..8dcf6e9193 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/completion/test_app_runner.py @@ -2,6 +2,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.app.apps.completion.app_runner as module from core.app.apps.completion.app_runner import CompletionAppRunner @@ -47,7 +48,7 @@ def _build_generate_entity(app_config, file_upload_config=None): class TestCompletionAppRunner: - def test_run_app_not_found(self, runner, mocker): + def test_run_app_not_found(self, runner, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = None mocker.patch.object(module.db, "session", session) @@ -58,7 +59,7 @@ class 
TestCompletionAppRunner: with pytest.raises(ValueError): runner.run(app_generate_entity, MagicMock(), MagicMock()) - def test_run_moderation_error_outputs_direct(self, runner, mocker): + def test_run_moderation_error_outputs_direct(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -78,7 +79,7 @@ class TestCompletionAppRunner: runner.direct_output.assert_called_once() runner._handle_invoke_result.assert_not_called() - def test_run_hosting_moderation_stops(self, runner, mocker): + def test_run_hosting_moderation_stops(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -97,7 +98,7 @@ class TestCompletionAppRunner: runner._handle_invoke_result.assert_not_called() - def test_run_dataset_and_external_tools_flow(self, runner, mocker): + def test_run_dataset_and_external_tools_flow(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() @@ -140,7 +141,7 @@ class TestCompletionAppRunner: assert dataset_retrieval.retrieve.call_args.kwargs["query"] == "query_from_input" runner._handle_invoke_result.assert_called_once() - def test_run_uses_low_image_detail_default(self, runner, mocker): + def test_run_uses_low_image_detail_default(self, runner, mocker: MockerFixture): app_record = MagicMock(id="app1", tenant_id="tenant") session = mocker.MagicMock() diff --git a/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py b/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py index 024bd8f302..353162be8c 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py +++ b/api/tests/unit_tests/core/app/apps/completion/test_completion_app_config_manager.py @@ -1,6 +1,8 @@ from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + 
import core.app.apps.completion.app_config_manager as module from core.app.app_config.entities import EasyUIBasedAppModelConfigFrom from core.app.apps.completion.app_config_manager import CompletionAppConfigManager @@ -8,7 +10,7 @@ from models.model import AppMode class TestCompletionAppConfigManager: - def test_get_app_config_with_override(self, mocker): + def test_get_app_config_with_override(self, mocker: MockerFixture): app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION.value) app_model_config = MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "x"}} @@ -35,8 +37,8 @@ class TestCompletionAppConfigManager: assert result.external_data_variables == ["ext1"] assert result.app_mode == AppMode.COMPLETION - def test_get_app_config_without_override_uses_model_config(self, mocker): - app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION.value) + def test_get_app_config_without_override_uses_model_config(self, mocker: MockerFixture): + app_model = MagicMock(tenant_id="tenant", id="app1", mode=AppMode.COMPLETION) app_model_config = MagicMock(id="cfg1") app_model_config.to_dict.return_value = {"model": {"provider": "x"}} @@ -53,7 +55,7 @@ class TestCompletionAppConfigManager: assert result.app_model_config_from == EasyUIBasedAppModelConfigFrom.APP_LATEST_CONFIG assert result.app_model_config_dict == {"model": {"provider": "x"}} - def test_config_validate_filters_related_keys(self, mocker): + def test_config_validate_filters_related_keys(self, mocker: MockerFixture): config = { "model": {"provider": "x"}, "variables": ["v"], diff --git a/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py b/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py index f2e35f9900..de20dde677 100644 --- a/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py +++ 
b/api/tests/unit_tests/core/app/apps/completion/test_completion_completion_app_generator.py @@ -4,6 +4,7 @@ from unittest.mock import MagicMock import pytest from pydantic import ValidationError +from pytest_mock import MockerFixture import core.app.apps.completion.app_generator as module from core.app.apps.completion.app_generator import CompletionAppGenerator @@ -15,7 +16,7 @@ from services.errors.message import MessageNotExistsError @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = CompletionAppGenerator() mocker.patch.object(module, "copy_current_request_context", side_effect=lambda fn: fn) @@ -69,7 +70,7 @@ class TestCompletionAppGenerator: streaming=False, ) - def test_generate_success_no_file_config(self, generator, mocker): + def test_generate_success_no_file_config(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) mocker.patch.object(module.FileUploadConfigManager, "convert", return_value=None) @@ -99,7 +100,7 @@ class TestCompletionAppGenerator: assert result == "converted" module.file_factory.build_from_mappings.assert_not_called() - def test_generate_success_with_files(self, generator, mocker): + def test_generate_success_with_files(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) @@ -131,7 +132,7 @@ class TestCompletionAppGenerator: assert result == "converted" module.file_factory.build_from_mappings.assert_called_once() - def test_generate_override_model_config_debugger(self, generator, mocker): + def test_generate_override_model_config_debugger(self, generator, mocker: MockerFixture): app_model_config = _build_app_model_config() mocker.patch.object(generator, "_get_app_model_config", return_value=app_model_config) @@ -165,7 +166,7 @@ class TestCompletionAppGenerator: assert 
get_app_config.call_args.kwargs["override_config_dict"] == override_config - def test_generate_more_like_this_message_not_found(self, generator, mocker): + def test_generate_more_like_this_message_not_found(self, generator, mocker: MockerFixture): session = mocker.MagicMock() session.scalar.return_value = None mocker.patch.object(module.db, "session", session) @@ -178,7 +179,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_disabled(self, generator, mocker): + def test_generate_more_like_this_disabled(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = MagicMock(more_like_this=False, more_like_this_dict={"enabled": False}) @@ -195,7 +196,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_app_model_config_missing(self, generator, mocker): + def test_generate_more_like_this_app_model_config_missing(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = None @@ -212,7 +213,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_message_config_none(self, generator, mocker): + def test_generate_more_like_this_message_config_none(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = MagicMock(more_like_this=True, more_like_this_dict={"enabled": True}) @@ -229,7 +230,7 @@ class TestCompletionAppGenerator: invoke_from=InvokeFrom.WEB_APP, ) - def test_generate_more_like_this_success(self, generator, mocker): + def test_generate_more_like_this_success(self, generator, mocker: MockerFixture): app_model = _build_app_model() app_model.app_model_config = MagicMock(more_like_this=True, more_like_this_dict={"enabled": True}) @@ -297,7 +298,7 @@ class TestCompletionAppGenerator: (RuntimeError("boom"), True), ], ) - def test_generate_worker_error_handling(self, generator, mocker, 
error, should_publish): + def test_generate_worker_error_handling(self, generator, mocker: MockerFixture, error, should_publish): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py index 5d4c9bcde0..6c1ee20ffb 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_config_manager.py @@ -1,12 +1,14 @@ from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + import core.app.apps.pipeline.pipeline_config_manager as module from core.app.apps.pipeline.pipeline_config_manager import PipelineConfigManager from models.model import AppMode -def test_get_pipeline_config(mocker): +def test_get_pipeline_config(mocker: MockerFixture): pipeline = MagicMock(tenant_id="tenant", id="pipe1") workflow = MagicMock(id="wf1") @@ -26,7 +28,7 @@ def test_get_pipeline_config(mocker): assert result.rag_pipeline_variables == ["var1"] -def test_config_validate_filters_related_keys(mocker): +def test_config_validate_filters_related_keys(mocker: MockerFixture): config = { "file_upload": {"enabled": True}, "tts": {"enabled": True}, diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py index c36edf48fc..dd91243a37 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py @@ -3,6 +3,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock, PropertyMock import pytest +from pytest_mock import MockerFixture import core.app.apps.pipeline.pipeline_generator as module from core.app.apps.exc import GenerateTaskStoppedError @@ -23,7 +24,7 
@@ class FakeRagPipelineGenerateEntity(SimpleNamespace): @pytest.fixture -def generator(mocker): +def generator(mocker: MockerFixture): gen = module.PipelineGenerator() mocker.patch.object(module, "RagPipelineGenerateEntity", FakeRagPipelineGenerateEntity) @@ -88,7 +89,7 @@ class DummySession: return False -def test_generate_dataset_missing(generator, mocker): +def test_generate_dataset_missing(generator, mocker: MockerFixture): pipeline = _build_pipeline() pipeline.retrieve_dataset.return_value = None @@ -106,7 +107,7 @@ def test_generate_dataset_missing(generator, mocker): ) -def test_generate_debugger_calls_generate(generator, mocker): +def test_generate_debugger_calls_generate(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -150,7 +151,7 @@ def test_generate_debugger_calls_generate(generator, mocker): assert result == {"result": "ok"} -def test_generate_published_pipeline_creates_documents_and_delay(generator, mocker): +def test_generate_published_pipeline_creates_documents_and_delay(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -228,7 +229,7 @@ def test_generate_published_pipeline_creates_documents_and_delay(generator, mock task_proxy.delay.assert_called_once() -def test_generate_is_retry_calls_generate(generator, mocker): +def test_generate_is_retry_calls_generate(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() @@ -273,7 +274,7 @@ def test_generate_is_retry_calls_generate(generator, mocker): assert result == {"result": "ok"} -def test_generate_worker_handles_errors(generator, mocker): +def test_generate_worker_handles_errors(generator, mocker: MockerFixture): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -308,7 +309,7 @@ def test_generate_worker_handles_errors(generator, mocker): 
queue_manager.publish_error.assert_called_once() -def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker): +def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker: MockerFixture): flask_app = MagicMock() flask_app.app_context.return_value = contextlib.nullcontext() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -341,7 +342,7 @@ def test_generate_worker_sets_system_user_id_for_external_call(generator, mocker assert module.PipelineRunner.call_args.kwargs["system_user_id"] == "session" -def test_generate_raises_when_workflow_not_found(generator, mocker): +def test_generate_raises_when_workflow_not_found(generator, mocker: MockerFixture): flask_app = MagicMock() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -369,7 +370,7 @@ def test_generate_raises_when_workflow_not_found(generator, mocker): ) -def test_generate_success_returns_converted(generator, mocker): +def test_generate_success_returns_converted(generator, mocker: MockerFixture): flask_app = MagicMock() mocker.patch.object(module, "preserve_flask_contexts", _dummy_preserve) @@ -409,7 +410,7 @@ def test_generate_success_returns_converted(generator, mocker): assert result == "converted" -def test_single_iteration_generate_validates_inputs(generator, mocker): +def test_single_iteration_generate_validates_inputs(generator, mocker: MockerFixture): with pytest.raises(ValueError): generator.single_iteration_generate(_build_pipeline(), _build_workflow(), "", _build_user(), {}) @@ -419,7 +420,7 @@ def test_single_iteration_generate_validates_inputs(generator, mocker): ) -def test_single_iteration_generate_dataset_required(generator, mocker): +def test_single_iteration_generate_dataset_required(generator, mocker: MockerFixture): pipeline = _build_pipeline() pipeline.retrieve_dataset.return_value = None @@ -436,7 +437,7 @@ def test_single_iteration_generate_dataset_required(generator, mocker): ) -def 
test_single_iteration_generate_success(generator, mocker): +def test_single_iteration_generate_success(generator, mocker: MockerFixture): pipeline = _build_pipeline() session = DummySession() @@ -476,7 +477,7 @@ def test_single_iteration_generate_success(generator, mocker): assert result == {"ok": True} -def test_single_loop_generate_success(generator, mocker): +def test_single_loop_generate_success(generator, mocker: MockerFixture): pipeline = _build_pipeline() session = DummySession() @@ -516,7 +517,7 @@ def test_single_loop_generate_success(generator, mocker): assert result == {"ok": True} -def test_handle_response_value_error_triggers_generate_task_stopped(generator, mocker): +def test_handle_response_value_error_triggers_generate_task_stopped(generator, mocker: MockerFixture): pipeline = _build_pipeline() workflow = _build_workflow() app_entity = FakeRagPipelineGenerateEntity(task_id="t") @@ -536,7 +537,7 @@ def test_handle_response_value_error_triggers_generate_task_stopped(generator, m ) -def test_build_document_sets_metadata_for_builtin_fields(generator, mocker): +def test_build_document_sets_metadata_for_builtin_fields(generator, mocker: MockerFixture): class DummyDocument(SimpleNamespace): pass @@ -620,7 +621,7 @@ def test_format_datasource_info_list_missing_node_data(generator): ) -def test_format_datasource_info_list_online_drive_folder(generator, mocker): +def test_format_datasource_info_list_online_drive_folder(generator, mocker: MockerFixture): workflow = MagicMock( graph_dict={ "nodes": [ diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py index 9db83f5531..abfc76afa0 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_queue_manager.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture import 
core.app.apps.pipeline.pipeline_queue_manager as module from core.app.apps.base_app_queue_manager import PublishFrom @@ -16,7 +17,7 @@ from core.app.entities.queue_entities import ( from graphon.model_runtime.entities.llm_entities import LLMResult -def test_publish_sets_stop_listen_and_raises_on_stopped(mocker): +def test_publish_sets_stop_listen_and_raises_on_stopped(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() @@ -28,7 +29,7 @@ def test_publish_sets_stop_listen_and_raises_on_stopped(mocker): manager.stop_listen.assert_called_once() -def test_publish_stop_events_trigger_stop_listen(mocker): +def test_publish_stop_events_trigger_stop_listen(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() @@ -46,7 +47,7 @@ def test_publish_stop_events_trigger_stop_listen(mocker): manager.stop_listen.assert_called_once() -def test_publish_non_stop_event_no_stop_listen(mocker): +def test_publish_non_stop_event_no_stop_listen(mocker: MockerFixture): manager = PipelineQueueManager(task_id="t", user_id="u", invoke_from=InvokeFrom.WEB_APP, app_mode="rag") manager._q = mocker.MagicMock() manager.stop_listen = mocker.MagicMock() diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py index 603062a51c..1eed76cf84 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py @@ -22,6 +22,7 @@ from types import SimpleNamespace from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.app.apps.pipeline.pipeline_runner as module from 
core.app.apps.pipeline.pipeline_runner import PipelineRunner @@ -126,7 +127,7 @@ def test_update_document_status_on_failure(mocker, runner): session.commit.assert_called_once() -def test_run_pipeline_not_found(mocker): +def test_run_pipeline_not_found(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() app_generate_entity.invoke_from = InvokeFrom.WEB_APP app_generate_entity.single_iteration_run = None @@ -150,7 +151,7 @@ def test_run_pipeline_not_found(mocker): runner.run() -def test_run_workflow_not_initialized(mocker): +def test_run_workflow_not_initialized(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() pipeline = MagicMock(id="pipe") @@ -174,7 +175,7 @@ def test_run_workflow_not_initialized(mocker): runner.run() -def test_run_single_iteration_path(mocker): +def test_run_single_iteration_path(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() app_generate_entity.single_iteration_run = MagicMock() @@ -223,7 +224,7 @@ def test_run_single_iteration_path(mocker): runner._handle_event.assert_called() -def test_run_normal_path_builds_graph(mocker): +def test_run_normal_path_builds_graph(mocker: MockerFixture): app_generate_entity = _build_app_generate_entity() pipeline = MagicMock(id="pipe") diff --git a/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py b/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py index f48a7fb38e..835c9a8576 100644 --- a/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_advanced_chat_app_generator.py @@ -45,7 +45,7 @@ def _make_generate_entity(app_config: WorkflowUIBasedAppConfig) -> AdvancedChatA @pytest.fixture(autouse=True) -def _mock_db_session(monkeypatch): +def _mock_db_session(monkeypatch: pytest.MonkeyPatch): session = MagicMock() def refresh_side_effect(obj): @@ -108,7 +108,7 @@ def test_init_generate_records_marks_existing_conversation(): assert 
entity.is_new_conversation is False -def test_message_cycle_manager_uses_new_conversation_flag(monkeypatch): +def test_message_cycle_manager_uses_new_conversation_flag(monkeypatch: pytest.MonkeyPatch): app_config = _make_app_config() entity = _make_generate_entity(app_config) entity.conversation_id = "existing-conversation-id" diff --git a/api/tests/unit_tests/core/app/apps/test_base_app_generator.py b/api/tests/unit_tests/core/app/apps/test_base_app_generator.py index b0f8b423e1..f2a1700664 100644 --- a/api/tests/unit_tests/core/app/apps/test_base_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_base_app_generator.py @@ -369,7 +369,7 @@ def test_validate_inputs_optional_file_with_empty_string_ignores_default(): class TestBaseAppGeneratorExtras: - def test_prepare_user_inputs_converts_files_and_lists(self, monkeypatch): + def test_prepare_user_inputs_converts_files_and_lists(self, monkeypatch: pytest.MonkeyPatch): base_app_generator = BaseAppGenerator() variables = [ diff --git a/api/tests/unit_tests/core/app/apps/test_base_app_runner.py b/api/tests/unit_tests/core/app/apps/test_base_app_runner.py index 17de39ca99..c6eedf7be7 100644 --- a/api/tests/unit_tests/core/app/apps/test_base_app_runner.py +++ b/api/tests/unit_tests/core/app/apps/test_base_app_runner.py @@ -42,7 +42,7 @@ class _QueueRecorder: class TestAppRunner: - def test_recalc_llm_max_tokens_updates_parameters(self, monkeypatch): + def test_recalc_llm_max_tokens_updates_parameters(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_schema = SimpleNamespace( @@ -65,7 +65,7 @@ class TestAppRunner: assert model_config.parameters["max_tokens"] == 20 - def test_recalc_llm_max_tokens_returns_minus_one_when_no_context(self, monkeypatch): + def test_recalc_llm_max_tokens_returns_minus_one_when_no_context(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_schema = SimpleNamespace( @@ -86,7 +86,7 @@ class TestAppRunner: assert runner.recalc_llm_max_tokens(model_config, 
prompt_messages=[]) == -1 - def test_direct_output_streaming_publishes_chunks_and_end(self, monkeypatch): + def test_direct_output_streaming_publishes_chunks_and_end(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() app_generate_entity = SimpleNamespace(model_conf=SimpleNamespace(model="mock"), stream=True) @@ -133,7 +133,7 @@ class TestAppRunner: stream=True, ) - def test_organize_prompt_messages_simple_template(self, monkeypatch): + def test_organize_prompt_messages_simple_template(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="chat", stop=["STOP"]) prompt_template_entity = PromptTemplateEntity( @@ -158,7 +158,7 @@ class TestAppRunner: assert prompt_messages == ["simple-message"] assert stop == ["simple-stop"] - def test_organize_prompt_messages_advanced_completion_template(self, monkeypatch): + def test_organize_prompt_messages_advanced_completion_template(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="completion", stop=[""]) captured: dict[str, object] = {} @@ -191,7 +191,7 @@ class TestAppRunner: assert memory_config.role_prefix.user == "U" assert memory_config.role_prefix.assistant == "A" - def test_organize_prompt_messages_advanced_chat_template(self, monkeypatch): + def test_organize_prompt_messages_advanced_chat_template(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() model_config = SimpleNamespace(mode="chat", stop=[""]) captured: dict[str, object] = {} @@ -245,7 +245,7 @@ class TestAppRunner: files=[], ) - def test_handle_invoke_result_stream_routes_chunks_and_builds_message(self, monkeypatch): + def test_handle_invoke_result_stream_routes_chunks_and_builds_message(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() warning_logger = MagicMock() @@ -284,7 +284,7 @@ class TestAppRunner: assert queue.events[-1].llm_result.message.content == "abc" 
warning_logger.assert_called_once() - def test_handle_invoke_result_stream_agent_mode_handles_multimodal_errors(self, monkeypatch): + def test_handle_invoke_result_stream_agent_mode_handles_multimodal_errors(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() exception_logger = MagicMock() @@ -331,7 +331,7 @@ class TestAppRunner: assert queue.events[-1].llm_result.usage == usage exception_logger.assert_called_once() - def test_handle_multimodal_image_content_fallback_return_branch(self, monkeypatch): + def test_handle_multimodal_image_content_fallback_return_branch(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() class _ToggleBool: @@ -367,7 +367,7 @@ class TestAppRunner: db_session.add.assert_not_called() queue_manager.publish.assert_not_called() - def test_check_hosting_moderation_direct_output_called(self, monkeypatch): + def test_check_hosting_moderation_direct_output_called(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() queue = _QueueRecorder() app_generate_entity = SimpleNamespace(stream=False) @@ -388,7 +388,7 @@ class TestAppRunner: assert result is True assert direct_output.called - def test_fill_in_inputs_from_external_data_tools(self, monkeypatch): + def test_fill_in_inputs_from_external_data_tools(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() monkeypatch.setattr( "core.app.apps.base_app_runner.ExternalDataFetch.fetch", @@ -405,7 +405,7 @@ class TestAppRunner: assert result == {"foo": "bar"} - def test_moderation_for_inputs_returns_result(self, monkeypatch): + def test_moderation_for_inputs_returns_result(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() monkeypatch.setattr( "core.app.apps.base_app_runner.InputModeration.check", @@ -424,7 +424,7 @@ class TestAppRunner: assert result == (True, {}, "") - def test_query_app_annotations_to_reply(self, monkeypatch): + def test_query_app_annotations_to_reply(self, monkeypatch: pytest.MonkeyPatch): runner = AppRunner() 
monkeypatch.setattr( "core.app.apps.base_app_runner.AnnotationReplyFeature.query", diff --git a/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py b/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py index 1250ac5ecf..6a9b5e7619 100644 --- a/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_message_based_app_generator.py @@ -85,7 +85,7 @@ def _make_chat_generate_entity(app_config: EasyUIBasedAppConfig) -> ChatAppGener @pytest.fixture(autouse=True) -def _mock_db_session(monkeypatch): +def _mock_db_session(monkeypatch: pytest.MonkeyPatch): session = MagicMock() def refresh_side_effect(obj): @@ -130,7 +130,7 @@ def test_init_generate_records_sets_conversation_fields_for_chat_entity(): class TestMessageBasedAppGeneratorExtras: - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = MessageBasedAppGenerator() class _Pipeline: @@ -155,7 +155,7 @@ class TestMessageBasedAppGeneratorExtras: stream=False, ) - def test_get_app_model_config_requires_valid_config(self, monkeypatch): + def test_get_app_model_config_requires_valid_config(self, monkeypatch: pytest.MonkeyPatch): generator = MessageBasedAppGenerator() app_model = SimpleNamespace(id="app", app_model_config_id=None, app_model_config=None) diff --git a/api/tests/unit_tests/core/app/apps/test_pause_resume.py b/api/tests/unit_tests/core/app/apps/test_pause_resume.py index 6104b8d6ca..aa71f4d9c4 100644 --- a/api/tests/unit_tests/core/app/apps/test_pause_resume.py +++ b/api/tests/unit_tests/core/app/apps/test_pause_resume.py @@ -3,6 +3,8 @@ import time from types import ModuleType, SimpleNamespace from typing import Any +from pytest_mock import MockerFixture + import graphon.nodes.human_input.entities # noqa: F401 from core.app.apps.advanced_chat import app_generator as adv_app_gen_module from 
core.app.apps.workflow import app_generator as wf_app_gen_module @@ -101,7 +103,7 @@ class _StubToolNode(Node[_StubToolNodeData]): yield self._convert_node_run_result_to_graph_node_event(result) -def _patch_tool_node(mocker): +def _patch_tool_node(mocker: MockerFixture): original_resolve_node_class = node_factory_module.resolve_workflow_node_class def _patched_resolve_node_class(*, node_type: NodeType, node_version: str) -> type[Node]: @@ -196,7 +198,7 @@ def _node_successes(events: list[GraphEngineEvent]) -> list[str]: return [evt.node_id for evt in events if isinstance(evt, NodeRunSucceededEvent)] -def test_workflow_app_pause_resume_matches_baseline(mocker): +def test_workflow_app_pause_resume_matches_baseline(mocker: MockerFixture): _patch_tool_node(mocker) baseline_state = _build_runtime_state("baseline") @@ -236,7 +238,7 @@ def test_workflow_app_pause_resume_matches_baseline(mocker): assert resumed_state.outputs == baseline_outputs -def test_advanced_chat_pause_resume_matches_baseline(mocker): +def test_advanced_chat_pause_resume_matches_baseline(mocker: MockerFixture): _patch_tool_node(mocker) baseline_state = _build_runtime_state("adv-baseline") diff --git a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py index 58f0e47a4b..12f3ed9f07 100644 --- a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py +++ b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py @@ -54,7 +54,7 @@ class FakeTopic: return self._state["subscribed"] -def test_retrieve_events_calls_on_subscribe_after_subscription(monkeypatch): +def test_retrieve_events_calls_on_subscribe_after_subscription(monkeypatch: pytest.MonkeyPatch): topic = FakeTopic() def fake_get_response_topic(cls, app_mode, workflow_run_id): @@ -92,7 +92,7 @@ def test_normalize_terminal_events_empty_values(): assert _normalize_terminal_events([]) == set({}) -def test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch): +def 
test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch: pytest.MonkeyPatch): topic = FakeTopic() times = [1000.0, 1000.0, 1001.0, 1001.0, 1002.0] diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py index 7e8367c6c4..0e9f8b6f35 100644 --- a/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_generator.py @@ -1,6 +1,8 @@ from types import SimpleNamespace from unittest.mock import MagicMock +from pytest_mock import MockerFixture + from core.app.apps.workflow.app_generator import SKIP_PREPARE_USER_INPUTS_KEY, WorkflowAppGenerator @@ -22,7 +24,7 @@ def test_should_prepare_user_inputs_keeps_validation_when_flag_false(): assert WorkflowAppGenerator()._should_prepare_user_inputs(args) -def test_resume_delegates_to_generate(mocker): +def test_resume_delegates_to_generate(mocker: MockerFixture): generator = WorkflowAppGenerator() mock_generate = mocker.patch.object(generator, "_generate", return_value="ok") @@ -52,7 +54,7 @@ def test_resume_delegates_to_generate(mocker): assert kwargs["invoke_from"] == "debugger" -def test_generate_appends_pause_layer_and_forwards_state(mocker): +def test_generate_appends_pause_layer_and_forwards_state(mocker: MockerFixture): generator = WorkflowAppGenerator() mock_queue_manager = MagicMock() @@ -124,7 +126,7 @@ def test_generate_appends_pause_layer_and_forwards_state(mocker): assert worker_kwargs["kwargs"]["graph_runtime_state"] is graph_runtime_state -def test_resume_path_runs_worker_with_runtime_state(mocker): +def test_resume_path_runs_worker_with_runtime_state(mocker: MockerFixture): generator = WorkflowAppGenerator() runtime_state = MagicMock(name="runtime-state") diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py index 58c7bfa4bc..4a0d4f490e 100644 --- 
a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py @@ -90,7 +90,7 @@ class TestWorkflowBasedAppRunner: with pytest.raises(ValueError, match="Neither single_iteration_run nor single_loop_run"): runner._prepare_single_node_execution(workflow, None, None, user_id="00000000-0000-0000-0000-000000000001") - def test_get_graph_and_variable_pool_for_single_node_run(self, monkeypatch): + def test_get_graph_and_variable_pool_for_single_node_run(self, monkeypatch: pytest.MonkeyPatch): runner = WorkflowBasedAppRunner(queue_manager=SimpleNamespace(), app_id="app") graph_runtime_state = GraphRuntimeState( variable_pool=VariablePool(system_variables=default_system_variables()), @@ -142,7 +142,9 @@ class TestWorkflowBasedAppRunner: assert graph is not None assert variable_pool is graph_runtime_state.variable_pool - def test_get_graph_and_variable_pool_preloads_constructor_variables_before_graph_init(self, monkeypatch): + def test_get_graph_and_variable_pool_preloads_constructor_variables_before_graph_init( + self, monkeypatch: pytest.MonkeyPatch + ): variable_loader = SimpleNamespace( load_variables=lambda selectors: ( [ @@ -232,7 +234,7 @@ class TestWorkflowBasedAppRunner: assert graph is not None assert variable_pool.get(["sys", "conversation_id"]).value == "conv-1" - def test_handle_graph_run_events_and_pause_notifications(self, monkeypatch): + def test_handle_graph_run_events_and_pause_notifications(self, monkeypatch: pytest.MonkeyPatch): published: list[object] = [] class _QueueManager: diff --git a/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py b/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py index 09ad078a70..320189143e 100644 --- a/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py +++ b/api/tests/unit_tests/core/app/apps/workflow/test_app_generator_extra.py @@ -67,7 +67,7 @@ class 
TestWorkflowAppGeneratorValidation: class TestWorkflowAppGeneratorHandleResponse: - def test_handle_response_closed_file_raises_stopped(self, monkeypatch): + def test_handle_response_closed_file_raises_stopped(self, monkeypatch: pytest.MonkeyPatch): generator = WorkflowAppGenerator() app_config = WorkflowUIBasedAppConfig( @@ -116,7 +116,7 @@ class TestWorkflowAppGeneratorHandleResponse: class TestWorkflowAppGeneratorGenerate: - def test_generate_skips_prepare_inputs_when_flag_set(self, monkeypatch): + def test_generate_skips_prepare_inputs_when_flag_set(self, monkeypatch: pytest.MonkeyPatch): generator = WorkflowAppGenerator() app_config = WorkflowUIBasedAppConfig( diff --git a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py index 0bcc1029b0..1311d5e9cb 100644 --- a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py @@ -187,7 +187,7 @@ class TestWorkflowGenerateTaskPipeline: assert isinstance(responses[0], ValueError) - def test_handle_workflow_started_event_sets_run_id(self, monkeypatch): + def test_handle_workflow_started_event_sets_run_id(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( variable_pool=build_test_variable_pool(variables=build_system_variables(workflow_execution_id="run-id")), @@ -408,7 +408,7 @@ class TestWorkflowGenerateTaskPipeline: assert list(pipeline._handle_human_input_form_timeout_event(timeout_event)) == ["timeout"] assert list(pipeline._handle_agent_log_event(agent_event)) == ["log"] - def test_wrapper_process_stream_response_emits_audio_end(self, monkeypatch): + def test_wrapper_process_stream_response_emits_audio_end(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": 
{"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -560,7 +560,7 @@ class TestWorkflowGenerateTaskPipeline: responses = list(pipeline._wrapper_process_stream_response()) assert responses == [PingStreamResponse(task_id="task")] - def test_wrapper_process_stream_response_final_audio_none_then_finish(self, monkeypatch): + def test_wrapper_process_stream_response_final_audio_none_then_finish(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": {"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -597,7 +597,7 @@ class TestWorkflowGenerateTaskPipeline: assert sleep_spy assert any(isinstance(item, MessageAudioEndStreamResponse) for item in responses) - def test_wrapper_process_stream_response_handles_audio_exception(self, monkeypatch): + def test_wrapper_process_stream_response_handles_audio_exception(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._workflow_features_dict = { "text_to_speech": {"enabled": True, "autoPlay": "enabled", "voice": "v", "language": "en"} @@ -633,7 +633,7 @@ class TestWorkflowGenerateTaskPipeline: assert logger_exception assert any(isinstance(item, MessageAudioEndStreamResponse) for item in responses) - def test_database_session_rolls_back_on_error(self, monkeypatch): + def test_database_session_rolls_back_on_error(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() calls = {"enter": 0, "exit_exc": None} diff --git a/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py index a20d89d807..f10e0084d0 100644 --- a/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/task_pipeline/test_easy_ui_based_generate_task_pipeline_core.py @@ -143,7 +143,7 @@ class 
TestEasyUiBasedGenerateTaskPipeline: assert pipeline._listen_audio_msg(publisher=None, task_id="task") is None - def test_process_stream_response_handles_chunks_and_end(self, monkeypatch): + def test_process_stream_response_handles_chunks_and_end(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -245,7 +245,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert any(isinstance(event, QueueLLMChunkEvent) for event in events) assert any(isinstance(event, QueueStopEvent) for event in events) - def test_handle_stop_updates_usage(self, monkeypatch): + def test_handle_stop_updates_usage(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -313,7 +313,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert pipeline._task_state.llm_result.usage.prompt_tokens == 10 assert pipeline._task_state.llm_result.usage.completion_tokens == 5 - def test_record_files_builds_file_payloads(self, monkeypatch): + def test_record_files_builds_file_payloads(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -405,7 +405,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert files assert len(files) == 3 - def test_process_stream_response_handles_annotation_and_error(self, monkeypatch): + def test_process_stream_response_handles_annotation_and_error(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -472,7 +472,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert isinstance(responses[-1], ValueError) assert pipeline._task_state.llm_result.message.content == "annotatedagent" - def 
test_agent_thought_to_stream_response_returns_payload(self, monkeypatch): + def test_agent_thought_to_stream_response_returns_payload(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) @@ -681,7 +681,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert responses == ["payload"] - def test_wrapper_process_stream_response_with_tts_publisher(self, monkeypatch): + def test_wrapper_process_stream_response_with_tts_publisher(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) entity = _make_entity(ChatAppGenerateEntity, AppMode.CHAT) @@ -715,7 +715,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert responses[1] == "payload" assert isinstance(responses[-1], MessageAudioEndStreamResponse) - def test_wrapper_process_stream_response_timeout_yields_audio_chunk(self, monkeypatch): + def test_wrapper_process_stream_response_timeout_yields_audio_chunk(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) entity = _make_entity(ChatAppGenerateEntity, AppMode.CHAT) @@ -756,7 +756,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert any(isinstance(item, MessageAudioStreamResponse) for item in responses) assert isinstance(responses[-1], MessageAudioEndStreamResponse) - def test_process_stream_response_handles_stop_event_and_output_replacement(self, monkeypatch): + def test_process_stream_response_handles_stop_event_and_output_replacement(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -896,7 +896,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert 
list(pipeline._process_stream_response(publisher=None)) == [] - def test_save_message_persists_fields_and_emits_trace(self, monkeypatch): + def test_save_message_persists_fields_and_emits_trace(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -981,7 +981,7 @@ class TestEasyUiBasedGenerateTaskPipeline: with pytest.raises(ValueError, match="Conversation conv not found"): pipeline._save_message(session=session) - def test_message_end_to_stream_response_includes_usage_metadata(self, monkeypatch): + def test_message_end_to_stream_response_includes_usage_metadata(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1021,7 +1021,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert response.id == "msg" assert response.metadata["usage"]["prompt_tokens"] == 1 - def test_record_files_returns_none_when_message_has_no_files(self, monkeypatch): + def test_record_files_returns_none_when_message_has_no_files(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1059,7 +1059,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert response.files is None - def test_record_files_handles_local_fallback_and_tool_url_variants(self, monkeypatch): + def test_record_files_handles_local_fallback_and_tool_url_variants(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( @@ -1155,7 +1155,7 @@ class TestEasyUiBasedGenerateTaskPipeline: assert response.id == 
"msg" assert response.answer == "hello" - def test_agent_thought_to_stream_response_returns_none_when_not_found(self, monkeypatch): + def test_agent_thought_to_stream_response_returns_none_when_not_found(self, monkeypatch: pytest.MonkeyPatch): conversation = SimpleNamespace(id="conv", mode=AppMode.CHAT) message = SimpleNamespace(id="msg", created_at=datetime.now(UTC)) pipeline = EasyUIBasedGenerateTaskPipeline( diff --git a/api/tests/unit_tests/core/app/workflow/test_node_factory.py b/api/tests/unit_tests/core/app/workflow/test_node_factory.py index 30a068f4c5..7c9f174129 100644 --- a/api/tests/unit_tests/core/app/workflow/test_node_factory.py +++ b/api/tests/unit_tests/core/app/workflow/test_node_factory.py @@ -46,7 +46,7 @@ class TestDifyNodeFactory: lambda **_kwargs: node_class, ) - def _factory(self, monkeypatch): + def _factory(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MAX_STRING_LENGTH", 10) monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MAX_NUMBER", 10) monkeypatch.setattr("core.workflow.node_factory.dify_config.CODE_MIN_NUMBER", -10) @@ -72,20 +72,20 @@ class TestDifyNodeFactory: graph_runtime_state=SimpleNamespace(), ) - def test_create_node_unknown_type(self, monkeypatch): + def test_create_node_unknown_type(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": "unknown"}}) - def test_create_node_missing_mapping(self, monkeypatch): + def test_create_node_missing_mapping(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) monkeypatch.setattr("core.workflow.node_factory.get_node_type_classes_mapping", lambda: {}) with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": BuiltinNodeTypes.START}}) - def test_create_node_missing_latest_class(self, monkeypatch): + def test_create_node_missing_latest_class(self, 
monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) monkeypatch.setattr( "core.workflow.node_factory.get_node_type_classes_mapping", @@ -96,7 +96,7 @@ class TestDifyNodeFactory: with pytest.raises(ValueError): factory.create_node({"id": "node-1", "data": {"type": BuiltinNodeTypes.START}}) - def test_create_node_selects_versioned_class(self, monkeypatch): + def test_create_node_selects_versioned_class(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) selected_versions: list[tuple[str, str]] = [] @@ -115,7 +115,7 @@ class TestDifyNodeFactory: assert node.id == "node-1" assert selected_versions == [("snapshot", "called")] - def test_create_node_code_branch(self, monkeypatch): + def test_create_node_code_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyCodeNode) @@ -124,7 +124,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyCodeNode) assert node.id == "node-1" - def test_create_node_template_transform_branch(self, monkeypatch): + def test_create_node_template_transform_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyTemplateTransformNode) @@ -133,7 +133,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyTemplateTransformNode) assert "jinja2_template_renderer" in node.kwargs - def test_create_node_http_request_branch(self, monkeypatch): + def test_create_node_http_request_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyHttpRequestNode) @@ -142,7 +142,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyHttpRequestNode) assert "http_request_config" in node.kwargs - def test_create_node_knowledge_retrieval_branch(self, monkeypatch): + def test_create_node_knowledge_retrieval_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) 
self._stub_node_resolution(monkeypatch, DummyKnowledgeRetrievalNode) @@ -151,7 +151,7 @@ class TestDifyNodeFactory: assert isinstance(node, DummyKnowledgeRetrievalNode) assert node.kwargs == {} - def test_create_node_document_extractor_branch(self, monkeypatch): + def test_create_node_document_extractor_branch(self, monkeypatch: pytest.MonkeyPatch): factory = self._factory(monkeypatch) self._stub_node_resolution(monkeypatch, DummyDocumentExtractorNode) diff --git a/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py b/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py index 82552470a9..04ce524904 100644 --- a/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py +++ b/api/tests/unit_tests/core/app/workflow/test_observability_layer_extra.py @@ -2,12 +2,14 @@ from __future__ import annotations from types import SimpleNamespace +import pytest + from core.app.workflow.layers.observability import ObservabilityLayer from graphon.enums import BuiltinNodeTypes class TestObservabilityLayerExtras: - def test_init_tracer_enabled_sets_tracer(self, monkeypatch): + def test_init_tracer_enabled_sets_tracer(self, monkeypatch: pytest.MonkeyPatch): tracer = object() monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", True) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -18,7 +20,7 @@ class TestObservabilityLayerExtras: assert layer._is_disabled is False assert layer._tracer is tracer - def test_init_tracer_disables_when_get_tracer_fails(self, monkeypatch, caplog): + def test_init_tracer_disables_when_get_tracer_fails(self, monkeypatch: pytest.MonkeyPatch, caplog): monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", True) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -33,7 +35,7 @@ class TestObservabilityLayerExtras: assert layer._tracer 
is None assert "Failed to get OpenTelemetry tracer" in caplog.text - def test_init_tracer_disables_when_otel_disabled(self, monkeypatch): + def test_init_tracer_disables_when_otel_disabled(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.app.workflow.layers.observability.dify_config.ENABLE_OTEL", False) monkeypatch.setattr("core.app.workflow.layers.observability.is_instrument_flag_enabled", lambda: False) @@ -143,7 +145,7 @@ class TestObservabilityLayerExtras: assert layer._node_contexts == {} - def test_on_node_run_end_calls_span_end(self, monkeypatch): + def test_on_node_run_end_calls_span_end(self, monkeypatch: pytest.MonkeyPatch): layer = ObservabilityLayer() layer._is_disabled = False ended: list[str] = [] @@ -164,7 +166,7 @@ class TestObservabilityLayerExtras: assert ended == ["ended"] assert "exec" not in layer._node_contexts - def test_on_node_run_end_logs_detach_failure(self, monkeypatch, caplog): + def test_on_node_run_end_logs_detach_failure(self, monkeypatch: pytest.MonkeyPatch, caplog): layer = ObservabilityLayer() layer._is_disabled = False @@ -186,7 +188,7 @@ class TestObservabilityLayerExtras: assert "Failed to detach OpenTelemetry token" in caplog.text assert "exec" not in layer._node_contexts - def test_on_node_run_start_and_end_creates_span(self, monkeypatch): + def test_on_node_run_start_and_end_creates_span(self, monkeypatch: pytest.MonkeyPatch): layer = ObservabilityLayer() layer._is_disabled = False diff --git a/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py b/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py index cacb4dd4fa..23fe682017 100644 --- a/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py +++ b/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py @@ -120,7 +120,7 @@ class TestWorkflowPersistenceLayer: with pytest.raises(ValueError, match="workflow_execution_id must be provided"): layer._get_execution_id() - def 
test_prepare_workflow_inputs_excludes_conversation_id(self, monkeypatch): + def test_prepare_workflow_inputs_excludes_conversation_id(self, monkeypatch: pytest.MonkeyPatch): layer, _, _, _ = _make_layer() monkeypatch.setattr( diff --git a/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py b/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py index 7b433ab57b..1125ce6dbc 100644 --- a/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py +++ b/api/tests/unit_tests/core/base/test_app_generator_tts_publisher.py @@ -3,6 +3,7 @@ import queue from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.base.tts.app_generator_tts_publisher import ( AppGeneratorTTSPublisher, @@ -17,7 +18,7 @@ from core.base.tts.app_generator_tts_publisher import ( @pytest.fixture -def mock_model_instance(mocker): +def mock_model_instance(mocker: MockerFixture): model = mocker.MagicMock() model.invoke_tts.return_value = [b"audio1", b"audio2"] model.get_tts_voices.return_value = [{"value": "voice1"}, {"value": "voice2"}] @@ -33,7 +34,7 @@ def mock_model_manager(mocker, mock_model_instance): @pytest.fixture(autouse=True) -def patch_threads(mocker): +def patch_threads(mocker: MockerFixture): """Prevent real threads from starting during tests""" mocker.patch("threading.Thread.start", return_value=None) @@ -114,7 +115,7 @@ class TestProcessFuture: finish = audio_queue.get() assert finish.status == "finish" - def test_process_future_exception(self, mocker): + def test_process_future_exception(self, mocker: MockerFixture): future_queue = queue.Queue() audio_queue = queue.Queue() @@ -222,7 +223,7 @@ class TestAppGeneratorTTSPublisher: publisher.executor.submit.assert_not_called() - def test_runtime_sentence_threshold_triggers_submit(self, mock_model_manager, mocker): + def test_runtime_sentence_threshold_triggers_submit(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", 
"voice1") publisher.executor = MagicMock() @@ -297,7 +298,7 @@ class TestAppGeneratorTTSPublisher: publisher.executor.submit.assert_not_called() - def test_runtime_handles_agent_message_event_list_content(self, mock_model_manager, mocker): + def test_runtime_handles_agent_message_event_list_content(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() @@ -332,7 +333,7 @@ class TestAppGeneratorTTSPublisher: assert publisher.msg_text == "Hello " - def test_runtime_handles_agent_message_event_empty_content(self, mock_model_manager, mocker): + def test_runtime_handles_agent_message_event_empty_content(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() @@ -358,7 +359,7 @@ class TestAppGeneratorTTSPublisher: assert publisher.msg_text == "" - def test_runtime_resets_msg_text_when_text_tmp_not_str(self, mock_model_manager, mocker): + def test_runtime_resets_msg_text_when_text_tmp_not_str(self, mock_model_manager, mocker: MockerFixture): publisher = AppGeneratorTTSPublisher("tenant", "voice1") publisher.executor = MagicMock() diff --git a/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py index 4c1aa33540..f9b3b1864e 100644 --- a/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py +++ b/api/tests/unit_tests/core/callback_handler/test_agent_tool_callback_handler.py @@ -1,8 +1,10 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import core.callback_handler.agent_tool_callback_handler as module +from core.callback_handler.agent_tool_callback_handler import DifyAgentCallbackHandler # ----------------------------- # Fixtures @@ -10,17 +12,17 @@ import core.callback_handler.agent_tool_callback_handler as module @pytest.fixture -def 
enable_debug(mocker): +def enable_debug(mocker: MockerFixture): mocker.patch.object(module.dify_config, "DEBUG", True) @pytest.fixture -def disable_debug(mocker): +def disable_debug(mocker: MockerFixture): mocker.patch.object(module.dify_config, "DEBUG", False) @pytest.fixture -def mock_print(mocker): +def mock_print(mocker: MockerFixture): return mocker.patch("builtins.print") @@ -71,7 +73,7 @@ class TestPrintText: module.print_text("hello") mock_print.assert_called_once_with("hello", end="", file=None) - def test_print_text_with_color(self, mocker, mock_print): + def test_print_text_with_color(self, mocker: MockerFixture, mock_print): mock_get_color = mocker.patch( "core.callback_handler.agent_tool_callback_handler.get_colored_text", return_value="colored_text", @@ -82,7 +84,7 @@ class TestPrintText: mock_get_color.assert_called_once_with("hello", "green") mock_print.assert_called_once_with("colored_text", end="", file=None) - def test_print_text_with_file_flush(self, mocker): + def test_print_text_with_file_flush(self, mocker: MockerFixture): mock_file = MagicMock() mock_print = mocker.patch("builtins.print") @@ -107,21 +109,25 @@ class TestDifyAgentCallbackHandler: assert handler.color == "green" assert handler.current_loop == 1 - def test_on_tool_start_debug_enabled(self, handler, enable_debug, mocker): + def test_on_tool_start_debug_enabled(self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_start("tool1", {"a": 1}) mock_print_text.assert_called() - def test_on_tool_start_debug_disabled(self, handler, disable_debug, mocker): + def test_on_tool_start_debug_disabled( + self, handler: DifyAgentCallbackHandler, disable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_start("tool1", {"a": 1}) mock_print_text.assert_not_called() - def 
test_on_tool_end_debug_enabled_and_trace(self, handler, enable_debug, mocker): + def test_on_tool_end_debug_enabled_and_trace( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") mock_trace_manager = MagicMock() @@ -137,7 +143,9 @@ class TestDifyAgentCallbackHandler: assert mock_print_text.call_count >= 1 mock_trace_manager.add_trace_task.assert_called_once() - def test_on_tool_end_without_trace_manager(self, handler, enable_debug, mocker): + def test_on_tool_end_without_trace_manager( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_end( @@ -148,14 +156,16 @@ class TestDifyAgentCallbackHandler: assert mock_print_text.call_count >= 1 - def test_on_tool_error_debug_enabled(self, handler, enable_debug, mocker): + def test_on_tool_error_debug_enabled(self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_error(Exception("error")) mock_print_text.assert_called_once() - def test_on_tool_error_debug_disabled(self, handler, disable_debug, mocker): + def test_on_tool_error_debug_disabled( + self, handler: DifyAgentCallbackHandler, disable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_tool_error(Exception("error")) @@ -163,14 +173,16 @@ class TestDifyAgentCallbackHandler: mock_print_text.assert_not_called() @pytest.mark.parametrize("thought", ["thinking", ""]) - def test_on_agent_start(self, handler, enable_debug, mocker, thought): + def test_on_agent_start(self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture, thought): mock_print_text = 
mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_agent_start(thought) mock_print_text.assert_called() - def test_on_agent_finish_increments_loop(self, handler, enable_debug, mocker): + def test_on_agent_finish_increments_loop( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") current_loop = handler.current_loop @@ -179,19 +191,21 @@ class TestDifyAgentCallbackHandler: assert handler.current_loop == current_loop + 1 mock_print_text.assert_called() - def test_on_datasource_start_debug_enabled(self, handler, enable_debug, mocker): + def test_on_datasource_start_debug_enabled( + self, handler: DifyAgentCallbackHandler, enable_debug, mocker: MockerFixture + ): mock_print_text = mocker.patch("core.callback_handler.agent_tool_callback_handler.print_text") handler.on_datasource_start("ds1", {"x": 1}) mock_print_text.assert_called_once() - def test_ignore_agent_property(self, disable_debug, handler): + def test_ignore_agent_property(self, disable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_agent is True - def test_ignore_chat_model_property(self, disable_debug, handler): + def test_ignore_chat_model_property(self, disable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_chat_model is True - def test_ignore_properties_when_debug_enabled(self, enable_debug, handler): + def test_ignore_properties_when_debug_enabled(self, enable_debug, handler: DifyAgentCallbackHandler): assert handler.ignore_agent is False assert handler.ignore_chat_model is False diff --git a/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py index 8e5670e9be..f23669c3c7 100644 --- a/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py +++ 
b/api/tests/unit_tests/core/callback_handler/test_index_tool_callback_handler.py @@ -1,4 +1,5 @@ import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom from core.callback_handler.index_tool_callback_handler import ( @@ -7,12 +8,12 @@ from core.callback_handler.index_tool_callback_handler import ( @pytest.fixture -def mock_queue_manager(mocker): +def mock_queue_manager(mocker: MockerFixture): return mocker.Mock() @pytest.fixture -def handler(mock_queue_manager, mocker): +def handler(mock_queue_manager, mocker: MockerFixture): mocker.patch( "core.callback_handler.index_tool_callback_handler.db", ) @@ -34,7 +35,7 @@ class TestOnQuery: (InvokeFrom.WEB_APP, "end_user"), ], ) - def test_on_query_success_roles(self, mocker, mock_queue_manager, invoke_from, expected_role): + def test_on_query_success_roles(self, mocker: MockerFixture, mock_queue_manager, invoke_from, expected_role): # Arrange mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") @@ -57,7 +58,7 @@ class TestOnQuery: assert dataset_query.created_by_role == expected_role mock_db.session.commit.assert_called_once() - def test_on_query_none_values(self, mocker, mock_queue_manager): + def test_on_query_none_values(self, mocker: MockerFixture, mock_queue_manager): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") handler = DatasetIndexToolCallbackHandler( @@ -75,7 +76,7 @@ class TestOnQuery: class TestOnToolEnd: - def test_on_tool_end_no_metadata(self, handler, mocker): + def test_on_tool_end_no_metadata(self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") document = mocker.Mock() @@ -85,7 +86,9 @@ class TestOnToolEnd: mock_db.session.commit.assert_not_called() - def test_on_tool_end_dataset_document_not_found(self, handler, mocker): + def test_on_tool_end_dataset_document_not_found( + self, handler: 
DatasetIndexToolCallbackHandler, mocker: MockerFixture + ): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_db.session.scalar.return_value = None @@ -96,7 +99,9 @@ class TestOnToolEnd: mock_db.session.scalar.assert_called_once() - def test_on_tool_end_parent_child_index_with_child(self, handler, mocker): + def test_on_tool_end_parent_child_index_with_child( + self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture + ): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_dataset_doc = mocker.Mock() @@ -119,7 +124,7 @@ class TestOnToolEnd: mock_db.session.execute.assert_called_once() mock_db.session.commit.assert_called_once() - def test_on_tool_end_non_parent_child_index(self, handler, mocker): + def test_on_tool_end_non_parent_child_index(self, handler: DatasetIndexToolCallbackHandler, mocker: MockerFixture): mock_db = mocker.patch("core.callback_handler.index_tool_callback_handler.db") mock_dataset_doc = mocker.Mock() @@ -139,12 +144,12 @@ class TestOnToolEnd: mock_db.session.execute.assert_called_once() mock_db.session.commit.assert_called_once() - def test_on_tool_end_empty_documents(self, handler): + def test_on_tool_end_empty_documents(self, handler: DatasetIndexToolCallbackHandler): handler.on_tool_end([]) class TestReturnRetrieverResourceInfo: - def test_publish_called(self, handler, mock_queue_manager, mocker): + def test_publish_called(self, handler: DatasetIndexToolCallbackHandler, mock_queue_manager, mocker: MockerFixture): mock_event = mocker.patch("core.callback_handler.index_tool_callback_handler.QueueRetrieverResourcesEvent") resources = [mocker.Mock()] diff --git a/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py b/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py index 131fb006ed..5b53c5965c 100644 --- a/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py +++ 
b/api/tests/unit_tests/core/callback_handler/test_workflow_tool_callback_handler.py @@ -1,6 +1,7 @@ from unittest.mock import MagicMock, call import pytest +from pytest_mock import MockerFixture from core.callback_handler.workflow_tool_callback_handler import ( DifyWorkflowCallbackHandler, @@ -26,13 +27,13 @@ def handler(): @pytest.fixture -def mock_print_text(mocker): +def mock_print_text(mocker: MockerFixture): """Mock print_text to avoid real stdout printing.""" return mocker.patch("core.callback_handler.workflow_tool_callback_handler.print_text") class TestDifyWorkflowCallbackHandler: - def test_on_tool_execution_single_output_success(self, handler, mock_print_text): + def test_on_tool_execution_single_output_success(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "test_tool" tool_inputs = {"a": 1} @@ -62,7 +63,7 @@ class TestDifyWorkflowCallbackHandler: ] ) - def test_on_tool_execution_multiple_outputs(self, handler, mock_print_text): + def test_on_tool_execution_multiple_outputs(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "multi_tool" outputs = [ @@ -83,7 +84,7 @@ class TestDifyWorkflowCallbackHandler: assert results == outputs assert mock_print_text.call_count == 4 * len(outputs) - def test_on_tool_execution_empty_iterable(self, handler, mock_print_text): + def test_on_tool_execution_empty_iterable(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "empty_tool" @@ -108,7 +109,9 @@ class TestDifyWorkflowCallbackHandler: ("not_iterable", AttributeError), ], ) - def test_on_tool_execution_invalid_outputs_type(self, handler, invalid_outputs, expected_exception): + def test_on_tool_execution_invalid_outputs_type( + self, handler: DifyWorkflowCallbackHandler, invalid_outputs, expected_exception + ): # Arrange tool_name = "invalid_tool" @@ -122,7 +125,7 @@ class TestDifyWorkflowCallbackHandler: ) ) - def test_on_tool_execution_long_json_truncation(self, 
handler, mock_print_text): + def test_on_tool_execution_long_json_truncation(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "long_json_tool" long_json = "x" * 1500 @@ -144,7 +147,7 @@ class TestDifyWorkflowCallbackHandler: color="blue", ) - def test_on_tool_execution_model_dump_json_exception(self, handler, mock_print_text): + def test_on_tool_execution_model_dump_json_exception(self, handler: DifyWorkflowCallbackHandler, mock_print_text): # Arrange tool_name = "exception_tool" bad_message = MagicMock() @@ -163,7 +166,9 @@ class TestDifyWorkflowCallbackHandler: # Ensure first two prints happened before failure assert mock_print_text.call_count >= 2 - def test_on_tool_execution_none_message_id_and_trace_manager(self, handler, mock_print_text): + def test_on_tool_execution_none_message_id_and_trace_manager( + self, handler: DifyWorkflowCallbackHandler, mock_print_text + ): # Arrange tool_name = "optional_params_tool" message = DummyToolInvokeMessage('{"data": "ok"}') diff --git a/api/tests/unit_tests/core/datasource/test_datasource_manager.py b/api/tests/unit_tests/core/datasource/test_datasource_manager.py index deeac49bbc..8842d678c7 100644 --- a/api/tests/unit_tests/core/datasource/test_datasource_manager.py +++ b/api/tests/unit_tests/core/datasource/test_datasource_manager.py @@ -2,6 +2,7 @@ import types from collections.abc import Generator import pytest +from pytest_mock import MockerFixture from contexts.wrapper import RecyclableContextVar from core.datasource.datasource_manager import DatasourceManager @@ -37,7 +38,7 @@ def _invalidate_recyclable_contextvars() -> None: RecyclableContextVar.increment_thread_recycles() -def test_get_icon_url_calls_runtime(mocker): +def test_get_icon_url_calls_runtime(mocker: MockerFixture): fake_runtime = mocker.Mock() fake_runtime.get_icon_url.return_value = "https://icon" mocker.patch.object(DatasourceManager, "get_datasource_runtime", return_value=fake_runtime) @@ -52,7 +53,7 @@ def 
test_get_icon_url_calls_runtime(mocker): DatasourceManager.get_datasource_runtime.assert_called_once() -def test_get_datasource_runtime_delegates_to_provider_controller(mocker): +def test_get_datasource_runtime_delegates_to_provider_controller(mocker: MockerFixture): provider_controller = mocker.Mock() provider_controller.get_datasource.return_value = object() mocker.patch.object(DatasourceManager, "get_datasource_plugin_provider", return_value=provider_controller) @@ -114,7 +115,7 @@ def test_get_datasource_plugin_provider_creates_controller_and_caches(mocker, da assert ctrl_cls.call_count == 1 -def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mocker): +def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mocker: MockerFixture): _invalidate_recyclable_contextvars() mocker.patch( "core.datasource.datasource_manager.PluginDatasourceManager.fetch_datasource_provider", @@ -129,7 +130,7 @@ def test_get_datasource_plugin_provider_raises_when_provider_entity_missing(mock ) -def test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker): +def test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker: MockerFixture): _invalidate_recyclable_contextvars() provider_entity = types.SimpleNamespace(declaration=object(), plugin_id="plugin", plugin_unique_identifier="uniq") mocker.patch( @@ -145,7 +146,7 @@ def test_get_datasource_plugin_provider_raises_for_unsupported_type(mocker): ) -def test_get_datasource_plugin_provider_raises_when_controller_none(mocker): +def test_get_datasource_plugin_provider_raises_when_controller_none(mocker: MockerFixture): _invalidate_recyclable_contextvars() provider_entity = types.SimpleNamespace(declaration=object(), plugin_id="plugin", plugin_unique_identifier="uniq") mocker.patch( @@ -165,7 +166,7 @@ def test_get_datasource_plugin_provider_raises_when_controller_none(mocker): ) -def test_stream_online_results_yields_messages_online_document(mocker): +def 
test_stream_online_results_yields_messages_online_document(mocker: MockerFixture): # stub runtime to yield a text message def _doc_messages(**_): yield from _gen_messages_text_only("hello") @@ -195,7 +196,7 @@ def test_stream_online_results_yields_messages_online_document(mocker): assert msgs[0].message.text == "hello" -def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_document(mocker): +def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_document(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -229,7 +230,7 @@ def test_stream_online_results_sets_credentials_and_returns_empty_dict_online_do assert final_value == {} -def test_stream_online_results_raises_when_missing_params(mocker): +def test_stream_online_results_raises_when_missing_params(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -279,7 +280,7 @@ def test_stream_online_results_raises_when_missing_params(mocker): ) -def test_stream_online_results_yields_messages_and_returns_empty_dict_online_drive(mocker): +def test_stream_online_results_yields_messages_and_returns_empty_dict_online_drive(mocker: MockerFixture): class _Runtime: def __init__(self) -> None: self.runtime = types.SimpleNamespace(credentials=None) @@ -313,7 +314,7 @@ def test_stream_online_results_yields_messages_and_returns_empty_dict_online_dri assert final_value == {} -def test_stream_online_results_raises_for_unsupported_stream_type(mocker): +def test_stream_online_results_raises_for_unsupported_stream_type(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "get_datasource_runtime", return_value=mocker.Mock()) mocker.patch( "core.datasource.datasource_manager.DatasourceProviderService.get_datasource_credentials", @@ -337,7 +338,7 @@ def test_stream_online_results_raises_for_unsupported_stream_type(mocker): ) -def 
test_stream_node_events_emits_events_online_document(mocker): +def test_stream_node_events_emits_events_online_document(mocker: MockerFixture): # make manager's low-level stream produce TEXT only mocker.patch.object( DatasourceManager, @@ -370,7 +371,7 @@ def test_stream_node_events_emits_events_online_document(mocker): assert events[-1].node_run_result.status == WorkflowNodeExecutionStatus.SUCCEEDED -def test_stream_node_events_builds_file_and_variables_from_messages(mocker): +def test_stream_node_events_builds_file_and_variables_from_messages(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -478,7 +479,7 @@ def test_stream_node_events_builds_file_and_variables_from_messages(mocker): assert events[-1].node_run_result.outputs["x"] == 1 -def test_stream_node_events_raises_when_toolfile_missing(mocker): +def test_stream_node_events_raises_when_toolfile_missing(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -526,7 +527,7 @@ def test_stream_node_events_raises_when_toolfile_missing(mocker): ) -def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(mocker): +def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", return_value=_gen_messages_text_only("ignored")) file_in = File( @@ -580,7 +581,7 @@ def test_stream_node_events_online_drive_sets_variable_pool_file_and_outputs(moc assert completed.node_run_result.outputs["datasource_type"] == DatasourceProviderType.ONLINE_DRIVE -def test_stream_node_events_skips_file_build_for_non_online_types(mocker): +def test_stream_node_events_skips_file_build_for_non_online_types(mocker: MockerFixture): mocker.patch.object(DatasourceManager, "stream_online_results", 
return_value=_gen_messages_text_only("ignored")) def _transformed(**_kwargs): @@ -620,7 +621,7 @@ def test_stream_node_events_skips_file_build_for_non_online_types(mocker): assert events[-1].node_run_result.outputs["file"] is None -def test_get_upload_file_by_id_builds_file(mocker): +def test_get_upload_file_by_id_builds_file(mocker: MockerFixture): # fake UploadFile row fake_row = types.SimpleNamespace( id="fid", @@ -654,7 +655,7 @@ def test_get_upload_file_by_id_builds_file(mocker): assert f.storage_key == "k" -def test_get_upload_file_by_id_raises_when_missing(mocker): +def test_get_upload_file_by_id_raises_when_missing(mocker: MockerFixture): class _S: def __enter__(self): return self diff --git a/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py b/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py index 399b531205..9c1cbe82a0 100644 --- a/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py +++ b/api/tests/unit_tests/core/extension/test_api_based_extension_requestor.py @@ -1,11 +1,12 @@ import httpx import pytest +from pytest_mock import MockerFixture from core.extension.api_based_extension_requestor import APIBasedExtensionRequestor from models.api_based_extension import APIBasedExtensionPoint -def test_request_success(mocker): +def test_request_success(mocker: MockerFixture): # Mock httpx.Client and its context manager mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value @@ -28,7 +29,7 @@ def test_request_success(mocker): ) -def test_request_with_ssrf_proxy(mocker): +def test_request_with_ssrf_proxy(mocker: MockerFixture): # Mock dify_config mocker.patch("configs.dify_config.SSRF_PROXY_HTTP_URL", "http://proxy:8080") mocker.patch("configs.dify_config.SSRF_PROXY_HTTPS_URL", "https://proxy:8081") @@ -59,7 +60,7 @@ def test_request_with_ssrf_proxy(mocker): assert mock_transport.call_count == 2 -def test_request_with_only_one_proxy_config(mocker): +def 
test_request_with_only_one_proxy_config(mocker: MockerFixture): # Mock dify_config with only one proxy mocker.patch("configs.dify_config.SSRF_PROXY_HTTP_URL", "http://proxy:8080") mocker.patch("configs.dify_config.SSRF_PROXY_HTTPS_URL", None) @@ -84,7 +85,7 @@ def test_request_with_only_one_proxy_config(mocker): assert kwargs.get("mounts") is None -def test_request_timeout(mocker): +def test_request_timeout(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -95,7 +96,7 @@ def test_request_timeout(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_connection_error(mocker): +def test_request_connection_error(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -106,7 +107,7 @@ def test_request_connection_error(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_error_status_code(mocker): +def test_request_error_status_code(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) @@ -121,7 +122,7 @@ def test_request_error_status_code(mocker): requestor.request(APIBasedExtensionPoint.PING, {}) -def test_request_error_status_code_long_content(mocker): +def test_request_error_status_code_long_content(mocker: MockerFixture): mock_client = mocker.MagicMock() mock_client_instance = mock_client.__enter__.return_value mocker.patch("httpx.Client", return_value=mock_client) diff --git a/api/tests/unit_tests/core/helper/test_creators.py b/api/tests/unit_tests/core/helper/test_creators.py index df67d3f513..8750f6d907 100644 --- a/api/tests/unit_tests/core/helper/test_creators.py +++ b/api/tests/unit_tests/core/helper/test_creators.py @@ -8,7 +8,7 @@ from yarl import URL 
@pytest.fixture(autouse=True) -def _patch_creators_url(monkeypatch): +def _patch_creators_url(monkeypatch: pytest.MonkeyPatch): """Patch the module-level creators_platform_api_url for all tests.""" monkeypatch.setattr( "core.helper.creators.creators_platform_api_url", diff --git a/api/tests/unit_tests/core/ops/test_base_trace_instance.py b/api/tests/unit_tests/core/ops/test_base_trace_instance.py index ac65d13454..15a2af17ca 100644 --- a/api/tests/unit_tests/core/ops/test_base_trace_instance.py +++ b/api/tests/unit_tests/core/ops/test_base_trace_instance.py @@ -18,7 +18,7 @@ class ConcreteTraceInstance(BaseTraceInstance): @pytest.fixture -def mock_db_session(monkeypatch): +def mock_db_session(monkeypatch: pytest.MonkeyPatch): mock_session = MagicMock(spec=Session) mock_session.__enter__.return_value = mock_session mock_session.__exit__.return_value = None diff --git a/api/tests/unit_tests/core/ops/test_ops_trace_manager.py b/api/tests/unit_tests/core/ops/test_ops_trace_manager.py index beb99f92cd..33a3293682 100644 --- a/api/tests/unit_tests/core/ops/test_ops_trace_manager.py +++ b/api/tests/unit_tests/core/ops/test_ops_trace_manager.py @@ -203,7 +203,7 @@ class DummySessionContext: @pytest.fixture(autouse=True) -def patch_provider_map(monkeypatch): +def patch_provider_map(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.provider_config_map", FakeProviderMap({"dummy": FAKE_PROVIDER_ENTRY}) ) @@ -212,7 +212,7 @@ def patch_provider_map(monkeypatch): @pytest.fixture(autouse=True) -def patch_timer_and_current_app(monkeypatch): +def patch_timer_and_current_app(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.ops.ops_trace_manager.threading.Timer", DummyTimer) monkeypatch.setattr("core.ops.ops_trace_manager.trace_manager_queue", queue.Queue()) monkeypatch.setattr("core.ops.ops_trace_manager.trace_manager_timer", None) @@ -227,12 +227,12 @@ def patch_timer_and_current_app(monkeypatch): @pytest.fixture(autouse=True) -def 
patch_sqlalchemy_session(monkeypatch): +def patch_sqlalchemy_session(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("core.ops.ops_trace_manager.Session", DummySessionContext) @pytest.fixture -def encryption_mocks(monkeypatch): +def encryption_mocks(monkeypatch: pytest.MonkeyPatch): encrypt_mock = MagicMock(side_effect=lambda tenant, value: f"enc-{value}") batch_decrypt_mock = MagicMock(side_effect=lambda tenant, values: [f"dec-{value}" for value in values]) obfuscate_mock = MagicMock(side_effect=lambda value: f"ob-{value}") @@ -243,7 +243,7 @@ def encryption_mocks(monkeypatch): @pytest.fixture -def mock_db(monkeypatch): +def mock_db(monkeypatch: pytest.MonkeyPatch): session = MagicMock() session.scalars.return_value.all.return_value = ["chat"] db_mock = MagicMock() @@ -254,7 +254,7 @@ def mock_db(monkeypatch): @pytest.fixture -def workflow_repo_fixture(monkeypatch): +def workflow_repo_fixture(monkeypatch: pytest.MonkeyPatch): repo = MagicMock() repo.get_workflow_run_by_id_without_tenant.return_value = make_workflow_run() monkeypatch.setattr(TraceTask, "_get_workflow_run_repo", classmethod(lambda cls: repo)) @@ -340,13 +340,13 @@ def test_get_ops_trace_instance_handles_none_app(mock_db): assert OpsTraceManager.get_ops_trace_instance("app-id") is None -def test_get_ops_trace_instance_returns_none_when_disabled(mock_db, monkeypatch): +def test_get_ops_trace_instance_returns_none_when_disabled(mock_db, monkeypatch: pytest.MonkeyPatch): app = SimpleNamespace(id="app-id", tracing=json.dumps({"enabled": False})) mock_db.get.return_value = app assert OpsTraceManager.get_ops_trace_instance("app-id") is None -def test_get_ops_trace_instance_invalid_provider(mock_db, monkeypatch): +def test_get_ops_trace_instance_invalid_provider(mock_db, monkeypatch: pytest.MonkeyPatch): app = SimpleNamespace(id="app-id", tracing=json.dumps({"enabled": True, "tracing_provider": "missing"})) mock_db.get.return_value = app 
monkeypatch.setattr("core.ops.ops_trace_manager.provider_config_map", FakeProviderMap({})) @@ -388,7 +388,7 @@ def test_get_app_config_through_message_id_app_model_config(mock_db): assert result.id == "cfg" -def test_update_app_tracing_config_invalid_provider(mock_db, monkeypatch): +def test_update_app_tracing_config_invalid_provider(mock_db, monkeypatch: pytest.MonkeyPatch): mock_db.get.return_value = None with pytest.raises(ValueError, match="Invalid tracing provider"): OpsTraceManager.update_app_tracing_config("app", True, "bad") @@ -421,7 +421,7 @@ def test_get_app_tracing_config_returns_payload(mock_db): assert OpsTraceManager.get_app_tracing_config("app-id", mock_db) == payload -def test_check_and_project_helpers(monkeypatch): +def test_check_and_project_helpers(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.provider_config_map", FakeProviderMap( @@ -449,7 +449,7 @@ def test_check_and_project_helpers(monkeypatch): assert OpsTraceManager.get_trace_config_project_url({}, "dummy") == "url" -def test_trace_task_conversation_and_extract(monkeypatch): +def test_trace_task_conversation_and_extract(monkeypatch: pytest.MonkeyPatch): task = TraceTask(trace_type=TraceTaskName.CONVERSATION_TRACE, message_id="msg") assert task.conversation_trace(foo="bar") == {"foo": "bar"} assert task._extract_streaming_metrics(make_message_data(message_metadata="not json")) == {} @@ -525,7 +525,7 @@ def test_extract_streaming_metrics_invalid_json(): assert task._extract_streaming_metrics(fake_message) == {} -def test_trace_queue_manager_add_and_collect(monkeypatch): +def test_trace_queue_manager_add_and_collect(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) @@ -536,7 +536,7 @@ def test_trace_queue_manager_add_and_collect(monkeypatch): assert tasks == [task] -def test_trace_queue_manager_run_invokes_send(monkeypatch): +def 
test_trace_queue_manager_run_invokes_send(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) @@ -556,7 +556,7 @@ def test_trace_queue_manager_run_invokes_send(monkeypatch): assert called["tasks"] == [task] -def test_trace_queue_manager_send_to_celery(monkeypatch): +def test_trace_queue_manager_send_to_celery(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.ops.ops_trace_manager.OpsTraceManager.get_ops_trace_instance", classmethod(lambda cls, aid: True) ) diff --git a/api/tests/unit_tests/core/ops/test_trace_queue_manager.py b/api/tests/unit_tests/core/ops/test_trace_queue_manager.py index a4903054e0..13cf01651e 100644 --- a/api/tests/unit_tests/core/ops/test_trace_queue_manager.py +++ b/api/tests/unit_tests/core/ops/test_trace_queue_manager.py @@ -19,7 +19,7 @@ import pytest @pytest.fixture -def trace_queue_manager_and_task(monkeypatch): +def trace_queue_manager_and_task(monkeypatch: pytest.MonkeyPatch): """Fixture to provide TraceQueueManager and TraceTask with delayed imports.""" module_name = "core.ops.ops_trace_manager" if module_name not in sys.modules: diff --git a/api/tests/unit_tests/core/plugin/impl/test_agent_client.py b/api/tests/unit_tests/core/plugin/impl/test_agent_client.py index 1537ffacf5..d8843f0eeb 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_agent_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_agent_client.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.entities.request import PluginInvokeContext from core.plugin.impl.agent import PluginAgentClient @@ -15,7 +17,7 @@ def _agent_provider(name: str = "agent") -> SimpleNamespace: class TestPluginAgentClient: - def test_fetch_agent_strategy_providers(self, mocker): + def test_fetch_agent_strategy_providers(self, mocker: MockerFixture): client = PluginAgentClient() provider = 
_agent_provider("remote") @@ -43,7 +45,7 @@ class TestPluginAgentClient: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.strategies[0].identity.provider == "org/plugin/remote" - def test_fetch_agent_strategy_provider(self, mocker): + def test_fetch_agent_strategy_provider(self, mocker: MockerFixture): client = PluginAgentClient() provider = _agent_provider("provider") @@ -63,7 +65,7 @@ class TestPluginAgentClient: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.strategies[0].identity.provider == "org/plugin/provider" - def test_invoke_merges_chunks_and_passes_context(self, mocker): + def test_invoke_merges_chunks_and_passes_context(self, mocker: MockerFixture): client = PluginAgentClient() stream_mock = mocker.patch.object( client, "_request_with_plugin_daemon_response_stream", return_value=iter(["raw"]) diff --git a/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py b/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py index 5f564062d5..c2cce5d691 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_asset_manager.py @@ -1,12 +1,13 @@ from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture from core.plugin.impl.asset import PluginAssetManager class TestPluginAssetManager: - def test_fetch_asset_success(self, mocker): + def test_fetch_asset_success(self, mocker: MockerFixture): manager = PluginAssetManager() response = MagicMock(status_code=200, content=b"asset-bytes") request_mock = mocker.patch.object(manager, "_request", return_value=response) @@ -16,14 +17,14 @@ class TestPluginAssetManager: assert result == b"asset-bytes" request_mock.assert_called_once_with(method="GET", path="plugin/tenant-1/asset/asset-1") - def test_fetch_asset_not_found_raises(self, mocker): + def test_fetch_asset_not_found_raises(self, mocker: MockerFixture): manager = 
PluginAssetManager() mocker.patch.object(manager, "_request", return_value=MagicMock(status_code=404, content=b"")) with pytest.raises(ValueError, match="can not found asset asset-1"): manager.fetch_asset("tenant-1", "asset-1") - def test_extract_asset_success(self, mocker): + def test_extract_asset_success(self, mocker: MockerFixture): manager = PluginAssetManager() response = MagicMock(status_code=200, content=b"file-content") request_mock = mocker.patch.object(manager, "_request", return_value=response) @@ -37,7 +38,7 @@ class TestPluginAssetManager: params={"plugin_unique_identifier": "org/plugin:1", "file_path": "README.md"}, ) - def test_extract_asset_not_found_raises(self, mocker): + def test_extract_asset_not_found_raises(self, mocker: MockerFixture): manager = PluginAssetManager() mocker.patch.object(manager, "_request", return_value=MagicMock(status_code=404, content=b"")) diff --git a/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py b/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py index 23894bd417..b154f056ca 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_base_client_impl.py @@ -1,6 +1,7 @@ import json import pytest +from pytest_mock import MockerFixture from core.plugin.endpoint.exc import EndpointSetupFailedError from core.plugin.entities.plugin_daemon import PluginDaemonInnerError @@ -39,7 +40,7 @@ class _StreamContext: class TestBasePluginClientImpl: - def test_inject_trace_headers(self, mocker): + def test_inject_trace_headers(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch("core.plugin.impl.base.dify_config.ENABLE_OTEL", True) trace_header = "00-abc-xyz-01" @@ -54,7 +55,7 @@ class TestBasePluginClientImpl: client._inject_trace_headers(headers_with_existing) assert headers_with_existing["TraceParent"] == "exists" - def test_stream_request_handles_data_lines_and_dict_payload(self, mocker): + def 
test_stream_request_handles_data_lines_and_dict_payload(self, mocker: MockerFixture): client = BasePluginClient() stream_mock = mocker.patch( "httpx.Client.stream", @@ -66,14 +67,14 @@ class TestBasePluginClientImpl: assert result == ["hello", "world"] assert stream_mock.call_args.kwargs["data"] == {"k": "v"} - def test_request_with_plugin_daemon_response_handles_request_exception(self, mocker): + def test_request_with_plugin_daemon_response_handles_request_exception(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_request", side_effect=RuntimeError("boom")) with pytest.raises(ValueError, match="Failed to request plugin daemon"): client._request_with_plugin_daemon_response("GET", "plugin/tenant/path", bool) - def test_request_with_plugin_daemon_response_applies_transformer(self, mocker): + def test_request_with_plugin_daemon_response_applies_transformer(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_request", return_value=_ResponseStub({"code": 0, "message": "", "data": True})) @@ -88,14 +89,14 @@ class TestBasePluginClientImpl: assert result is True assert transformed == {"code": 0, "message": "", "data": True} - def test_request_with_plugin_daemon_response_stream_malformed_json_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_malformed_json_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"error":"bad-line"}'])) with pytest.raises(ValueError, match="bad-line"): list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) - def test_request_with_plugin_daemon_response_stream_plugin_daemon_inner_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_plugin_daemon_inner_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object( client, "_stream_request", 
return_value=iter(['{"code":-500,"message":"not-json","data":null}']) @@ -105,14 +106,14 @@ class TestBasePluginClientImpl: list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) assert exc_info.value.message == "not-json" - def test_request_with_plugin_daemon_response_stream_plugin_daemon_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_plugin_daemon_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"code":-1,"message":"err","data":null}'])) with pytest.raises(ValueError, match="plugin daemon: err, code: -1"): list(client._request_with_plugin_daemon_response_stream("GET", "p", bool)) - def test_request_with_plugin_daemon_response_stream_empty_data_error(self, mocker): + def test_request_with_plugin_daemon_response_stream_empty_data_error(self, mocker: MockerFixture): client = BasePluginClient() mocker.patch.object(client, "_stream_request", return_value=iter(['{"code":0,"message":"","data":null}'])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py b/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py index 4c5987d759..94723dcfe2 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_datasource_manager.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.datasource.entities.datasource_entities import ( GetOnlineDocumentPageContentRequest, OnlineDriveBrowseFilesRequest, @@ -19,7 +21,7 @@ def _datasource_provider(name: str = "provider") -> SimpleNamespace: class TestPluginDatasourceManager: - def test_fetch_datasource_providers(self, mocker): + def test_fetch_datasource_providers(self, mocker: MockerFixture): manager = PluginDatasourceManager() provider = _datasource_provider("remote") repack = mocker.patch("core.plugin.impl.datasource.ToolTransformService.repack_provider") @@ 
-52,7 +54,7 @@ class TestPluginDatasourceManager: assert result[1].declaration.datasources[0].identity.provider == "org/plugin/remote" repack.assert_called_once_with(tenant_id="tenant-1", provider=provider) - def test_fetch_installed_datasource_providers(self, mocker): + def test_fetch_installed_datasource_providers(self, mocker: MockerFixture): manager = PluginDatasourceManager() provider = _datasource_provider("remote") repack = mocker.patch("core.plugin.impl.datasource.ToolTransformService.repack_provider") @@ -83,7 +85,7 @@ class TestPluginDatasourceManager: assert result[0].declaration.datasources[0].identity.provider == "org/plugin/remote" repack.assert_called_once_with(tenant_id="tenant-1", provider=provider) - def test_fetch_datasource_provider_local_and_remote(self, mocker): + def test_fetch_datasource_provider_local_and_remote(self, mocker: MockerFixture): manager = PluginDatasourceManager() local = manager.fetch_datasource_provider("tenant-1", "langgenius/file/file") @@ -113,7 +115,7 @@ class TestPluginDatasourceManager: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.datasources[0].identity.provider == "org/plugin/provider" - def test_get_website_crawl_streaming(self, mocker): + def test_get_website_crawl_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["crawl"]) @@ -132,7 +134,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_get_online_document_pages_streaming(self, mocker): + def test_get_online_document_pages_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["pages"]) @@ -151,7 +153,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def 
test_get_online_document_page_content_streaming(self, mocker): + def test_get_online_document_page_content_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["content"]) @@ -170,7 +172,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_online_drive_browse_files_streaming(self, mocker): + def test_online_drive_browse_files_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["browse"]) @@ -189,7 +191,7 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_online_drive_download_file_streaming(self, mocker): + def test_online_drive_download_file_streaming(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter(["download"]) @@ -208,14 +210,14 @@ class TestPluginDatasourceManager: assert stream_mock.call_count == 1 - def test_validate_provider_credentials_returns_true_when_stream_yields_result(self, mocker): + def test_validate_provider_credentials_returns_true_when_stream_yields_result(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter([SimpleNamespace(result=True)]) assert manager.validate_provider_credentials("tenant-1", "user-1", "provider", "org/plugin", {"k": "v"}) is True - def test_validate_provider_credentials_returns_false_when_stream_empty(self, mocker): + def test_validate_provider_credentials_returns_false_when_stream_empty(self, mocker: MockerFixture): manager = PluginDatasourceManager() stream_mock = mocker.patch.object(manager, 
"_request_with_plugin_daemon_response_stream") stream_mock.return_value = iter([]) diff --git a/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py b/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py index c80785aee0..05959207b1 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_debugging_client.py @@ -1,10 +1,12 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.impl.debugging import PluginDebuggingClient class TestPluginDebuggingClient: - def test_get_debugging_key(self, mocker): + def test_get_debugging_key(self, mocker: MockerFixture): client = PluginDebuggingClient() request_mock = mocker.patch.object( client, diff --git a/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py b/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py index 4cf657a050..7a24cc01d1 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_endpoint_client_impl.py @@ -1,11 +1,12 @@ import pytest +from pytest_mock import MockerFixture from core.plugin.impl.endpoint import PluginEndpointClient from core.plugin.impl.exc import PluginDaemonInternalServerError class TestPluginEndpointClientImpl: - def test_create_endpoint(self, mocker): + def test_create_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -18,7 +19,7 @@ class TestPluginEndpointClientImpl: assert args[:3] == ("POST", "plugin/tenant-1/endpoint/setup", bool) assert kwargs["data"]["plugin_unique_identifier"] == "org/plugin:1" - def test_list_endpoints(self, mocker): + def test_list_endpoints(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["endpoint"]) @@ -28,7 
+29,7 @@ class TestPluginEndpointClientImpl: assert request_mock.call_args.args[1] == "plugin/tenant-1/endpoint/list" assert request_mock.call_args.kwargs["params"] == {"page": 2, "page_size": 20} - def test_list_endpoints_for_single_plugin(self, mocker): + def test_list_endpoints_for_single_plugin(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["endpoint"]) @@ -38,7 +39,7 @@ class TestPluginEndpointClientImpl: assert request_mock.call_args.args[1] == "plugin/tenant-1/endpoint/list/plugin" assert request_mock.call_args.kwargs["params"] == {"plugin_id": "org/plugin", "page": 1, "page_size": 10} - def test_update_endpoint(self, mocker): + def test_update_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -47,7 +48,7 @@ class TestPluginEndpointClientImpl: assert result is True assert request_mock.call_args.args[:3] == ("POST", "plugin/tenant-1/endpoint/update", bool) - def test_enable_and_disable_endpoint(self, mocker): + def test_enable_and_disable_endpoint(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=True) @@ -58,7 +59,7 @@ class TestPluginEndpointClientImpl: assert calls[0].args[1] == "plugin/tenant-1/endpoint/enable" assert calls[1].args[1] == "plugin/tenant-1/endpoint/disable" - def test_delete_endpoint_idempotent_and_re_raise(self, mocker): + def test_delete_endpoint_idempotent_and_re_raise(self, mocker: MockerFixture): client = PluginEndpointClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response") diff --git a/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py b/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py index 8c6f1c6b7f..d99a8c114f 100644 --- 
a/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py +++ b/api/tests/unit_tests/core/plugin/impl/test_exc_impl.py @@ -1,11 +1,13 @@ import json +from pytest_mock import MockerFixture + from core.plugin.impl import exc as exc_module from core.plugin.impl.exc import PluginDaemonError, PluginInvokeError class TestPluginImplExceptions: - def test_plugin_daemon_error_str_contains_request_id(self, mocker): + def test_plugin_daemon_error_str_contains_request_id(self, mocker: MockerFixture): mocker.patch("core.plugin.impl.exc.get_request_id", return_value="req-123") error = PluginDaemonError("bad") @@ -21,7 +23,7 @@ class TestPluginImplExceptions: assert "RateLimit" in friendly assert "too many" in friendly - def test_plugin_invoke_error_invalid_json_and_fallback(self, mocker): + def test_plugin_invoke_error_invalid_json_and_fallback(self, mocker: MockerFixture): err = PluginInvokeError("plain text") assert err._get_error_object() == {} @@ -32,7 +34,7 @@ class TestPluginImplExceptions: err2 = PluginInvokeError("plain text") assert err2.get_error_message() == "plain text" - def test_plugin_invoke_error_get_error_object_handles_adapter_exception(self, mocker): + def test_plugin_invoke_error_get_error_object_handles_adapter_exception(self, mocker: MockerFixture): adapter = mocker.patch.object(exc_module, "TypeAdapter") adapter.return_value.validate_json.side_effect = RuntimeError("invalid") diff --git a/api/tests/unit_tests/core/plugin/impl/test_model_client.py b/api/tests/unit_tests/core/plugin/impl/test_model_client.py index bcbebbb38b..6dc572310c 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_model_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_model_client.py @@ -4,13 +4,14 @@ import io from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from core.plugin.entities.plugin_daemon import PluginDaemonInnerError from core.plugin.impl.model import PluginModelClient class TestPluginModelClient: - def 
test_fetch_model_providers(self, mocker): + def test_fetch_model_providers(self, mocker: MockerFixture): client = PluginModelClient() request_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response", return_value=["provider-a"]) @@ -23,7 +24,7 @@ class TestPluginModelClient: ) assert request_mock.call_args.kwargs["params"] == {"page": 1, "page_size": 256} - def test_get_model_schema(self, mocker): + def test_get_model_schema(self, mocker: MockerFixture): client = PluginModelClient() schema = SimpleNamespace(name="schema") stream_mock = mocker.patch.object( @@ -45,7 +46,7 @@ class TestPluginModelClient: assert result is schema assert stream_mock.call_args.args[:2] == ("POST", "plugin/tenant-1/dispatch/model/schema") - def test_get_model_schema_empty_stream_returns_none(self, mocker): + def test_get_model_schema_empty_stream_returns_none(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -53,7 +54,7 @@ class TestPluginModelClient: assert result is None - def test_validate_provider_credentials(self, mocker): + def test_validate_provider_credentials(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -77,7 +78,7 @@ class TestPluginModelClient: "plugin/tenant-1/dispatch/model/validate_provider_credentials", ) - def test_validate_provider_credentials_without_dict_update(self, mocker): + def test_validate_provider_credentials_without_dict_update(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -91,13 +92,13 @@ class TestPluginModelClient: assert result is False assert credentials == {"api_key": "same"} - def test_validate_provider_credentials_empty_returns_false(self, mocker): + def test_validate_provider_credentials_empty_returns_false(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, 
"_request_with_plugin_daemon_response_stream", return_value=iter([])) assert client.validate_provider_credentials("tenant-1", "user-1", "org/plugin:1", "provider-a", {}) is False - def test_validate_model_credentials(self, mocker): + def test_validate_model_credentials(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -123,7 +124,7 @@ class TestPluginModelClient: "plugin/tenant-1/dispatch/model/validate_model_credentials", ) - def test_validate_model_credentials_empty_returns_false(self, mocker): + def test_validate_model_credentials_empty_returns_false(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -132,7 +133,7 @@ class TestPluginModelClient: is False ) - def test_invoke_llm(self, mocker): + def test_invoke_llm(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, "_request_with_plugin_daemon_response_stream", return_value=iter(["chunk-1"]) @@ -160,7 +161,7 @@ class TestPluginModelClient: assert call_kwargs["data"]["data"]["stream"] is False assert call_kwargs["data"]["data"]["model_parameters"] == {"temperature": 0.1} - def test_invoke_llm_wraps_plugin_daemon_inner_error(self, mocker): + def test_invoke_llm_wraps_plugin_daemon_inner_error(self, mocker: MockerFixture): client = PluginModelClient() def _boom(): @@ -182,7 +183,7 @@ class TestPluginModelClient: ) ) - def test_get_llm_num_tokens(self, mocker): + def test_get_llm_num_tokens(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -204,7 +205,7 @@ class TestPluginModelClient: assert result == 42 - def test_get_llm_num_tokens_empty_returns_zero(self, mocker): + def test_get_llm_num_tokens_empty_returns_zero(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", 
return_value=iter([])) @@ -213,7 +214,7 @@ class TestPluginModelClient: == 0 ) - def test_invoke_text_embedding(self, mocker): + def test_invoke_text_embedding(self, mocker: MockerFixture): client = PluginModelClient() embedding_result = SimpleNamespace(data=[[0.1, 0.2]]) mocker.patch.object( @@ -233,7 +234,7 @@ class TestPluginModelClient: assert result is embedding_result - def test_invoke_text_embedding_empty_raises(self, mocker): + def test_invoke_text_embedding_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -242,7 +243,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "embedding-a", {}, ["hello"], "x" ) - def test_invoke_multimodal_embedding(self, mocker): + def test_invoke_multimodal_embedding(self, mocker: MockerFixture): client = PluginModelClient() embedding_result = SimpleNamespace(data=[[0.3, 0.4]]) mocker.patch.object( @@ -262,7 +263,7 @@ class TestPluginModelClient: assert result is embedding_result - def test_invoke_multimodal_embedding_empty_raises(self, mocker): + def test_invoke_multimodal_embedding_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -271,7 +272,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "embedding-a", {}, [{"type": "image"}], "x" ) - def test_get_text_embedding_num_tokens(self, mocker): + def test_get_text_embedding_num_tokens(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -287,7 +288,7 @@ class TestPluginModelClient: 3, ] - def test_get_text_embedding_num_tokens_empty_returns_list(self, mocker): + def test_get_text_embedding_num_tokens_empty_returns_list(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, 
"_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -298,7 +299,7 @@ class TestPluginModelClient: == [] ) - def test_invoke_rerank(self, mocker): + def test_invoke_rerank(self, mocker: MockerFixture): client = PluginModelClient() rerank_result = SimpleNamespace(scores=[0.9]) mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([rerank_result])) @@ -318,14 +319,14 @@ class TestPluginModelClient: assert result is rerank_result - def test_invoke_rerank_empty_raises(self, mocker): + def test_invoke_rerank_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) with pytest.raises(ValueError, match="Failed to invoke rerank"): client.invoke_rerank("tenant-1", "user-1", "org/plugin:1", "provider-a", "rerank-a", {}, "q", ["doc-1"]) - def test_invoke_multimodal_rerank(self, mocker): + def test_invoke_multimodal_rerank(self, mocker: MockerFixture): client = PluginModelClient() rerank_result = SimpleNamespace(scores=[0.8]) mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([rerank_result])) @@ -345,7 +346,7 @@ class TestPluginModelClient: assert result is rerank_result - def test_invoke_multimodal_rerank_empty_raises(self, mocker): + def test_invoke_multimodal_rerank_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -361,7 +362,7 @@ class TestPluginModelClient: [{"type": "image"}], ) - def test_invoke_tts(self, mocker): + def test_invoke_tts(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -384,7 +385,7 @@ class TestPluginModelClient: assert result == [b"hello", b"!"] - def test_invoke_tts_wraps_plugin_daemon_inner_error(self, mocker): + def test_invoke_tts_wraps_plugin_daemon_inner_error(self, mocker: 
MockerFixture): client = PluginModelClient() def _boom(): @@ -396,7 +397,7 @@ class TestPluginModelClient: with pytest.raises(ValueError, match="tts error-400"): list(client.invoke_tts("tenant-1", "user-1", "org/plugin:1", "provider-a", "tts-a", {}, "hello", "alloy")) - def test_get_tts_model_voices(self, mocker): + def test_get_tts_model_voices(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object( client, @@ -425,13 +426,13 @@ class TestPluginModelClient: assert result == [{"name": "Alloy", "value": "alloy"}, {"name": "Echo", "value": "echo"}] - def test_get_tts_model_voices_empty_returns_list(self, mocker): + def test_get_tts_model_voices_empty_returns_list(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) assert client.get_tts_model_voices("tenant-1", "user-1", "org/plugin:1", "provider-a", "tts-a", {}) == [] - def test_invoke_speech_to_text(self, mocker): + def test_invoke_speech_to_text(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -452,7 +453,7 @@ class TestPluginModelClient: assert result == "transcribed text" assert stream_mock.call_args.kwargs["data"]["data"]["file"] == "616263" - def test_invoke_speech_to_text_empty_raises(self, mocker): + def test_invoke_speech_to_text_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -461,7 +462,7 @@ class TestPluginModelClient: "tenant-1", "user-1", "org/plugin:1", "provider-a", "stt-a", {}, io.BytesIO(b"abc") ) - def test_invoke_moderation(self, mocker): + def test_invoke_moderation(self, mocker: MockerFixture): client = PluginModelClient() stream_mock = mocker.patch.object( client, @@ -482,7 +483,7 @@ class TestPluginModelClient: assert result is True assert stream_mock.call_args.kwargs["path"] == 
"plugin/tenant-1/dispatch/moderation/invoke" - def test_invoke_moderation_empty_raises(self, mocker): + def test_invoke_moderation_empty_raises(self, mocker: MockerFixture): client = PluginModelClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py b/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py index 6fb4c99432..f6c9b1c669 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py +++ b/api/tests/unit_tests/core/plugin/impl/test_oauth_handler.py @@ -2,6 +2,7 @@ from io import BytesIO from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from werkzeug import Request from core.plugin.impl.oauth import OAuthHandler @@ -25,7 +26,7 @@ def _build_request(body: bytes = b"payload") -> Request: class TestOAuthHandler: - def test_get_authorization_url(self, mocker): + def test_get_authorization_url(self, mocker: MockerFixture): handler = OAuthHandler() stream_mock = mocker.patch.object( handler, @@ -45,7 +46,7 @@ class TestOAuthHandler: assert response.authorization_url == "https://auth.example.com" assert stream_mock.call_count == 1 - def test_get_authorization_url_no_response_raises(self, mocker): + def test_get_authorization_url_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -59,7 +60,7 @@ class TestOAuthHandler: system_credentials={}, ) - def test_get_credentials(self, mocker): + def test_get_credentials(self, mocker: MockerFixture): handler = OAuthHandler() captured_data = {} @@ -85,7 +86,7 @@ class TestOAuthHandler: assert "raw_http_request" in captured_data["data"] assert stream_mock.call_count == 1 - def test_get_credentials_no_response_raises(self, mocker): + def test_get_credentials_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() 
mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -100,7 +101,7 @@ class TestOAuthHandler: request=_build_request(), ) - def test_refresh_credentials(self, mocker): + def test_refresh_credentials(self, mocker: MockerFixture): handler = OAuthHandler() stream_mock = mocker.patch.object( handler, @@ -121,7 +122,7 @@ class TestOAuthHandler: assert response.credentials == {"token": "new"} assert stream_mock.call_count == 1 - def test_refresh_credentials_no_response_raises(self, mocker): + def test_refresh_credentials_no_response_raises(self, mocker: MockerFixture): handler = OAuthHandler() mocker.patch.object(handler, "_request_with_plugin_daemon_response_stream", return_value=iter([])) diff --git a/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py b/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py index 80cf46f9bb..3ae3cc18e4 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py +++ b/api/tests/unit_tests/core/plugin/impl/test_tool_manager.py @@ -1,5 +1,7 @@ from types import SimpleNamespace +from pytest_mock import MockerFixture + from core.plugin.entities.plugin_daemon import CredentialType from core.plugin.impl.tool import PluginToolManager @@ -15,7 +17,7 @@ def _tool_provider(name: str = "provider") -> SimpleNamespace: class TestPluginToolManager: - def test_fetch_tool_providers(self, mocker): + def test_fetch_tool_providers(self, mocker: MockerFixture): manager = PluginToolManager() provider = _tool_provider("remote") mocker.patch("core.plugin.impl.tool.resolve_dify_schema_refs", return_value={"resolved": True}) @@ -44,7 +46,7 @@ class TestPluginToolManager: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.tools[0].identity.provider == "org/plugin/remote" - def test_fetch_tool_provider(self, mocker): + def test_fetch_tool_provider(self, mocker: MockerFixture): manager = PluginToolManager() provider = _tool_provider("provider") 
mocker.patch("core.plugin.impl.tool.resolve_dify_schema_refs", return_value={"resolved": True}) @@ -68,7 +70,7 @@ class TestPluginToolManager: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.tools[0].identity.provider == "org/plugin/provider" - def test_invoke_merges_chunks(self, mocker): + def test_invoke_merges_chunks(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object( manager, "_request_with_plugin_daemon_response_stream", return_value=iter(["chunk"]) @@ -92,7 +94,7 @@ class TestPluginToolManager: assert merge_mock.call_count == 1 assert stream_mock.call_args.kwargs["headers"]["X-Plugin-ID"] == "org/plugin" - def test_validate_credentials_paths(self, mocker): + def test_validate_credentials_paths(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") @@ -108,7 +110,7 @@ class TestPluginToolManager: stream_mock.return_value = iter([]) assert manager.validate_datasource_credentials("tenant-1", "user-1", "org/plugin/provider", {"k": "v"}) is False - def test_get_runtime_parameters_paths(self, mocker): + def test_get_runtime_parameters_paths(self, mocker: MockerFixture): manager = PluginToolManager() stream_mock = mocker.patch.object(manager, "_request_with_plugin_daemon_response_stream") diff --git a/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py b/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py index 76da51c2c8..811bb7e50d 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py +++ b/api/tests/unit_tests/core/plugin/impl/test_trigger_client.py @@ -2,6 +2,7 @@ from io import BytesIO from types import SimpleNamespace import pytest +from pytest_mock import MockerFixture from werkzeug import Request from core.plugin.entities.plugin_daemon import CredentialType @@ -62,7 +63,7 @@ def _subscription_call_kwargs(method_name: str) -> dict: class 
TestPluginTriggerClient: - def test_fetch_trigger_providers(self, mocker): + def test_fetch_trigger_providers(self, mocker: MockerFixture): client = PluginTriggerClient() provider = _trigger_provider("remote") @@ -89,7 +90,7 @@ class TestPluginTriggerClient: assert result[0].declaration.identity.name == "org/plugin/remote" assert result[0].declaration.events[0].identity.provider == "org/plugin/remote" - def test_fetch_trigger_provider(self, mocker): + def test_fetch_trigger_provider(self, mocker: MockerFixture): client = PluginTriggerClient() provider = _trigger_provider("provider") @@ -108,7 +109,7 @@ class TestPluginTriggerClient: assert result.declaration.identity.name == "org/plugin/provider" assert result.declaration.events[0].identity.provider == "org/plugin/provider" - def test_invoke_trigger_event(self, mocker): + def test_invoke_trigger_event(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = mocker.patch.object( client, @@ -132,7 +133,7 @@ class TestPluginTriggerClient: assert result.variables == {"ok": True} assert stream_mock.call_count == 1 - def test_invoke_trigger_event_no_response_raises(self, mocker): + def test_invoke_trigger_event_no_response_raises(self, mocker: MockerFixture): client = PluginTriggerClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) @@ -150,7 +151,7 @@ class TestPluginTriggerClient: payload={"payload": 1}, ) - def test_validate_provider_credentials(self, mocker): + def test_validate_provider_credentials(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = mocker.patch.object(client, "_request_with_plugin_daemon_response_stream") @@ -163,7 +164,7 @@ class TestPluginTriggerClient: ): client.validate_provider_credentials("tenant-1", "user-1", "org/plugin/provider", {"k": "v"}) - def test_dispatch_event(self, mocker): + def test_dispatch_event(self, mocker: MockerFixture): client = PluginTriggerClient() stream_mock = 
mocker.patch.object( client, @@ -195,7 +196,7 @@ class TestPluginTriggerClient: ) @pytest.mark.parametrize("method_name", ["subscribe", "unsubscribe", "refresh"]) - def test_subscription_operations_success(self, mocker, method_name): + def test_subscription_operations_success(self, mocker: MockerFixture, method_name): client = PluginTriggerClient() stream_mock = mocker.patch.object( client, @@ -217,7 +218,7 @@ class TestPluginTriggerClient: ("refresh", "No response received from plugin daemon for refresh"), ], ) - def test_subscription_operations_no_response(self, mocker, method_name, expected): + def test_subscription_operations_no_response(self, mocker: MockerFixture, method_name, expected): client = PluginTriggerClient() mocker.patch.object(client, "_request_with_plugin_daemon_response_stream", return_value=iter([])) method = getattr(client, method_name) diff --git a/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py b/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py index 3feb4159ad..2ed7c70ed9 100644 --- a/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py +++ b/api/tests/unit_tests/core/plugin/test_backwards_invocation_app.py @@ -4,6 +4,7 @@ from unittest.mock import MagicMock import pytest from pydantic import BaseModel +from pytest_mock import MockerFixture from core.app.layers.pause_state_persist_layer import PauseStateLayerConfig from core.plugin.backwards_invocation.app import PluginAppBackwardsInvocation @@ -41,7 +42,7 @@ class TestBaseBackwardsInvocation: class TestPluginAppBackwardsInvocation: - def test_fetch_app_info_workflow_path(self, mocker): + def test_fetch_app_info_workflow_path(self, mocker: MockerFixture): workflow = MagicMock() workflow.features_dict = {"feature": "v"} workflow.user_input_form.return_value = [{"name": "foo"}] @@ -57,7 +58,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"data": {"mapped": True}} mapper.assert_called_once_with(features_dict={"feature": "v"}, 
user_input_form=[{"name": "foo"}]) - def test_fetch_app_info_model_config_path(self, mocker): + def test_fetch_app_info_model_config_path(self, mocker: MockerFixture): model_config = MagicMock() model_config.to_dict.return_value = {"user_input_form": [{"name": "bar"}], "k": "v"} app = MagicMock(mode=AppMode.COMPLETION, app_model_config=model_config) @@ -81,7 +82,7 @@ class TestPluginAppBackwardsInvocation: (AppMode.COMPLETION, "invoke_completion_app"), ], ) - def test_invoke_app_routes_by_mode(self, mocker, mode, route_method): + def test_invoke_app_routes_by_mode(self, mocker: MockerFixture, mode, route_method): app = MagicMock(mode=mode) user = MagicMock() mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=app) @@ -102,7 +103,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"routed": True} assert route.call_count == 1 - def test_invoke_app_uses_end_user_when_user_id_missing(self, mocker): + def test_invoke_app_uses_end_user_when_user_id_missing(self, mocker: MockerFixture): app = MagicMock(mode=AppMode.WORKFLOW) end_user = MagicMock() mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=app) @@ -127,7 +128,7 @@ class TestPluginAppBackwardsInvocation: get_or_create.assert_called_once_with(app) assert route.call_args.args[1] is end_user - def test_invoke_app_missing_query_for_chat_raises(self, mocker): + def test_invoke_app_missing_query_for_chat_raises(self, mocker: MockerFixture): mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=MagicMock(mode=AppMode.CHAT)) mocker.patch.object(PluginAppBackwardsInvocation, "_get_user", return_value=MagicMock()) @@ -143,7 +144,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_app_unexpected_mode_raises(self, mocker): + def test_invoke_app_unexpected_mode_raises(self, mocker: MockerFixture): mocker.patch.object(PluginAppBackwardsInvocation, "_get_app", return_value=MagicMock(mode="other")) 
mocker.patch.object(PluginAppBackwardsInvocation, "_get_user", return_value=MagicMock()) @@ -166,7 +167,7 @@ class TestPluginAppBackwardsInvocation: (AppMode.CHAT, "core.plugin.backwards_invocation.app.ChatAppGenerator.generate"), ], ) - def test_invoke_chat_app_agent_and_chat(self, mocker, mode, generator_path): + def test_invoke_chat_app_agent_and_chat(self, mocker: MockerFixture, mode, generator_path): app = MagicMock(mode=mode, workflow=None) spy = mocker.patch(generator_path, return_value={"result": "ok"}) @@ -183,7 +184,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"result": "ok"} assert spy.call_count == 1 - def test_invoke_chat_app_advanced_chat_injects_pause_state_config(self, mocker): + def test_invoke_chat_app_advanced_chat_injects_pause_state_config(self, mocker: MockerFixture): workflow = MagicMock() workflow.created_by = "owner-id" @@ -242,7 +243,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_workflow_app_injects_pause_state_config(self, mocker): + def test_invoke_workflow_app_injects_pause_state_config(self, mocker: MockerFixture): workflow = MagicMock() workflow.created_by = "owner-id" @@ -284,7 +285,7 @@ class TestPluginAppBackwardsInvocation: files=[], ) - def test_invoke_completion_app(self, mocker): + def test_invoke_completion_app(self, mocker: MockerFixture): spy = mocker.patch( "core.plugin.backwards_invocation.app.CompletionAppGenerator.generate", return_value={"ok": 1} ) @@ -295,7 +296,7 @@ class TestPluginAppBackwardsInvocation: assert result == {"ok": 1} assert spy.call_count == 1 - def test_get_user_returns_end_user(self, mocker): + def test_get_user_returns_end_user(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [MagicMock(id="end-user")] session_ctx = MagicMock() @@ -307,7 +308,7 @@ class TestPluginAppBackwardsInvocation: user = PluginAppBackwardsInvocation._get_user("uid") assert user.id == "end-user" - def test_get_user_falls_back_to_account_user(self, 
mocker): + def test_get_user_falls_back_to_account_user(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [None, MagicMock(id="account-user")] session_ctx = MagicMock() @@ -319,7 +320,7 @@ class TestPluginAppBackwardsInvocation: user = PluginAppBackwardsInvocation._get_user("uid") assert user.id == "account-user" - def test_get_user_raises_when_user_not_found(self, mocker): + def test_get_user_raises_when_user_not_found(self, mocker: MockerFixture): session = MagicMock() session.scalar.side_effect = [None, None] session_ctx = MagicMock() @@ -331,21 +332,21 @@ class TestPluginAppBackwardsInvocation: with pytest.raises(ValueError, match="user not found"): PluginAppBackwardsInvocation._get_user("uid") - def test_get_app_returns_app(self, mocker): + def test_get_app_returns_app(self, mocker: MockerFixture): app_obj = MagicMock(id="app") db = SimpleNamespace(session=MagicMock(scalar=MagicMock(return_value=app_obj))) mocker.patch("core.plugin.backwards_invocation.app.db", db) assert PluginAppBackwardsInvocation._get_app("app", "tenant") is app_obj - def test_get_app_raises_when_missing(self, mocker): + def test_get_app_raises_when_missing(self, mocker: MockerFixture): db = SimpleNamespace(session=MagicMock(scalar=MagicMock(return_value=None))) mocker.patch("core.plugin.backwards_invocation.app.db", db) with pytest.raises(ValueError, match="app not found"): PluginAppBackwardsInvocation._get_app("app", "tenant") - def test_get_app_raises_when_query_fails(self, mocker): + def test_get_app_raises_when_query_fails(self, mocker: MockerFixture): db = SimpleNamespace(session=MagicMock(scalar=MagicMock(side_effect=RuntimeError("db down")))) mocker.patch("core.plugin.backwards_invocation.app.db", db) diff --git a/api/tests/unit_tests/core/plugin/test_plugin_entities.py b/api/tests/unit_tests/core/plugin/test_plugin_entities.py index f1c4c7e700..deac0ba1da 100644 --- a/api/tests/unit_tests/core/plugin/test_plugin_entities.py +++ 
b/api/tests/unit_tests/core/plugin/test_plugin_entities.py @@ -5,6 +5,7 @@ from enum import StrEnum import pytest from flask import Response from pydantic import ValidationError +from pytest_mock import MockerFixture from core.plugin.entities.endpoint import EndpointEntityWithInstance from core.plugin.entities.marketplace import MarketplacePluginDeclaration, MarketplacePluginSnapshot @@ -34,7 +35,7 @@ from graphon.model_runtime.entities.message_entities import ( class TestEndpointEntity: - def test_endpoint_entity_with_instance_renders_url(self, mocker): + def test_endpoint_entity_with_instance_renders_url(self, mocker: MockerFixture): mocker.patch("core.plugin.entities.endpoint.dify_config.ENDPOINT_URL_TEMPLATE", "https://dify.test/{hook_id}") now = datetime.datetime.now(datetime.UTC) diff --git a/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py index 1b114b369a..1f46634b89 100644 --- a/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py +++ b/api/tests/unit_tests/core/prompt/test_extract_thread_messages.py @@ -1,5 +1,7 @@ from uuid import uuid4 +from pytest_mock import MockerFixture + from constants import UUID_NIL from core.prompt.utils.extract_thread_messages import extract_thread_messages from core.prompt.utils.get_thread_messages_length import get_thread_messages_length @@ -103,7 +105,7 @@ def test_extract_thread_messages_breaks_when_parent_is_none(): assert result[0].id == id2 -def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker): +def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker: MockerFixture): id1, id2 = str(uuid4()), str(uuid4()) messages = [ MockMessage(id2, id1, answer=""), # newest generated message should be excluded @@ -119,7 +121,7 @@ def test_get_thread_messages_length_excludes_newly_created_empty_answer(mocker): mock_scalars.assert_called_once() -def 
test_get_thread_messages_length_keeps_non_empty_latest_answer(mocker): +def test_get_thread_messages_length_keeps_non_empty_latest_answer(mocker: MockerFixture): id1, id2 = str(uuid4()), str(uuid4()) messages = [ MockMessage(id2, id1, answer="latest-answer"), diff --git a/api/tests/unit_tests/core/prompt/test_prompt_transform.py b/api/tests/unit_tests/core/prompt/test_prompt_transform.py index 5308c8e7b3..3d71e73496 100644 --- a/api/tests/unit_tests/core/prompt/test_prompt_transform.py +++ b/api/tests/unit_tests/core/prompt/test_prompt_transform.py @@ -209,7 +209,7 @@ class TestPromptTransform: assert result == ["only"] memory.get_history_prompt_messages.assert_called_with(max_token_limit=10, message_limit=None) - def test_append_chat_histories_extends_prompt_messages(self, monkeypatch): + def test_append_chat_histories_extends_prompt_messages(self, monkeypatch: pytest.MonkeyPatch): transform = PromptTransform() memory = MagicMock() memory_config = SimpleNamespace(window=SimpleNamespace(enabled=False, size=None)) diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py index 1e91c2dd88..e233bd2ef0 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py @@ -67,7 +67,7 @@ def _dataset(dataset_keyword_table=None, keyword_number=None): @pytest.fixture -def patched_runtime(monkeypatch): +def patched_runtime(monkeypatch: pytest.MonkeyPatch): session = MagicMock() db = SimpleNamespace(session=session) storage = MagicMock() @@ -151,7 +151,7 @@ def test_add_texts_without_keywords_list_always_uses_extractor(monkeypatch, patc assert set(keyword._update_segment_keywords.call_args.args[2]) == {"from-extractor"} -def test_text_exists_handles_missing_and_existing_keyword_table(monkeypatch): +def test_text_exists_handles_missing_and_existing_keyword_table(monkeypatch: 
pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) monkeypatch.setattr(keyword, "_get_dataset_keyword_table", MagicMock(return_value=None)) @@ -308,7 +308,7 @@ def test_add_and_delete_ids_from_keyword_table_helpers(): assert deleted["kw2"] == {"node-2"} -def test_retrieve_ids_by_query_ranks_by_keyword_frequency(monkeypatch): +def test_retrieve_ids_by_query_ranks_by_keyword_frequency(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) handler = MagicMock() handler.extract_keywords.return_value = ["kw-a", "kw-b"] @@ -350,7 +350,7 @@ def test_update_segment_keywords_updates_when_segment_exists(monkeypatch, patche patched_runtime.session.commit.assert_not_called() -def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch): +def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table())) monkeypatch.setattr(keyword, "_get_dataset_keyword_table", MagicMock(return_value={})) monkeypatch.setattr(keyword, "_update_segment_keywords", MagicMock()) @@ -365,7 +365,7 @@ def test_create_segment_keywords_and_update_segment_keywords_index(monkeypatch): keyword._save_dataset_keyword_table.assert_called_once() -def test_multi_create_segment_keywords_uses_provided_and_extracted_keywords(monkeypatch): +def test_multi_create_segment_keywords_uses_provided_and_extracted_keywords(monkeypatch: pytest.MonkeyPatch): keyword = Jieba(_dataset(_dataset_keyword_table(), keyword_number=2)) handler = MagicMock() handler.extract_keywords.return_value = {"auto"} diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py index a4586c141b..c8ee75bf43 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py +++ 
b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba_keyword_table_handler.py @@ -2,6 +2,8 @@ import sys import types from types import SimpleNamespace +import pytest + from core.rag.datasource.keyword.jieba.jieba_keyword_table_handler import JiebaKeywordTableHandler from core.rag.datasource.keyword.jieba.stopwords import STOPWORDS @@ -38,7 +40,7 @@ def _install_fake_jieba_modules( monkeypatch.delitem(sys.modules, "jieba.analyse.tfidf", raising=False) -def test_init_uses_existing_default_tfidf(monkeypatch): +def test_init_uses_existing_default_tfidf(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") default_tfidf = _DummyTFIDF() analyse_module.default_tfidf = default_tfidf @@ -51,7 +53,7 @@ def test_init_uses_existing_default_tfidf(monkeypatch): assert handler._tfidf.stop_words == STOPWORDS -def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch): +def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None @@ -67,7 +69,7 @@ def test_load_tfidf_extractor_uses_tfidf_class_and_caches_default(monkeypatch): assert analyse_module.default_tfidf is handler._tfidf -def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch): +def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None @@ -85,7 +87,7 @@ def test_load_tfidf_extractor_imports_from_tfidf_submodule(monkeypatch): assert analyse_module.default_tfidf is handler._tfidf -def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch): +def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") analyse_module.default_tfidf = None _install_fake_jieba_modules(monkeypatch, analyse_module) @@ 
-96,7 +98,7 @@ def test_load_tfidf_extractor_falls_back_when_tfidf_unavailable(monkeypatch): assert fallback_keywords == ["two"] -def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch): +def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") _install_fake_jieba_modules(monkeypatch, analyse_module, jieba_attrs={"lcut": lambda _: ["x", "x", "y"]}) @@ -105,7 +107,7 @@ def test_build_fallback_tfidf_uses_lcut_when_available(monkeypatch): assert tfidf.extract_tags("ignored", topK=1) == ["x"] -def test_build_fallback_tfidf_uses_cut_when_lcut_is_missing(monkeypatch): +def test_build_fallback_tfidf_uses_cut_when_lcut_is_missing(monkeypatch: pytest.MonkeyPatch): analyse_module = types.ModuleType("jieba.analyse") _install_fake_jieba_modules( monkeypatch, diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py b/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py index 0d969a3270..e1765b17cb 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/test_keyword_factory.py @@ -10,7 +10,7 @@ from core.rag.datasource.keyword.keyword_type import KeyWordType from core.rag.models.document import Document -def test_get_keyword_factory_returns_jieba_factory(monkeypatch): +def test_get_keyword_factory_returns_jieba_factory(monkeypatch: pytest.MonkeyPatch): fake_module = types.ModuleType("core.rag.datasource.keyword.jieba.jieba") class FakeJieba: @@ -27,7 +27,7 @@ def test_get_keyword_factory_raises_for_unsupported_type(): Keyword.get_keyword_factory("unsupported") -def test_keyword_initialization_uses_configured_factory(monkeypatch): +def test_keyword_initialization_uses_configured_factory(monkeypatch: pytest.MonkeyPatch): dataset = SimpleNamespace(id="dataset-1") fake_processor = MagicMock() diff --git 
a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py index b0ecad4d0c..d38213dd89 100644 --- a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py +++ b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py @@ -182,7 +182,7 @@ class TestRetrievalServiceInternals: app.app_context.return_value.__exit__.return_value = False return app - def test_retrieve_with_attachment_ids_only(self, monkeypatch, internal_dataset): + def test_retrieve_with_attachment_ids_only(self, monkeypatch: pytest.MonkeyPatch, internal_dataset): with ( patch("core.rag.datasource.retrieval_service.RetrievalService._get_dataset", return_value=internal_dataset), patch("core.rag.datasource.retrieval_service.RetrievalService._retrieve") as mock_retrieve, @@ -699,7 +699,9 @@ class TestRetrievalServiceInternals: assert RetrievalService.format_retrieval_documents(documents) == [] - def test_format_retrieval_documents_with_parent_child_summary_and_attachments(self, monkeypatch): + def test_format_retrieval_documents_with_parent_child_summary_and_attachments( + self, monkeypatch: pytest.MonkeyPatch + ): dataset_doc_parent = SimpleNamespace( id="doc-parent", doc_form=IndexStructureType.PARENT_CHILD_INDEX, @@ -877,7 +879,7 @@ class TestRetrievalServiceInternals: assert result_by_segment_id["segment-parent-summary"].summary == "summary for parent" assert result_by_segment_id["segment-parent-summary"].child_chunks == [] - def test_format_retrieval_documents_rolls_back_and_raises_when_db_fails(self, monkeypatch): + def test_format_retrieval_documents_rolls_back_and_raises_when_db_fails(self, monkeypatch: pytest.MonkeyPatch): rollback = Mock() monkeypatch.setattr(retrieval_service_module.db.session, "rollback", rollback) monkeypatch.setattr(retrieval_service_module.db.session, "scalars", Mock(side_effect=RuntimeError("db error"))) @@ -936,7 +938,7 @@ class TestRetrievalServiceInternals: 
future_ok.cancel.assert_called() def test_retrieve_internal_raises_value_error_when_exceptions_exist( - self, monkeypatch, internal_dataset, internal_flask_app + self, monkeypatch: pytest.MonkeyPatch, internal_dataset, internal_flask_app ): executor = _ImmediateExecutor() monkeypatch.setattr(retrieval_service_module, "ThreadPoolExecutor", lambda *args, **kwargs: executor) @@ -958,7 +960,9 @@ class TestRetrievalServiceInternals: query="query", ) - def test_retrieve_internal_hybrid_weighted_attachment_flow(self, monkeypatch, internal_dataset, internal_flask_app): + def test_retrieve_internal_hybrid_weighted_attachment_flow( + self, monkeypatch: pytest.MonkeyPatch, internal_dataset, internal_flask_app + ): executor = _ImmediateExecutor() monkeypatch.setattr(retrieval_service_module, "ThreadPoolExecutor", lambda *args, **kwargs: executor) monkeypatch.setattr( diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py index 7b6ee97f1c..067159398d 100644 --- a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py +++ b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py @@ -102,7 +102,9 @@ def test_gen_index_struct_dict(vector_factory_module): ("HOLOGRES", "dify_vdb_hologres.hologres_vector", "HologresVectorFactory"), ], ) -def test_get_vector_factory_supported(vector_factory_module, monkeypatch, vector_type, module_path, class_name): +def test_get_vector_factory_supported( + vector_factory_module, monkeypatch: pytest.MonkeyPatch, vector_type, module_path, class_name +): expected_cls = _register_fake_factory_module(monkeypatch, module_path, class_name) result_cls = vector_factory_module.Vector.get_vector_factory(getattr(vector_factory_module.VectorType, vector_type)) @@ -119,7 +121,7 @@ class _PluginChromaFactory: """Stub used only for entry-point override test.""" -def test_get_vector_factory_entry_point_overrides_builtin(vector_factory_module, 
monkeypatch): +def test_get_vector_factory_entry_point_overrides_builtin(vector_factory_module, monkeypatch: pytest.MonkeyPatch): from importlib.metadata import EntryPoint from core.rag.datasource.vdb import vector_backend_registry as reg @@ -171,7 +173,7 @@ def test_vector_init_uses_default_and_custom_attributes(vector_factory_module): assert default_vector._vector_processor == "processor" -def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_module, monkeypatch): +def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_module, monkeypatch: pytest.MonkeyPatch): """``Vector(dataset)`` must not transitively call ``ModelManager`` during construction. The real embedding model should only be materialized on the first ``embed_*`` call (i.e. create / search paths) so cleanup paths @@ -214,7 +216,7 @@ def test_lazy_embeddings_defer_real_load_until_first_embed_call(vector_factory_m inner_model.embed_documents.assert_called_once_with(["world"]) -def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeypatch): +def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeypatch: pytest.MonkeyPatch): calls = {"vector_type": None, "init_args": None} class _Factory: @@ -242,7 +244,7 @@ def test_init_vector_prefers_dataset_index_struct(vector_factory_module, monkeyp assert calls["init_args"] == (vector._dataset, ["doc_id"], "embeddings") -def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch): +def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch: pytest.MonkeyPatch): class _Expr: def __eq__(self, _other): return "expr" @@ -279,7 +281,7 @@ def test_init_vector_uses_whitelist_override(vector_factory_module, monkeypatch) assert calls["vector_type"] == vector_factory_module.VectorType.TIDB_ON_QDRANT -def test_init_vector_raises_when_vector_store_missing(vector_factory_module, monkeypatch): +def 
test_init_vector_raises_when_vector_store_missing(vector_factory_module, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(vector_factory_module.dify_config, "VECTOR_STORE", None) monkeypatch.setattr(vector_factory_module.dify_config, "VECTOR_STORE_WHITELIST_ENABLE", False) @@ -343,7 +345,7 @@ def test_create_skips_empty_text_documents_before_embedding(vector_factory_modul vector._vector_processor.create.assert_not_called() -def test_create_multimodal_filters_missing_uploads(vector_factory_module, monkeypatch): +def test_create_multimodal_filters_missing_uploads(vector_factory_module, monkeypatch: pytest.MonkeyPatch): class _Field: def in_(self, value): return value @@ -484,7 +486,7 @@ def test_vector_delegation_methods(vector_factory_module): vector._vector_processor.delete_by_metadata_field.assert_called_once_with("doc_id", "doc-1") -def test_search_by_file_handles_missing_and_existing_upload(vector_factory_module, monkeypatch): +def test_search_by_file_handles_missing_and_existing_upload(vector_factory_module, monkeypatch: pytest.MonkeyPatch): vector = vector_factory_module.Vector.__new__(vector_factory_module.Vector) vector._embeddings = MagicMock() vector._vector_processor = MagicMock() @@ -507,7 +509,7 @@ def test_search_by_file_handles_missing_and_existing_upload(vector_factory_modul assert payload["file_id"] == "file-2" -def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, monkeypatch): +def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, monkeypatch: pytest.MonkeyPatch): delete_mock = MagicMock() redis_delete = MagicMock() monkeypatch.setattr(vector_factory_module.redis_client, "delete", redis_delete) @@ -526,7 +528,7 @@ def test_delete_clears_redis_cache_when_collection_exists(vector_factory_module, redis_delete.assert_not_called() -def test_get_embeddings_builds_cache_embedding(vector_factory_module, monkeypatch): +def test_get_embeddings_builds_cache_embedding(vector_factory_module, 
monkeypatch: pytest.MonkeyPatch): model_manager = MagicMock() model_manager.get_model_instance.return_value = "model-instance" diff --git a/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py index e6a06f163e..2e1c5715c2 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_csv_extractor.py @@ -39,7 +39,7 @@ class TestCSVExtractor: with pytest.raises(ValueError, match="Source column 'missing_col' not found"): extractor.extract() - def test_extract_wraps_unicode_error_when_autodetect_disabled(self, monkeypatch): + def test_extract_wraps_unicode_error_when_autodetect_disabled(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=False) def raise_decode(*args, **kwargs): @@ -50,7 +50,7 @@ class TestCSVExtractor: with pytest.raises(RuntimeError, match="Error loading dummy.csv"): extractor.extract() - def test_extract_autodetect_encoding_success(self, monkeypatch): + def test_extract_autodetect_encoding_success(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=True) attempted_encodings: list[str | None] = [] @@ -75,7 +75,7 @@ class TestCSVExtractor: assert docs[0].page_content == "id: source-1;body: hello" assert attempted_encodings == [None, "bad", "utf-8"] - def test_extract_autodetect_encoding_all_attempts_fail_returns_empty(self, monkeypatch): + def test_extract_autodetect_encoding_all_attempts_fail_returns_empty(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv", autodetect_encoding=True) def always_raise(*args, **kwargs): @@ -86,7 +86,7 @@ class TestCSVExtractor: assert extractor.extract() == [] - def test_read_from_file_re_raises_csv_error(self, monkeypatch): + def test_read_from_file_re_raises_csv_error(self, monkeypatch: pytest.MonkeyPatch): extractor = CSVExtractor("dummy.csv") monkeypatch.setattr(pd, 
"read_csv", lambda *args, **kwargs: (_ for _ in ()).throw(csv.Error("bad csv"))) diff --git a/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py index d2bcc1e2c4..2b42adc716 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_excel_extractor.py @@ -45,7 +45,7 @@ class _FakeWorkbook: class TestExcelExtractor: - def test_extract_xlsx_with_hyperlinks_and_sheet_skip(self, monkeypatch): + def test_extract_xlsx_with_hyperlinks_and_sheet_skip(self, monkeypatch: pytest.MonkeyPatch): sheet_with_data = _FakeSheet( header_rows=[("Name", "Link")], data_rows=[ @@ -68,7 +68,7 @@ class TestExcelExtractor: assert docs[1].page_content == '"Name":"";"Link":"123"' assert all(doc.metadata["source"] == "/tmp/sample.xlsx" for doc in docs) - def test_extract_xls_path(self, monkeypatch): + def test_extract_xls_path(self, monkeypatch: pytest.MonkeyPatch): class FakeExcelFile: sheet_names = ["Sheet1"] diff --git a/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py b/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py index 5beed88971..b4b08f57ec 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_extract_processor.py @@ -56,7 +56,7 @@ def _patch_all_extractors(monkeypatch) -> _ExtractorFactory: class TestExtractProcessorLoaders: - def test_load_from_upload_file_return_docs_and_text(self, monkeypatch): + def test_load_from_upload_file_return_docs_and_text(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(processor_module, "ExtractSetting", lambda **kwargs: SimpleNamespace(**kwargs)) monkeypatch.setattr( @@ -93,7 +93,9 @@ class TestExtractProcessorLoaders: ), ], ) - def test_load_from_url_builds_temp_file_with_correct_suffix(self, monkeypatch, url, headers, expected_suffix): + def 
test_load_from_url_builds_temp_file_with_correct_suffix( + self, monkeypatch: pytest.MonkeyPatch, url, headers, expected_suffix + ): response = SimpleNamespace(headers=headers, content=b"body") monkeypatch.setattr(processor_module.ssrf_proxy, "get", lambda *args, **kwargs: response) monkeypatch.setattr(processor_module, "ExtractSetting", lambda **kwargs: SimpleNamespace(**kwargs)) @@ -119,11 +121,13 @@ class TestExtractProcessorLoaders: class TestExtractProcessorFileRouting: @pytest.fixture(autouse=True) - def _set_unstructured_config(self, monkeypatch): + def _set_unstructured_config(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(processor_module.dify_config, "UNSTRUCTURED_API_URL", "https://unstructured") monkeypatch.setattr(processor_module.dify_config, "UNSTRUCTURED_API_KEY", "key") - def _run_extract_for_extension(self, monkeypatch, extension: str, etl_type: str, is_automatic: bool = False): + def _run_extract_for_extension( + self, monkeypatch: pytest.MonkeyPatch, extension: str, etl_type: str, is_automatic: bool = False + ): factory = _patch_all_extractors(monkeypatch) monkeypatch.setattr(processor_module.dify_config, "ETL_TYPE", etl_type) @@ -167,7 +171,7 @@ class TestExtractProcessorFileRouting: ], ) def test_extract_routes_file_extensions_for_unstructured_mode( - self, monkeypatch, extension, expected_extractor, is_automatic + self, monkeypatch: pytest.MonkeyPatch, extension, expected_extractor, is_automatic ): extractor_name, args, kwargs = self._run_extract_for_extension( monkeypatch, extension, etl_type="Unstructured", is_automatic=is_automatic @@ -189,7 +193,9 @@ class TestExtractProcessorFileRouting: (".txt", "TextExtractor"), ], ) - def test_extract_routes_file_extensions_for_default_mode(self, monkeypatch, extension, expected_extractor): + def test_extract_routes_file_extensions_for_default_mode( + self, monkeypatch: pytest.MonkeyPatch, extension, expected_extractor + ): extractor_name, _, _ = 
self._run_extract_for_extension(monkeypatch, extension, etl_type="SelfHosted") assert extractor_name == expected_extractor @@ -202,7 +208,7 @@ class TestExtractProcessorFileRouting: class TestExtractProcessorDatasourceRouting: - def test_extract_routes_notion_datasource(self, monkeypatch): + def test_extract_routes_notion_datasource(self, monkeypatch: pytest.MonkeyPatch): factory = _patch_all_extractors(monkeypatch) notion_info = SimpleNamespace( @@ -228,7 +234,9 @@ class TestExtractProcessorDatasourceRouting: ("jinareader", "JinaReaderWebExtractor"), ], ) - def test_extract_routes_website_datasource_providers(self, monkeypatch, provider: str, expected: str): + def test_extract_routes_website_datasource_providers( + self, monkeypatch: pytest.MonkeyPatch, provider: str, expected: str + ): factory = _patch_all_extractors(monkeypatch) website_info = SimpleNamespace( diff --git a/api/tests/unit_tests/core/rag/extractor/test_helpers.py b/api/tests/unit_tests/core/rag/extractor/test_helpers.py index 74387f749d..1c6f97ec53 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_helpers.py +++ b/api/tests/unit_tests/core/rag/extractor/test_helpers.py @@ -21,7 +21,7 @@ class TestHelpers: # Assert the language field for full coverage assert encodings[0].language is not None - def test_detect_file_encodings_timeout(self, monkeypatch): + def test_detect_file_encodings_timeout(self, monkeypatch: pytest.MonkeyPatch): class FakeFuture: def result(self, timeout=None): raise helpers.concurrent.futures.TimeoutError() @@ -41,7 +41,7 @@ class TestHelpers: with pytest.raises(TimeoutError, match="Timeout reached while detecting encoding"): detect_file_encodings("file.txt", timeout=1) - def test_detect_file_encodings_raises_when_encoding_not_detected(self, monkeypatch): + def test_detect_file_encodings_raises_when_encoding_not_detected(self, monkeypatch: pytest.MonkeyPatch): class FakeResult: encoding = None coherence = 0.0 diff --git 
a/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py index 7e78c86c7d..8ede44ec04 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_markdown_extractor.py @@ -74,7 +74,7 @@ after assert "[link]" not in tups[1][1] assert "img.png" not in tups[1][1] - def test_parse_tups_autodetects_encoding_after_decode_error(self, monkeypatch): + def test_parse_tups_autodetects_encoding_after_decode_error(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path", autodetect_encoding=True) calls: list[str | None] = [] @@ -99,7 +99,7 @@ after assert len(tups) == 2 assert calls == [None, "bad-encoding", "utf-8"] - def test_parse_tups_decode_error_with_autodetect_disabled_raises(self, monkeypatch): + def test_parse_tups_decode_error_with_autodetect_disabled_raises(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path", autodetect_encoding=False) def raise_decode(self, encoding=None): @@ -110,7 +110,7 @@ after with pytest.raises(RuntimeError, match="Error loading dummy_path"): extractor.parse_tups("dummy_path") - def test_parse_tups_other_exceptions_are_wrapped(self, monkeypatch): + def test_parse_tups_other_exceptions_are_wrapped(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path") def raise_other(self, encoding=None): @@ -121,7 +121,7 @@ after with pytest.raises(RuntimeError, match="Error loading dummy_path"): extractor.parse_tups("dummy_path") - def test_extract_builds_documents_for_header_and_non_header(self, monkeypatch): + def test_extract_builds_documents_for_header_and_non_header(self, monkeypatch: pytest.MonkeyPatch): extractor = MarkdownExtractor(file_path="dummy_path") monkeypatch.setattr(extractor, "parse_tups", lambda _: [(None, "plain"), ("Header", "value")]) diff --git 
a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py index 808e41867e..49f7b592dc 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_notion_extractor.py @@ -28,7 +28,7 @@ class TestNotionExtractorInitAndPublicMethods: assert extractor._notion_access_token == "token" - def test_init_falls_back_to_env_token_when_credential_lookup_fails(self, monkeypatch): + def test_init_falls_back_to_env_token_when_credential_lookup_fails(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( notion_extractor.NotionExtractor, "_get_access_token", @@ -46,7 +46,7 @@ class TestNotionExtractorInitAndPublicMethods: assert extractor._notion_access_token == "env-token" - def test_init_raises_if_no_credential_and_no_env_token(self, monkeypatch): + def test_init_raises_if_no_credential_and_no_env_token(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( notion_extractor.NotionExtractor, "_get_access_token", @@ -63,7 +63,7 @@ class TestNotionExtractorInitAndPublicMethods: credential_id="cred", ) - def test_extract_updates_last_edited_and_loads_documents(self, monkeypatch): + def test_extract_updates_last_edited_and_loads_documents(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -83,7 +83,7 @@ class TestNotionExtractorInitAndPublicMethods: load_mock.assert_called_once_with("obj", "page") assert len(docs) == 1 - def test_load_data_as_documents_page_database_and_invalid(self, monkeypatch): + def test_load_data_as_documents_page_database_and_invalid(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -394,7 +394,7 @@ class TestNotionMetadataAndCredentialMethods: assert extractor.update_last_edited_time(None) is None - def 
test_update_last_edited_time_updates_document_and_commits(self, monkeypatch): + def test_update_last_edited_time_updates_document_and_commits(self, monkeypatch: pytest.MonkeyPatch): extractor = notion_extractor.NotionExtractor( notion_workspace_id="ws", notion_obj_id="obj", @@ -479,7 +479,7 @@ class TestNotionMetadataAndCredentialMethods: with pytest.raises(AssertionError, match="Notion access token is required"): extractor.get_notion_last_edited_time() - def test_get_access_token_success_and_errors(self, monkeypatch): + def test_get_access_token_success_and_errors(self, monkeypatch: pytest.MonkeyPatch): with pytest.raises(Exception, match="No credential id found"): notion_extractor.NotionExtractor._get_access_token("tenant", None) diff --git a/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py index 47222a23a2..f2caf02d5e 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_pdf_extractor.py @@ -7,7 +7,7 @@ import core.rag.extractor.pdf_extractor as pe @pytest.fixture -def mock_dependencies(monkeypatch): +def mock_dependencies(monkeypatch: pytest.MonkeyPatch): # Mock storage saves = [] @@ -61,7 +61,9 @@ def mock_dependencies(monkeypatch): (b"\x89PNG\r\n\x1a\n some png", "image/png", "png", "test_file_id_png"), ], ) -def test_extract_images_formats(mock_dependencies, monkeypatch, image_bytes, expected_mime, expected_ext, file_id): +def test_extract_images_formats( + mock_dependencies, monkeypatch: pytest.MonkeyPatch, image_bytes, expected_mime, expected_ext, file_id +): saves = mock_dependencies.saves db_stub = mock_dependencies.db @@ -122,7 +124,7 @@ def test_extract_images_get_objects_scenarios(mock_dependencies, get_objects_sid assert result == "" -def test_extract_calls_extract_images(mock_dependencies, monkeypatch): +def test_extract_calls_extract_images(mock_dependencies, monkeypatch: pytest.MonkeyPatch): # Mock 
pypdfium2 mock_pdf_doc = MagicMock() mock_page = MagicMock() diff --git a/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py index fb3c6e52c6..71046d73af 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_text_extractor.py @@ -19,7 +19,7 @@ class TestTextExtractor: assert docs[0].page_content == "hello world" assert docs[0].metadata == {"source": str(file_path)} - def test_extract_autodetect_success_after_decode_error(self, monkeypatch): + def test_extract_autodetect_success_after_decode_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=True) calls = [] @@ -44,7 +44,7 @@ class TestTextExtractor: assert docs[0].page_content == "decoded text" assert calls == [None, "bad", "utf-8"] - def test_extract_autodetect_all_fail_raises_runtime_error(self, monkeypatch): + def test_extract_autodetect_all_fail_raises_runtime_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=True) def always_decode_error(self, encoding=None): @@ -56,7 +56,7 @@ class TestTextExtractor: with pytest.raises(RuntimeError, match="all detected encodings failed"): extractor.extract() - def test_extract_decode_error_without_autodetect_raises_runtime_error(self, monkeypatch): + def test_extract_decode_error_without_autodetect_raises_runtime_error(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt", autodetect_encoding=False) def always_decode_error(self, encoding=None): @@ -67,7 +67,7 @@ class TestTextExtractor: with pytest.raises(RuntimeError, match="specified encoding failed"): extractor.extract() - def test_extract_wraps_non_decode_exceptions(self, monkeypatch): + def test_extract_wraps_non_decode_exceptions(self, monkeypatch: pytest.MonkeyPatch): extractor = TextExtractor("dummy.txt") def raise_other(self, 
encoding=None): diff --git a/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py b/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py index b9f2449cfb..513d232d7f 100644 --- a/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py +++ b/api/tests/unit_tests/core/rag/extractor/test_word_extractor.py @@ -61,7 +61,7 @@ def test_parse_row(): assert extractor._parse_row(row, {}, 3) == gt[idx] -def test_init_downloads_via_ssrf_proxy(monkeypatch): +def test_init_downloads_via_ssrf_proxy(monkeypatch: pytest.MonkeyPatch): doc = Document() doc.add_paragraph("hello") buf = io.BytesIO() @@ -97,7 +97,7 @@ def test_init_downloads_via_ssrf_proxy(monkeypatch): extractor.temp_file.close() -def test_extract_images_from_docx(monkeypatch): +def test_extract_images_from_docx(monkeypatch: pytest.MonkeyPatch): external_bytes = b"ext-bytes" internal_bytes = b"int-bytes" @@ -210,7 +210,7 @@ def test_extract_images_from_docx_uses_internal_files_url(): dify_config.INTERNAL_FILES_URL = original_internal_files_url -def test_extract_hyperlinks(monkeypatch): +def test_extract_hyperlinks(monkeypatch: pytest.MonkeyPatch): # Mock db and storage to avoid issues during image extraction (even if no images are present) monkeypatch.setattr(we, "storage", SimpleNamespace(save=lambda k, d: None)) db_stub = SimpleNamespace(session=SimpleNamespace(add=lambda o: None, commit=lambda: None)) @@ -255,7 +255,7 @@ def test_extract_hyperlinks(monkeypatch): os.remove(tmp_path) -def test_extract_legacy_hyperlinks(monkeypatch): +def test_extract_legacy_hyperlinks(monkeypatch: pytest.MonkeyPatch): # Mock db and storage monkeypatch.setattr(we, "storage", SimpleNamespace(save=lambda k, d: None)) db_stub = SimpleNamespace(session=SimpleNamespace(add=lambda o: None, commit=lambda: None)) @@ -317,7 +317,7 @@ def test_extract_legacy_hyperlinks(monkeypatch): os.remove(tmp_path) -def test_init_rejects_invalid_url_status(monkeypatch): +def test_init_rejects_invalid_url_status(monkeypatch: 
pytest.MonkeyPatch): class FakeResponse: status_code = 404 content = b"" @@ -392,7 +392,7 @@ def test_close_closes_awaitable_close_result(): extractor.temp_file.close.assert_called_once() -def test_extract_images_handles_invalid_external_cases(monkeypatch): +def test_extract_images_handles_invalid_external_cases(monkeypatch: pytest.MonkeyPatch): class FakeTargetRef: def __contains__(self, item): return item == "image" @@ -437,7 +437,7 @@ def test_extract_images_handles_invalid_external_cases(monkeypatch): db_stub.session.commit.assert_called_once() -def test_table_to_markdown_and_parse_helpers(monkeypatch): +def test_table_to_markdown_and_parse_helpers(monkeypatch: pytest.MonkeyPatch): extractor = object.__new__(WordExtractor) table = SimpleNamespace( @@ -500,7 +500,7 @@ def test_table_to_markdown_and_parse_helpers(monkeypatch): assert extractor._parse_cell(cell, image_map) == "EXT-IMGINT-IMGplain" -def test_parse_docx_covers_drawing_shapes_hyperlink_error_and_table_branch(monkeypatch): +def test_parse_docx_covers_drawing_shapes_hyperlink_error_and_table_branch(monkeypatch: pytest.MonkeyPatch): extractor = object.__new__(WordExtractor) ext_image_id = "ext-image" diff --git a/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py b/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py index 26ce333e11..19fb385a6d 100644 --- a/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py +++ b/api/tests/unit_tests/core/rag/extractor/unstructured/test_unstructured_extractors.py @@ -45,7 +45,7 @@ def _install_chunk_by_title(monkeypatch: pytest.MonkeyPatch, chunks: list[Simple class TestUnstructuredMarkdownMsgXml: - def test_markdown_extractor_without_api(self, monkeypatch): + def test_markdown_extractor_without_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text=" chunk-1 "), SimpleNamespace(text=" chunk-2 ")]) _register_module( 
monkeypatch, "unstructured.partition.md", partition_md=lambda filename: [SimpleNamespace(text="x")] @@ -55,7 +55,7 @@ class TestUnstructuredMarkdownMsgXml: assert [doc.page_content for doc in docs] == ["chunk-1", "chunk-2"] - def test_markdown_extractor_with_api(self, monkeypatch): + def test_markdown_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text=" via-api ")]) calls = {} @@ -70,7 +70,7 @@ class TestUnstructuredMarkdownMsgXml: assert docs[0].page_content == "via-api" assert calls == {"filename": "/tmp/file.md", "api_url": "https://u", "api_key": "k"} - def test_msg_extractor_local(self, monkeypatch): + def test_msg_extractor_local(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="msg-doc")]) _register_module( monkeypatch, "unstructured.partition.msg", partition_msg=lambda filename: [SimpleNamespace(text="x")] @@ -78,7 +78,7 @@ class TestUnstructuredMarkdownMsgXml: assert UnstructuredMsgExtractor("/tmp/file.msg").extract()[0].page_content == "msg-doc" - def test_msg_extractor_with_api(self, monkeypatch): + def test_msg_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="msg-doc")]) calls = {} @@ -94,7 +94,7 @@ class TestUnstructuredMarkdownMsgXml: ) assert calls["filename"] == "/tmp/file.msg" - def test_xml_extractor_local_and_api(self, monkeypatch): + def test_xml_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="xml-doc")]) xml_calls = {} @@ -124,7 +124,7 @@ class TestUnstructuredMarkdownMsgXml: class TestUnstructuredEmailAndEpub: - def test_email_extractor_local_decodes_html_and_suppresses_decode_errors(self, monkeypatch): + def test_email_extractor_local_decodes_html_and_suppresses_decode_errors(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) captured = {} @@ -150,7 
+150,7 @@ class TestUnstructuredEmailAndEpub: assert "Hello Email" in chunk_elements[0].text assert chunk_elements[1].text == bad_base64 - def test_email_extractor_with_api(self, monkeypatch): + def test_email_extractor_with_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="api-email")]) _register_module( monkeypatch, @@ -162,7 +162,7 @@ class TestUnstructuredEmailAndEpub: assert docs[0].page_content == "api-email" - def test_epub_extractor_local_and_api(self, monkeypatch): + def test_epub_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _install_chunk_by_title(monkeypatch, [SimpleNamespace(text="epub-doc")]) calls = {"download": 0, "partition": 0} @@ -198,7 +198,7 @@ class TestUnstructuredPPTAndPPTX: with pytest.raises(NotImplementedError, match="Unstructured API Url is not configured"): UnstructuredPPTExtractor("/tmp/file.ppt").extract() - def test_ppt_extractor_groups_text_by_page(self, monkeypatch): + def test_ppt_extractor_groups_text_by_page(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) _register_module( monkeypatch, @@ -215,7 +215,7 @@ class TestUnstructuredPPTAndPPTX: assert [doc.page_content for doc in docs] == ["A\nB", "C"] - def test_pptx_extractor_local_and_api(self, monkeypatch): + def test_pptx_extractor_local_and_api(self, monkeypatch: pytest.MonkeyPatch): _register_unstructured_packages(monkeypatch) _register_module( monkeypatch, @@ -244,7 +244,7 @@ class TestUnstructuredPPTAndPPTX: class TestUnstructuredWord: - def _install_doc_modules(self, monkeypatch, version: str, filetype_value): + def _install_doc_modules(self, monkeypatch: pytest.MonkeyPatch, version: str, filetype_value): _register_unstructured_packages(monkeypatch) class FileType: @@ -276,13 +276,13 @@ class TestUnstructuredWord: ], ) - def test_word_extractor_rejects_doc_on_old_unstructured_version(self, monkeypatch): + def 
test_word_extractor_rejects_doc_on_old_unstructured_version(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.10", filetype_value="doc") with pytest.raises(ValueError, match="Partitioning .doc files is only supported"): UnstructuredWordExtractor("/tmp/file.doc", "https://u", "k").extract() - def test_word_extractor_doc_and_docx_paths(self, monkeypatch): + def test_word_extractor_doc_and_docx_paths(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.11", filetype_value="doc") docs = UnstructuredWordExtractor("/tmp/file.doc", "https://u", "k").extract() @@ -292,7 +292,7 @@ class TestUnstructuredWord: docs = UnstructuredWordExtractor("/tmp/file.docx", "https://u", "k").extract() assert [doc.page_content for doc in docs] == ["chunk-1", "chunk-2"] - def test_word_extractor_magic_import_error_fallback_to_extension(self, monkeypatch): + def test_word_extractor_magic_import_error_fallback_to_extension(self, monkeypatch: pytest.MonkeyPatch): self._install_doc_modules(monkeypatch, version="0.4.10", filetype_value="not-used") monkeypatch.setitem(sys.modules, "magic", None) diff --git a/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py b/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py index d758be218a..95878fc688 100644 --- a/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py +++ b/api/tests/unit_tests/core/rag/extractor/watercrawl/test_watercrawl.py @@ -59,7 +59,7 @@ class TestWaterCrawlExceptions: class TestBaseAPIClient: - def test_init_session_builds_expected_headers(self, monkeypatch): + def test_init_session_builds_expected_headers(self, monkeypatch: pytest.MonkeyPatch): captured = {} def fake_client(**kwargs): @@ -74,7 +74,7 @@ class TestBaseAPIClient: assert captured["headers"]["X-API-Key"] == "k" assert captured["headers"]["User-Agent"] == "WaterCrawl-Plugin" - def test_request_stream_and_non_stream_paths(self, monkeypatch): + 
def test_request_stream_and_non_stream_paths(self, monkeypatch: pytest.MonkeyPatch): class FakeSession: def __init__(self): self.request_calls = [] @@ -106,7 +106,7 @@ class TestBaseAPIClient: assert fake_session.build_calls assert fake_session.send_calls[0][1] is True - def test_http_method_helpers_delegate_to_request(self, monkeypatch): + def test_http_method_helpers_delegate_to_request(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(BaseAPIClient, "init_session", lambda self: MagicMock()) client = BaseAPIClient(api_key="k", base_url="https://watercrawl.dev") @@ -127,7 +127,7 @@ class TestBaseAPIClient: class TestWaterCrawlAPIClient: - def test_process_eventstream_and_download(self, monkeypatch): + def test_process_eventstream_and_download(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") response = MagicMock() @@ -174,7 +174,7 @@ class TestWaterCrawlAPIClient: client.process_response(_response(200, content_type="application/octet-stream", content=b"bin")) == b"bin" ) - def test_process_response_event_stream_returns_generator(self, monkeypatch): + def test_process_response_event_stream_returns_generator(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") generator = (item for item in [{"type": "result", "data": {}}]) monkeypatch.setattr(client, "process_eventstream", lambda response, download=False: generator) @@ -193,7 +193,7 @@ class TestWaterCrawlAPIClient: with pytest.raises(RuntimeError, match="http error"): client.process_response(response) - def test_endpoint_wrappers(self, monkeypatch): + def test_endpoint_wrappers(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "process_response", lambda resp: "processed") @@ -208,7 +208,7 @@ class TestWaterCrawlAPIClient: assert client.download_crawl_request("id") == "processed" assert client.get_crawl_request_results("id") == "processed" - def 
test_monitor_crawl_request_generator_and_validation(self, monkeypatch): + def test_monitor_crawl_request_generator_and_validation(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "process_response", lambda _: (x for x in [{"type": "result", "data": 1}])) @@ -221,7 +221,7 @@ class TestWaterCrawlAPIClient: with pytest.raises(ValueError, match="Generator expected"): list(client.monitor_crawl_request("job-1")) - def test_scrape_url_sync_and_async(self, monkeypatch): + def test_scrape_url_sync_and_async(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") monkeypatch.setattr(client, "create_crawl_request", lambda **kwargs: {"uuid": "job-1"}) @@ -238,7 +238,7 @@ class TestWaterCrawlAPIClient: sync_result = client.scrape_url("https://example.com", sync=True) assert sync_result == {"url": "https://example.com"} - def test_download_result_fetches_json_and_closes(self, monkeypatch): + def test_download_result_fetches_json_and_closes(self, monkeypatch: pytest.MonkeyPatch): client = WaterCrawlAPIClient(api_key="k") response = _response(200, {"markdown": "body"}) @@ -251,7 +251,7 @@ class TestWaterCrawlAPIClient: class TestWaterCrawlProvider: - def test_crawl_url_builds_options_and_min_wait_time(self, monkeypatch): + def test_crawl_url_builds_options_and_min_wait_time(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") captured_kwargs = {} @@ -290,7 +290,7 @@ class TestWaterCrawlProvider: assert captured_kwargs["page_options"]["only_main_content"] is False assert captured_kwargs["page_options"]["wait_time"] == 1000 - def test_get_crawl_status_active_and_completed(self, monkeypatch): + def test_get_crawl_status_active_and_completed(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr( @@ -327,7 +327,7 @@ class TestWaterCrawlProvider: assert completed["status"] == "completed" assert completed["data"] == [{"url": 
"u"}] - def test_get_crawl_url_data_and_scrape(self, monkeypatch): + def test_get_crawl_url_data_and_scrape(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr(provider, "scrape_url", lambda url: {"source_url": url}) @@ -339,7 +339,7 @@ class TestWaterCrawlProvider: monkeypatch.setattr(provider, "_get_results", lambda job_id, query_params=None: iter([])) assert provider.get_crawl_url_data("job", "u1") is None - def test_structure_data_validation_and_get_results_pagination(self, monkeypatch): + def test_structure_data_validation_and_get_results_pagination(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") with pytest.raises(ValueError, match="Invalid result object"): @@ -380,7 +380,7 @@ class TestWaterCrawlProvider: assert len(results) == 1 assert results[0]["source_url"] == "https://a" - def test_scrape_url_uses_client_and_structure(self, monkeypatch): + def test_scrape_url_uses_client_and_structure(self, monkeypatch: pytest.MonkeyPatch): provider = WaterCrawlProvider(api_key="k") monkeypatch.setattr( provider.client, "scrape_url", lambda **kwargs: {"result": {"metadata": {}, "markdown": "m"}, "url": "u"} @@ -392,7 +392,7 @@ class TestWaterCrawlProvider: class TestWaterCrawlWebExtractor: - def test_extract_crawl_and_scrape_modes(self, monkeypatch): + def test_extract_crawl_and_scrape_modes(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( "core.rag.extractor.watercrawl.extractor.WebsiteService.get_crawl_url_data", lambda job_id, provider, url, tenant_id: { @@ -418,7 +418,7 @@ class TestWaterCrawlWebExtractor: assert crawl_extractor.extract()[0].page_content == "crawl" assert scrape_extractor.extract()[0].page_content == "scrape" - def test_extract_crawl_returns_empty_when_service_returns_none(self, monkeypatch): + def test_extract_crawl_returns_empty_when_service_returns_none(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( 
"core.rag.extractor.watercrawl.extractor.WebsiteService.get_crawl_url_data", lambda job_id, provider, url, tenant_id: None, diff --git a/api/tests/unit_tests/core/telemetry/test_facade.py b/api/tests/unit_tests/core/telemetry/test_facade.py index 36e8e1bbb1..95d653f55b 100644 --- a/api/tests/unit_tests/core/telemetry/test_facade.py +++ b/api/tests/unit_tests/core/telemetry/test_facade.py @@ -14,7 +14,7 @@ from core.telemetry.events import TelemetryContext, TelemetryEvent @pytest.fixture -def telemetry_test_setup(monkeypatch): +def telemetry_test_setup(monkeypatch: pytest.MonkeyPatch): module_name = "core.ops.ops_trace_manager" ops_stub = types.ModuleType(module_name) diff --git a/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py b/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py index ad6d5906ae..b21a5c3e24 100644 --- a/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py +++ b/api/tests/unit_tests/core/tools/test_builtin_tool_provider.py @@ -78,7 +78,7 @@ def _tool_yaml() -> dict[str, Any]: } -def test_builtin_tool_provider_init_load_tools_and_basic_accessors(monkeypatch): +def test_builtin_tool_provider_init_load_tools_and_basic_accessors(monkeypatch: pytest.MonkeyPatch): yaml_payloads = [_provider_yaml(), _tool_yaml()] def _load_yaml(*args, **kwargs): diff --git a/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py b/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py index c7829fc0d7..3f6b1ec154 100644 --- a/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py +++ b/api/tests/unit_tests/core/tools/test_builtin_tools_extra.py @@ -115,7 +115,7 @@ def test_weekday_tool(): list(weekday_tool.invoke(user_id="u", tool_parameters={"year": 2024, "day": 1})) -def test_simple_code_valid_execution(monkeypatch): +def test_simple_code_valid_execution(monkeypatch: pytest.MonkeyPatch): simple_code = _build_builtin_tool(SimpleCode) monkeypatch.setattr( @@ -138,7 +138,7 @@ def test_simple_code_invalid_language(): 
list(simple_code.invoke(user_id="u", tool_parameters={"language": "go", "code": "fmt.Println(1)"})) -def test_simple_code_execution_error(monkeypatch): +def test_simple_code_execution_error(monkeypatch: pytest.MonkeyPatch): simple_code = _build_builtin_tool(SimpleCode) monkeypatch.setattr( @@ -155,14 +155,14 @@ def test_webscraper_empty_url(): assert empty == "Please input url" -def test_webscraper_fetch(monkeypatch): +def test_webscraper_fetch(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr("core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", lambda *a, **k: "page") full = list(webscraper.invoke(user_id="u", tool_parameters={"url": "https://example.com"}))[0].message.text assert full == "page" -def test_webscraper_summary(monkeypatch): +def test_webscraper_summary(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr("core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", lambda *a, **k: "page") monkeypatch.setattr(webscraper, "summary", lambda user_id, content: "summary") @@ -175,7 +175,7 @@ def test_webscraper_summary(monkeypatch): assert summarized == "summary" -def test_webscraper_fetch_error(monkeypatch): +def test_webscraper_fetch_error(monkeypatch: pytest.MonkeyPatch): webscraper = _build_builtin_tool(WebscraperTool) monkeypatch.setattr( "core.tools.builtin_tool.providers.webscraper.tools.webscraper.get_url", @@ -192,7 +192,7 @@ def test_asr_invalid_file(): assert "not a valid audio file" in invalid_file -def test_asr_valid_file_invocation(monkeypatch): +def test_asr_valid_file_invocation(monkeypatch: pytest.MonkeyPatch): asr = _build_builtin_tool(ASRTool) model_instance = type("M", (), {"invoke_speech2text": lambda self, file: "transcript"})() model_manager = type("Mgr", (), {"get_model_instance": lambda *a, **k: model_instance})() @@ -209,7 +209,7 @@ def test_asr_valid_file_invocation(monkeypatch): assert 
captured_manager_kwargs == {"tenant_id": "tenant-1", "user_id": "u"} -def test_asr_available_models_and_runtime_parameters(monkeypatch): +def test_asr_available_models_and_runtime_parameters(monkeypatch: pytest.MonkeyPatch): asr = _build_builtin_tool(ASRTool) provider_model = type("PM", (), {"provider": "p", "models": [type("Model", (), {"model": "m"})()]})() monkeypatch.setattr( @@ -220,7 +220,7 @@ def test_asr_available_models_and_runtime_parameters(monkeypatch): assert asr.get_runtime_parameters()[0].name == "model" -def test_tts_invoke_returns_messages(monkeypatch): +def test_tts_invoke_returns_messages(monkeypatch: pytest.MonkeyPatch): tts = _build_builtin_tool(TTSTool) captured_manager_kwargs = {} voices_model_instance = type( @@ -280,7 +280,7 @@ def test_tts_tool_raises_when_voice_unavailable(monkeypatch, voices): list(tts.invoke(user_id="u", tool_parameters={"model": "p#m", "text": "hello"})) -def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch): +def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch: pytest.MonkeyPatch): tts = _build_builtin_tool(TTSTool) model_1 = SimpleNamespace( @@ -307,7 +307,7 @@ def test_tts_tool_get_available_models_and_runtime_parameters(monkeypatch): assert runtime_parameters[1].name == "voice#provider-a#model-a" -def test_provider_classes_and_builtin_sort(monkeypatch): +def test_provider_classes_and_builtin_sort(monkeypatch: pytest.MonkeyPatch): # Use object.__new__ to avoid YAML-loading __init__; only pass-through validation is exercised. # Ensure pass-through _validate_credentials methods are executed. 
AudioToolProvider._validate_credentials(object.__new__(AudioToolProvider), "u", {}) diff --git a/api/tests/unit_tests/core/tools/test_custom_tool.py b/api/tests/unit_tests/core/tools/test_custom_tool.py index f35546b025..f525baeaf2 100644 --- a/api/tests/unit_tests/core/tools/test_custom_tool.py +++ b/api/tests/unit_tests/core/tools/test_custom_tool.py @@ -47,7 +47,7 @@ def test_parsed_response_to_string(): assert ParsedResponse("ok", False).to_string() == "ok" -def test_api_tool_fork_runtime_and_validate_credentials(monkeypatch): +def test_api_tool_fork_runtime_and_validate_credentials(monkeypatch: pytest.MonkeyPatch): tool = _build_tool() forked = tool.fork_tool_runtime(ToolRuntime(tenant_id="tenant-2")) assert isinstance(forked, ApiTool) @@ -184,7 +184,7 @@ def test_get_parameter_value_and_type_conversion_helpers(): assert tool._convert_body_property_type({"anyOf": [{"type": "integer"}]}, "2") == 2 -def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch): +def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch: pytest.MonkeyPatch): openapi = { "parameters": [ {"name": "id", "in": "path", "required": True, "schema": {"type": "string"}}, @@ -236,7 +236,7 @@ def test_do_http_request_builds_arguments_and_handles_invalid_method(monkeypatch invalid_method_tool.do_http_request("https://api.example.com", "TRACE", headers={}, parameters={}) -def test_do_http_request_handles_file_upload_and_invoke_paths(monkeypatch): +def test_do_http_request_handles_file_upload_and_invoke_paths(monkeypatch: pytest.MonkeyPatch): openapi = { "parameters": [], "requestBody": { diff --git a/api/tests/unit_tests/core/tools/test_tool_manager.py b/api/tests/unit_tests/core/tools/test_tool_manager.py index c9b3dfb186..7c7d6eec2d 100644 --- a/api/tests/unit_tests/core/tools/test_tool_manager.py +++ b/api/tests/unit_tests/core/tools/test_tool_manager.py @@ -648,7 +648,7 @@ def test_list_default_builtin_providers_for_postgres_and_mysql(): assert 
providers == provider_records -def test_list_providers_from_api_covers_builtin_api_workflow_and_mcp(monkeypatch): +def test_list_providers_from_api_covers_builtin_api_workflow_and_mcp(monkeypatch: pytest.MonkeyPatch): hardcoded_controller = SimpleNamespace(entity=SimpleNamespace(identity=SimpleNamespace(name="hardcoded"))) plugin_controller = object.__new__(PluginToolProviderController) plugin_controller.entity = SimpleNamespace(identity=SimpleNamespace(name="plugin-provider")) diff --git a/api/tests/unit_tests/core/tools/utils/test_configuration.py b/api/tests/unit_tests/core/tools/utils/test_configuration.py index ae5638784c..9e179536de 100644 --- a/api/tests/unit_tests/core/tools/utils/test_configuration.py +++ b/api/tests/unit_tests/core/tools/utils/test_configuration.py @@ -4,6 +4,8 @@ from collections.abc import Generator from typing import Any from unittest.mock import patch +import pytest + from core.app.entities.app_invoke_entities import InvokeFrom from core.helper.tool_parameter_cache import ToolParameterCache from core.tools.__base.tool import Tool @@ -110,7 +112,7 @@ def test_encrypt_tool_parameters(): assert encrypted["plain"] == "x" -def test_decrypt_tool_parameters_cache_hit_and_miss(monkeypatch): +def test_decrypt_tool_parameters_cache_hit_and_miss(monkeypatch: pytest.MonkeyPatch): manager = _build_manager() with ( @@ -139,7 +141,7 @@ def test_delete_tool_parameters_cache(): mock_delete.assert_called_once() -def test_configuration_manager_decrypt_suppresses_errors(monkeypatch): +def test_configuration_manager_decrypt_suppresses_errors(monkeypatch: pytest.MonkeyPatch): manager = _build_manager() with ( patch.object(ToolParameterCache, "get", return_value=None), diff --git a/api/tests/unit_tests/core/tools/utils/test_message_transformer.py b/api/tests/unit_tests/core/tools/utils/test_message_transformer.py index 5f34135af4..354b395504 100644 --- a/api/tests/unit_tests/core/tools/utils/test_message_transformer.py +++ 
b/api/tests/unit_tests/core/tools/utils/test_message_transformer.py @@ -42,7 +42,7 @@ class _FakeToolFileManager: @pytest.fixture(autouse=True) -def _patch_tool_file_manager(monkeypatch): +def _patch_tool_file_manager(monkeypatch: pytest.MonkeyPatch): # Patch the manager used inside the transformer module monkeypatch.setattr(mt, "ToolFileManager", _FakeToolFileManager) # also ensure predictable URL generation (no need to patch; uses id and extension only) diff --git a/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py b/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py index 6bb86ebe78..081b189745 100644 --- a/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py +++ b/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py @@ -34,7 +34,7 @@ def test_system_encrypter_raises_error_for_invalid_ciphertext(): encrypter.decrypt_params("not-base64") -def test_system_helpers_use_global_cached_instance(monkeypatch): +def test_system_helpers_use_global_cached_instance(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(encryption, "_encrypter", None) monkeypatch.setattr("core.tools.utils.system_encryption.dify_config.SECRET_KEY", "global-secret") diff --git a/api/tests/unit_tests/core/variables/test_segment_type.py b/api/tests/unit_tests/core/variables/test_segment_type.py index d4e862220a..baa2ac2dc7 100644 --- a/api/tests/unit_tests/core/variables/test_segment_type.py +++ b/api/tests/unit_tests/core/variables/test_segment_type.py @@ -233,7 +233,7 @@ class TestSegmentTypeAdditionalMethods: assert SegmentType.GROUP.is_valid([StringSegment(value="b")]) is True assert SegmentType.GROUP.is_valid(["not-segment"]) is False - def test_unreachable_assertion_branch(self, monkeypatch): + def test_unreachable_assertion_branch(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(SegmentType, "is_array_type", lambda self: False) with pytest.raises(AssertionError, match="unreachable"): diff --git 
a/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py b/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py index d7ef781732..a18a36a099 100644 --- a/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py @@ -1,3 +1,5 @@ +from pytest_mock import MockerFixture + from core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY from core.workflow.nodes.datasource.datasource_node import DatasourceNode from core.workflow.nodes.datasource.entities import DatasourceNodeData @@ -44,7 +46,7 @@ class _GraphParams: call_depth = 0 -def test_datasource_node_delegates_to_manager_stream(mocker): +def test_datasource_node_delegates_to_manager_stream(mocker: MockerFixture): # prepare sys variables sys_vars = { "sys": { diff --git a/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py b/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py index f254fc3d09..89433b34e6 100644 --- a/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py @@ -3,6 +3,7 @@ import uuid from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.rag.index_processor.constant.index_type import IndexTechniqueType @@ -50,7 +51,7 @@ def mock_graph_runtime_state(): @pytest.fixture -def mock_index_processor(mocker): +def mock_index_processor(mocker: MockerFixture): """Create mock IndexProcessorProtocol.""" mock_processor = Mock(spec=IndexProcessorProtocol) mocker.patch( @@ -61,7 +62,7 @@ def mock_index_processor(mocker): @pytest.fixture -def mock_summary_index_service(mocker): +def mock_summary_index_service(mocker: MockerFixture): """Create mock 
SummaryIndexServiceProtocol.""" mock_service = Mock(spec=SummaryIndexServiceProtocol) mocker.patch( diff --git a/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py b/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py index e923ee761b..d77a2ce363 100644 --- a/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py @@ -3,6 +3,7 @@ import uuid from unittest.mock import Mock import pytest +from pytest_mock import MockerFixture from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom from core.workflow.nodes.knowledge_retrieval.entities import ( @@ -56,7 +57,7 @@ def mock_graph_runtime_state(): @pytest.fixture -def mock_rag_retrieval(mocker): +def mock_rag_retrieval(mocker: MockerFixture): """Create mock RAGRetrievalProtocol.""" mock_retrieval = Mock(spec=RAGRetrievalProtocol) mock_retrieval.knowledge_retrieval.return_value = [] diff --git a/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py b/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py index c707cf28cd..c09f2d3fb6 100644 --- a/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py @@ -222,7 +222,7 @@ def llm_node( @pytest.fixture -def model_config(monkeypatch): +def model_config(monkeypatch: pytest.MonkeyPatch): from tests.integration_tests.model_runtime.__mock.plugin_model import MockModelClass def mock_model_providers(_self): @@ -1276,7 +1276,7 @@ class TestSaveMultimodalOutputAndConvertResultToMarkdown: mock_file_saver.save_binary_string.assert_not_called() mock_file_saver.save_remote_url.assert_not_called() - def test_image_content_with_inline_data(self, llm_node_for_multimodal, monkeypatch): + def test_image_content_with_inline_data(self, llm_node_for_multimodal, monkeypatch: 
pytest.MonkeyPatch): llm_node, mock_file_saver = llm_node_for_multimodal image_raw_data = b"PNG_DATA" diff --git a/api/tests/unit_tests/core/workflow/test_node_factory.py b/api/tests/unit_tests/core/workflow/test_node_factory.py index 1821f72e0c..e93a7c7ccd 100644 --- a/api/tests/unit_tests/core/workflow/test_node_factory.py +++ b/api/tests/unit_tests/core/workflow/test_node_factory.py @@ -88,7 +88,7 @@ class TestFetchMemory: assert result is None - def test_returns_none_when_conversation_does_not_exist(self, monkeypatch): + def test_returns_none_when_conversation_does_not_exist(self, monkeypatch: pytest.MonkeyPatch): class FakeSelect: def where(self, *_args): return self @@ -119,7 +119,7 @@ class TestFetchMemory: assert result is None - def test_builds_token_buffer_memory_for_existing_conversation(self, monkeypatch): + def test_builds_token_buffer_memory_for_existing_conversation(self, monkeypatch: pytest.MonkeyPatch): conversation = sentinel.conversation memory = sentinel.memory @@ -189,7 +189,7 @@ class TestDifyGraphInitContext: class TestDefaultWorkflowCodeExecutor: - def test_execute_delegates_to_code_executor(self, monkeypatch): + def test_execute_delegates_to_code_executor(self, monkeypatch: pytest.MonkeyPatch): executor = node_factory.DefaultWorkflowCodeExecutor() execute_workflow_code_template = MagicMock(return_value={"answer": "ok"}) monkeypatch.setattr( @@ -219,7 +219,7 @@ class TestDefaultWorkflowCodeExecutor: class TestCodeExecutorJinja2TemplateRenderer: - def test_render_template_delegates_to_code_executor(self, monkeypatch): + def test_render_template_delegates_to_code_executor(self, monkeypatch: pytest.MonkeyPatch): renderer = workflow_template_rendering.CodeExecutorJinja2TemplateRenderer() execute_workflow_code_template = MagicMock(return_value={"result": "Hello workflow"}) monkeypatch.setattr( @@ -237,7 +237,7 @@ class TestCodeExecutorJinja2TemplateRenderer: inputs={"name": "workflow"}, ) - def 
test_render_template_wraps_code_execution_errors(self, monkeypatch): + def test_render_template_wraps_code_execution_errors(self, monkeypatch: pytest.MonkeyPatch): renderer = workflow_template_rendering.CodeExecutorJinja2TemplateRenderer() monkeypatch.setattr( workflow_template_rendering.CodeExecutor, @@ -434,7 +434,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No class mapping found for node type: missing"): factory.create_node({"id": "node-id", "data": {"type": "missing"}}) - def test_rejects_missing_class_mapping(self, monkeypatch, factory): + def test_rejects_missing_class_mapping(self, monkeypatch: pytest.MonkeyPatch, factory): monkeypatch.setattr( factory, "_resolve_node_class", @@ -444,7 +444,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No class mapping found for node type: start"): factory.create_node({"id": "node-id", "data": {"type": BuiltinNodeTypes.START}}) - def test_rejects_missing_latest_class(self, monkeypatch, factory): + def test_rejects_missing_latest_class(self, monkeypatch: pytest.MonkeyPatch, factory): monkeypatch.setattr( factory, "_resolve_node_class", @@ -454,7 +454,7 @@ class TestDifyNodeFactoryCreateNode: with pytest.raises(ValueError, match="No latest version class found for node type: start"): factory.create_node({"id": "node-id", "data": {"type": BuiltinNodeTypes.START}}) - def test_uses_version_specific_class_when_available(self, monkeypatch, factory): + def test_uses_version_specific_class_when_available(self, monkeypatch: pytest.MonkeyPatch, factory): matched_node = sentinel.matched_node latest_node_class = _node_constructor(return_value=sentinel.latest_node) matched_node_class = _node_constructor(return_value=matched_node) @@ -475,7 +475,9 @@ class TestDifyNodeFactoryCreateNode: assert kwargs["graph_runtime_state"] is factory.graph_runtime_state latest_node_class.assert_not_called() - def 
test_falls_back_to_latest_class_when_version_specific_mapping_is_missing(self, monkeypatch, factory): + def test_falls_back_to_latest_class_when_version_specific_mapping_is_missing( + self, monkeypatch: pytest.MonkeyPatch, factory + ): latest_node = sentinel.latest_node latest_node_class = _node_constructor(return_value=latest_node) monkeypatch.setattr( @@ -507,7 +509,7 @@ class TestDifyNodeFactoryCreateNode: (BuiltinNodeTypes.DOCUMENT_EXTRACTOR, "DocumentExtractorNode"), ], ) - def test_creates_specialized_nodes(self, monkeypatch, factory, node_type, constructor_name): + def test_creates_specialized_nodes(self, monkeypatch: pytest.MonkeyPatch, factory, node_type, constructor_name): created_node = object() constructor = _node_constructor(return_value=created_node) constructor._mock_name = constructor_name @@ -597,7 +599,9 @@ class TestDifyNodeFactoryCreateNode: prepared_llm.assert_called_once_with(sentinel.model_instance) assert kwargs["model_instance"] is wrapped_model_instance - def test_create_node_passes_alias_preserving_llm_config_to_constructor(self, monkeypatch, factory): + def test_create_node_passes_alias_preserving_llm_config_to_constructor( + self, monkeypatch: pytest.MonkeyPatch, factory + ): created_node = object() constructor = _node_constructor(return_value=created_node) monkeypatch.setattr(factory, "_resolve_node_class", MagicMock(return_value=constructor)) @@ -665,7 +669,7 @@ class TestDifyNodeFactoryCreateNode: ) def test_creates_model_backed_nodes( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, factory, node_type, constructor_name, @@ -726,7 +730,7 @@ class TestDifyNodeFactoryModelInstance: factory._llm_model_factory = sentinel.model_factory return factory - def test_delegates_to_fetch_model_config(self, monkeypatch, factory): + def test_delegates_to_fetch_model_config(self, monkeypatch: pytest.MonkeyPatch, factory): node_data_model = SimpleNamespace( provider="provider", name="model", @@ -755,7 +759,7 @@ class 
TestDifyNodeFactoryModelInstance: model_factory=sentinel.model_factory, ) - def test_propagates_fetch_model_config_errors(self, monkeypatch, factory): + def test_propagates_fetch_model_config_errors(self, monkeypatch: pytest.MonkeyPatch, factory): fetch_model_config = MagicMock(side_effect=ValueError("broken model config")) monkeypatch.setattr(node_factory, "fetch_model_config", fetch_model_config) @@ -780,7 +784,7 @@ class TestDifyNodeFactoryMemory: assert result is None factory.graph_runtime_state.variable_pool.get.assert_not_called() - def test_uses_string_segment_conversation_id(self, monkeypatch, factory): + def test_uses_string_segment_conversation_id(self, monkeypatch: pytest.MonkeyPatch, factory): memory_config = sentinel.memory_config factory.graph_runtime_state.variable_pool.get.return_value = StringSegment(value="conversation-id") fetch_memory = MagicMock(return_value=sentinel.memory) @@ -800,7 +804,7 @@ class TestDifyNodeFactoryMemory: model_instance=sentinel.model_instance, ) - def test_ignores_non_string_segment_conversation_ids(self, monkeypatch, factory): + def test_ignores_non_string_segment_conversation_ids(self, monkeypatch: pytest.MonkeyPatch, factory): memory_config = sentinel.memory_config factory.graph_runtime_state.variable_pool.get.return_value = sentinel.segment fetch_memory = MagicMock(return_value=sentinel.memory) diff --git a/api/tests/unit_tests/core/workflow/test_workflow_entry.py b/api/tests/unit_tests/core/workflow/test_workflow_entry.py index 041c5cc612..2e9e3468fd 100644 --- a/api/tests/unit_tests/core/workflow/test_workflow_entry.py +++ b/api/tests/unit_tests/core/workflow/test_workflow_entry.py @@ -19,7 +19,7 @@ from graphon.variables.variables import StringVariable @pytest.fixture(autouse=True) -def _mock_ssrf_head(monkeypatch): +def _mock_ssrf_head(monkeypatch: pytest.MonkeyPatch): """Avoid any real network requests during tests. 
factories.file_factory.remote.get_remote_file_info() uses ssrf_proxy.head diff --git a/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py b/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py index 270d0bf90d..3978cbb1a0 100644 --- a/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py +++ b/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py @@ -603,7 +603,7 @@ class TestWorkflowEntryHelpers: user_inputs={}, ) - def test_run_free_node_rejects_missing_node_class(self, monkeypatch): + def test_run_free_node_rejects_missing_node_class(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr( workflow_entry, "resolve_workflow_node_class", @@ -619,7 +619,9 @@ class TestWorkflowEntryHelpers: user_inputs={}, ) - def test_run_free_node_uses_empty_mapping_when_selector_extraction_is_not_implemented(self, monkeypatch): + def test_run_free_node_uses_empty_mapping_when_selector_extraction_is_not_implemented( + self, monkeypatch: pytest.MonkeyPatch + ): class FakeNodeClass: @staticmethod def extract_variable_selector_to_variable_mapping(**_kwargs): @@ -707,7 +709,7 @@ class TestWorkflowEntryHelpers: tenant_id="tenant-id", ) - def test_run_free_node_wraps_execution_failures(self, monkeypatch): + def test_run_free_node_wraps_execution_failures(self, monkeypatch: pytest.MonkeyPatch): class FakeNodeClass: @staticmethod def extract_variable_selector_to_variable_mapping(**_kwargs): diff --git a/api/tests/unit_tests/extensions/test_ext_request_logging.py b/api/tests/unit_tests/extensions/test_ext_request_logging.py index dcb457c806..03479686bb 100644 --- a/api/tests/unit_tests/extensions/test_ext_request_logging.py +++ b/api/tests/unit_tests/extensions/test_ext_request_logging.py @@ -71,7 +71,7 @@ def enable_request_logging(monkeypatch: pytest.MonkeyPatch): class TestRequestLoggingExtension: def test_receiver_should_not_be_invoked_if_configuration_is_disabled( self, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, 
mock_request_receiver, mock_response_receiver, ): @@ -266,7 +266,9 @@ class TestResponseUnmodified: class TestRequestFinishedInfoAccessLine: - def test_info_access_log_includes_method_path_status_duration_trace_id(self, monkeypatch, caplog): + def test_info_access_log_includes_method_path_status_duration_trace_id( + self, monkeypatch: pytest.MonkeyPatch, caplog + ): """Ensure INFO access line contains expected fields with computed duration and trace id.""" app = _get_test_app() # Push a real request context so flask.request and g are available @@ -299,7 +301,7 @@ class TestRequestFinishedInfoAccessLine: assert "123.456" in msg # rounded to 3 decimals assert "trace-xyz" in msg - def test_info_access_log_uses_dash_without_start_timestamp(self, monkeypatch, caplog): + def test_info_access_log_uses_dash_without_start_timestamp(self, monkeypatch: pytest.MonkeyPatch, caplog): app = _get_test_app() with app.test_request_context("/bar", method="POST"): # No g.__request_started_ts set -> duration should be '-' diff --git a/api/tests/unit_tests/extensions/test_pubsub_channel.py b/api/tests/unit_tests/extensions/test_pubsub_channel.py index 926c406ad4..24bbf55cb3 100644 --- a/api/tests/unit_tests/extensions/test_pubsub_channel.py +++ b/api/tests/unit_tests/extensions/test_pubsub_channel.py @@ -1,10 +1,12 @@ +import pytest + from configs import dify_config from extensions import ext_redis from libs.broadcast_channel.redis.channel import BroadcastChannel as RedisBroadcastChannel from libs.broadcast_channel.redis.sharded_channel import ShardedRedisBroadcastChannel -def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch): +def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ext_redis, "_pubsub_redis_client", object()) @@ -13,7 +15,7 @@ def test_get_pubsub_broadcast_channel_defaults_to_pubsub(monkeypatch): assert isinstance(channel, 
RedisBroadcastChannel) -def test_get_pubsub_broadcast_channel_sharded(monkeypatch): +def test_get_pubsub_broadcast_channel_sharded(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "sharded") monkeypatch.setattr(ext_redis, "_pubsub_redis_client", object()) diff --git a/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py b/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py index 8bef01c1ed..7c7f20374e 100644 --- a/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py +++ b/api/tests/unit_tests/libs/broadcast_channel/redis/test_channel_unit_tests.py @@ -673,7 +673,7 @@ class TestRedisShardedSubscription: """Test cases for the _RedisShardedSubscription class.""" @pytest.fixture(autouse=True) - def patch_sharded_redis_type(self, monkeypatch): + def patch_sharded_redis_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("libs.broadcast_channel.redis.sharded_channel.Redis", FakeRedisClient) @pytest.fixture @@ -889,7 +889,9 @@ class TestRedisShardedSubscription: assert not sharded_subscription._queue.empty() assert sharded_subscription._queue.get_nowait() == b"test sharded payload" - def test_get_message_uses_target_node_for_cluster_client(self, mock_pubsub: MagicMock, monkeypatch): + def test_get_message_uses_target_node_for_cluster_client( + self, mock_pubsub: MagicMock, monkeypatch: pytest.MonkeyPatch + ): """Test that cluster clients use target_node for sharded messages.""" class DummyRedisCluster: @@ -1177,7 +1179,7 @@ class TestRedisSubscriptionCommon: return request.param @pytest.fixture(autouse=True) - def patch_sharded_redis_type(self, monkeypatch): + def patch_sharded_redis_type(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr("libs.broadcast_channel.redis.sharded_channel.Redis", FakeRedisClient) @pytest.fixture diff --git a/api/tests/unit_tests/libs/test_archive_storage.py 
b/api/tests/unit_tests/libs/test_archive_storage.py index de3c9c4737..4363c23571 100644 --- a/api/tests/unit_tests/libs/test_archive_storage.py +++ b/api/tests/unit_tests/libs/test_archive_storage.py @@ -34,7 +34,7 @@ def _client_error(code: str) -> ClientError: return ClientError({"Error": {"Code": code}}, "Operation") -def _mock_client(monkeypatch): +def _mock_client(monkeypatch: pytest.MonkeyPatch): client = MagicMock() client.head_bucket.return_value = None # Configure put_object to return a proper ETag that matches the MD5 hash @@ -56,19 +56,19 @@ def _mock_client(monkeypatch): return client, boto_client -def test_init_disabled(monkeypatch): +def test_init_disabled(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch, ARCHIVE_STORAGE_ENABLED=False) with pytest.raises(ArchiveStorageNotConfiguredError, match="not enabled"): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_missing_config(monkeypatch): +def test_init_missing_config(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch, ARCHIVE_STORAGE_ENDPOINT=None) with pytest.raises(ArchiveStorageNotConfiguredError, match="incomplete"): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_not_found(monkeypatch): +def test_init_bucket_not_found(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("404") @@ -77,7 +77,7 @@ def test_init_bucket_not_found(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_access_denied(monkeypatch): +def test_init_bucket_access_denied(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("403") @@ -86,7 +86,7 @@ def test_init_bucket_access_denied(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_bucket_other_error(monkeypatch): +def test_init_bucket_other_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) 
client, _ = _mock_client(monkeypatch) client.head_bucket.side_effect = _client_error("500") @@ -95,7 +95,7 @@ def test_init_bucket_other_error(monkeypatch): ArchiveStorage(bucket=BUCKET_NAME) -def test_init_sets_client(monkeypatch): +def test_init_sets_client(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, boto_client = _mock_client(monkeypatch) @@ -113,7 +113,7 @@ def test_init_sets_client(monkeypatch): assert storage.bucket == BUCKET_NAME -def test_put_object_returns_checksum(monkeypatch): +def test_put_object_returns_checksum(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -132,7 +132,7 @@ def test_put_object_returns_checksum(monkeypatch): assert checksum == expected_md5 -def test_put_object_raises_on_error(monkeypatch): +def test_put_object_raises_on_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -142,7 +142,7 @@ def test_put_object_raises_on_error(monkeypatch): storage.put_object("key", b"data") -def test_get_object_returns_bytes(monkeypatch): +def test_get_object_returns_bytes(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) body = MagicMock() @@ -153,7 +153,7 @@ def test_get_object_returns_bytes(monkeypatch): assert storage.get_object("key") == b"payload" -def test_get_object_missing(monkeypatch): +def test_get_object_missing(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.get_object.side_effect = _client_error("NoSuchKey") @@ -163,7 +163,7 @@ def test_get_object_missing(monkeypatch): storage.get_object("missing") -def test_get_object_stream(monkeypatch): +def test_get_object_stream(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) body = MagicMock() @@ 
-174,7 +174,7 @@ def test_get_object_stream(monkeypatch): assert list(storage.get_object_stream("key")) == [b"a", b"b"] -def test_get_object_stream_missing(monkeypatch): +def test_get_object_stream_missing(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.get_object.side_effect = _client_error("NoSuchKey") @@ -184,7 +184,7 @@ def test_get_object_stream_missing(monkeypatch): list(storage.get_object_stream("missing")) -def test_object_exists(monkeypatch): +def test_object_exists(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) storage = ArchiveStorage(bucket=BUCKET_NAME) @@ -194,7 +194,7 @@ def test_object_exists(monkeypatch): assert storage.object_exists("missing") is False -def test_delete_object_error(monkeypatch): +def test_delete_object_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.delete_object.side_effect = _client_error("500") @@ -204,7 +204,7 @@ def test_delete_object_error(monkeypatch): storage.delete_object("key") -def test_list_objects(monkeypatch): +def test_list_objects(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) paginator = MagicMock() @@ -219,7 +219,7 @@ def test_list_objects(monkeypatch): paginator.paginate.assert_called_once_with(Bucket="archive-bucket", Prefix="prefix") -def test_list_objects_error(monkeypatch): +def test_list_objects_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) paginator = MagicMock() @@ -231,7 +231,7 @@ def test_list_objects_error(monkeypatch): storage.list_objects("prefix") -def test_generate_presigned_url(monkeypatch): +def test_generate_presigned_url(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.generate_presigned_url.return_value = "http://signed-url" @@ 
-247,7 +247,7 @@ def test_generate_presigned_url(monkeypatch): assert url == "http://signed-url" -def test_generate_presigned_url_error(monkeypatch): +def test_generate_presigned_url_error(monkeypatch: pytest.MonkeyPatch): _configure_storage(monkeypatch) client, _ = _mock_client(monkeypatch) client.generate_presigned_url.side_effect = _client_error("500") diff --git a/api/tests/unit_tests/libs/test_pandas.py b/api/tests/unit_tests/libs/test_pandas.py index 21c2f0781d..a4739dbbc2 100644 --- a/api/tests/unit_tests/libs/test_pandas.py +++ b/api/tests/unit_tests/libs/test_pandas.py @@ -1,7 +1,8 @@ import pandas as pd +import pytest -def test_pandas_csv(tmp_path, monkeypatch): +def test_pandas_csv(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data = {"col1": [1, 2.2, -3.3, 4.0, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data) @@ -16,7 +17,7 @@ def test_pandas_csv(tmp_path, monkeypatch): assert df2[df2.columns[1]].to_list() == data["col2"] -def test_pandas_xlsx(tmp_path, monkeypatch): +def test_pandas_xlsx(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data = {"col1": [1, 2.2, -3.3, 4.0, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data) @@ -31,7 +32,7 @@ def test_pandas_xlsx(tmp_path, monkeypatch): assert df2[df2.columns[1]].to_list() == data["col2"] -def test_pandas_xlsx_with_sheets(tmp_path, monkeypatch): +def test_pandas_xlsx_with_sheets(tmp_path, monkeypatch: pytest.MonkeyPatch): monkeypatch.chdir(tmp_path) data1 = {"col1": [1, 2, 3, 4, 5], "col2": ["A", "B", "C", "D", "E"]} df1 = pd.DataFrame(data1) diff --git a/api/tests/unit_tests/libs/test_rate_limiter.py b/api/tests/unit_tests/libs/test_rate_limiter.py index 9d44b07b5e..5052033db8 100644 --- a/api/tests/unit_tests/libs/test_rate_limiter.py +++ b/api/tests/unit_tests/libs/test_rate_limiter.py @@ -1,5 +1,7 @@ from unittest.mock import MagicMock +import pytest + from libs import helper as helper_module @@ -31,7 +33,7 @@ class 
_FakeRedis: return True -def test_rate_limiter_counts_attempts_within_same_second(monkeypatch): +def test_rate_limiter_counts_attempts_within_same_second(monkeypatch: pytest.MonkeyPatch): fake_redis = _FakeRedis() monkeypatch.setattr(helper_module.time, "time", lambda: 1000) @@ -48,7 +50,7 @@ def test_rate_limiter_counts_attempts_within_same_second(monkeypatch): assert limiter.is_rate_limited("203.0.113.10") is True -def test_rate_limiter_uses_injected_redis(monkeypatch): +def test_rate_limiter_uses_injected_redis(monkeypatch: pytest.MonkeyPatch): redis_client = MagicMock() redis_client.zcard.return_value = 1 monkeypatch.setattr(helper_module.time, "time", lambda: 1000) diff --git a/api/tests/unit_tests/libs/test_token.py b/api/tests/unit_tests/libs/test_token.py index 6a65b5faa0..734568d37b 100644 --- a/api/tests/unit_tests/libs/test_token.py +++ b/api/tests/unit_tests/libs/test_token.py @@ -1,5 +1,6 @@ from unittest.mock import MagicMock +import pytest from werkzeug.wrappers import Response from constants import COOKIE_NAME_ACCESS_TOKEN, COOKIE_NAME_WEBAPP_ACCESS_TOKEN @@ -30,7 +31,7 @@ def test_extract_access_token(): assert extract_webapp_access_token(request) == expected_webapp # pyright: ignore[reportArgumentType] -def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch): +def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", "", raising=False) @@ -38,7 +39,7 @@ def test_real_cookie_name_uses_host_prefix_without_domain(monkeypatch): assert token._real_cookie_name("csrf_token") == "__Host-csrf_token" -def test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch): +def 
test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", ".example.com", raising=False) @@ -46,7 +47,7 @@ def test_real_cookie_name_without_host_prefix_when_domain_present(monkeypatch): assert token._real_cookie_name("csrf_token") == "csrf_token" -def test_set_csrf_cookie_includes_domain_when_configured(monkeypatch): +def test_set_csrf_cookie_includes_domain_when_configured(monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(token.dify_config, "CONSOLE_WEB_URL", "https://console.example.com", raising=False) monkeypatch.setattr(token.dify_config, "CONSOLE_API_URL", "https://api.example.com", raising=False) monkeypatch.setattr(token.dify_config, "COOKIE_DOMAIN", ".example.com", raising=False) diff --git a/api/tests/unit_tests/services/plugin/conftest.py b/api/tests/unit_tests/services/plugin/conftest.py index 80c6077b0c..9dc4fa0390 100644 --- a/api/tests/unit_tests/services/plugin/conftest.py +++ b/api/tests/unit_tests/services/plugin/conftest.py @@ -21,7 +21,7 @@ def make_features( @pytest.fixture -def mock_installer(monkeypatch): +def mock_installer(monkeypatch: pytest.MonkeyPatch): """Patch PluginInstaller at the service import site.""" mock = MagicMock() monkeypatch.setattr("services.plugin.plugin_service.PluginInstaller", lambda: mock) diff --git a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py index 1a2d062208..287391c24c 100644 --- a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py +++ b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_task_proxy.py @@ -2,12 +2,13 @@ from types import SimpleNamespace from unittest.mock 
import Mock import pytest +from pytest_mock import MockerFixture from services.rag_pipeline.rag_pipeline_task_proxy import RagPipelineTaskProxy @pytest.fixture -def proxy(mocker): +def proxy(mocker: MockerFixture): """Create a RagPipelineTaskProxy with mocked dependencies.""" mocker.patch("services.rag_pipeline.rag_pipeline_task_proxy.TenantIsolatedTaskQueue") entity = Mock() diff --git a/api/tests/unit_tests/services/test_app_generate_service.py b/api/tests/unit_tests/services/test_app_generate_service.py index d3f9c5dd9f..216c5d9db6 100644 --- a/api/tests/unit_tests/services/test_app_generate_service.py +++ b/api/tests/unit_tests/services/test_app_generate_service.py @@ -20,6 +20,7 @@ from contextlib import contextmanager from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture import services.app_generate_service as ags_module from core.app.entities.app_invoke_entities import InvokeFrom @@ -96,7 +97,7 @@ def _noop_rate_limit_context(rate_limit, request_id): class TestBuildStreamingTaskOnSubscribe: """Tests for AppGenerateService._build_streaming_task_on_subscribe.""" - def test_streams_mode_starts_immediately(self, monkeypatch): + def test_streams_mode_starts_immediately(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "streams") called = [] cb = AppGenerateService._build_streaming_task_on_subscribe(lambda: called.append(1)) @@ -106,7 +107,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] # not called again - def test_pubsub_mode_starts_on_subscribe(self, monkeypatch): + def test_pubsub_mode_starts_on_subscribe(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) # large to prevent timer called = [] @@ -118,7 +119,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] - def 
test_sharded_mode_starts_on_subscribe(self, monkeypatch): + def test_sharded_mode_starts_on_subscribe(self, monkeypatch: pytest.MonkeyPatch): """sharded is treated like pubsub (i.e. not 'streams').""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "sharded") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) @@ -128,7 +129,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert called == [1] - def test_pubsub_fallback_timer_fires(self, monkeypatch): + def test_pubsub_fallback_timer_fires(self, monkeypatch: pytest.MonkeyPatch): """When nobody subscribes fast enough the fallback timer fires.""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 50) # 50 ms @@ -137,7 +138,7 @@ class TestBuildStreamingTaskOnSubscribe: time.sleep(0.2) # give the timer time to fire assert called == [1] - def test_exception_in_start_task_returns_false(self, monkeypatch): + def test_exception_in_start_task_returns_false(self, monkeypatch: pytest.MonkeyPatch): """When start_task raises, _try_start returns False and next call retries.""" monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "streams") call_count = 0 @@ -154,7 +155,7 @@ class TestBuildStreamingTaskOnSubscribe: cb() assert call_count == 2 - def test_concurrent_subscribe_only_starts_once(self, monkeypatch): + def test_concurrent_subscribe_only_starts_once(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "PUBSUB_REDIS_CHANNEL_TYPE", "pubsub") monkeypatch.setattr(ags_module, "SSE_TASK_START_FALLBACK_MS", 60_000) call_count = 0 @@ -176,31 +177,31 @@ class TestBuildStreamingTaskOnSubscribe: # _get_max_active_requests # --------------------------------------------------------------------------- class TestGetMaxActiveRequests: - def test_both_zero_returns_zero(self, monkeypatch): + def test_both_zero_returns_zero(self, monkeypatch: 
pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=0) assert AppGenerateService._get_max_active_requests(app) == 0 - def test_app_limit_only(self, monkeypatch): + def test_app_limit_only(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=5) assert AppGenerateService._get_max_active_requests(app) == 5 - def test_config_limit_only(self, monkeypatch): + def test_config_limit_only(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 10) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=0) assert AppGenerateService._get_max_active_requests(app) == 10 - def test_both_non_zero_returns_min(self, monkeypatch): + def test_both_non_zero_returns_min(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 20) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 0) app = _make_app(AppMode.CHAT, max_active_requests=5) assert AppGenerateService._get_max_active_requests(app) == 5 - def test_default_active_requests_used_when_app_has_none(self, monkeypatch): + def test_default_active_requests_used_when_app_has_none(self, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "APP_MAX_ACTIVE_REQUESTS", 0) monkeypatch.setattr(ags_module.dify_config, "APP_DEFAULT_ACTIVE_REQUESTS", 15) app = _make_app(AppMode.CHAT, max_active_requests=0) @@ -214,7 +215,7 @@ class TestGenerate: """Tests for AppGenerateService.generate covering each mode.""" @pytest.fixture(autouse=True) - def _common(self, mocker, 
monkeypatch): + def _common(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", False) mocker.patch("services.app_generate_service.RateLimit", _DummyRateLimit) # Prevent AppExecutionParams.new from touching real models via isinstance @@ -224,7 +225,7 @@ class TestGenerate: ) # -- COMPLETION --------------------------------------------------------- - def test_completion_mode(self, mocker): + def test_completion_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.CompletionAppGenerator.generate", return_value={"result": "ok"}, @@ -244,7 +245,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- AGENT_CHAT via mode ------------------------------------------------ - def test_agent_chat_mode(self, mocker): + def test_agent_chat_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.AgentChatAppGenerator.generate", return_value={"result": "agent"}, @@ -264,7 +265,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- AGENT_CHAT via is_agent flag (non-AGENT_CHAT mode) ----------------- - def test_agent_via_is_agent_flag(self, mocker): + def test_agent_via_is_agent_flag(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.AgentChatAppGenerator.generate", return_value={"result": "agent-via-flag"}, @@ -285,7 +286,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- CHAT --------------------------------------------------------------- - def test_chat_mode(self, mocker): + def test_chat_mode(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.ChatAppGenerator.generate", return_value={"result": "chat"}, @@ -306,7 +307,7 @@ class TestGenerate: gen_spy.assert_called_once() # -- ADVANCED_CHAT blocking --------------------------------------------- - def test_advanced_chat_blocking(self, mocker): + def test_advanced_chat_blocking(self, mocker: MockerFixture): 
workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) @@ -333,7 +334,7 @@ class TestGenerate: retrieve_spy.assert_not_called() # -- ADVANCED_CHAT streaming -------------------------------------------- - def test_advanced_chat_streaming(self, mocker, monkeypatch): + def test_advanced_chat_streaming(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -365,7 +366,7 @@ class TestGenerate: delay_spy.assert_called_once() # -- WORKFLOW blocking -------------------------------------------------- - def test_workflow_blocking(self, mocker): + def test_workflow_blocking(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) gen_spy = mocker.patch( @@ -390,7 +391,7 @@ class TestGenerate: assert call_kwargs["pause_state_config"].state_owner_user_id == "owner-id" # -- WORKFLOW streaming ------------------------------------------------- - def test_workflow_streaming(self, mocker, monkeypatch): + def test_workflow_streaming(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -422,7 +423,7 @@ class TestGenerate: delay_spy.assert_called_once() # -- Invalid mode ------------------------------------------------------- - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app("invalid-mode", is_agent=False) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate( @@ -439,14 +440,14 @@ class TestGenerate: # --------------------------------------------------------------------------- class TestGenerateBilling: @pytest.fixture(autouse=True) - def _common(self, mocker, monkeypatch): + def 
_common(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): mocker.patch("services.app_generate_service.RateLimit", _DummyRateLimit) mocker.patch( "services.app_generate_service.rate_limit_context", _noop_rate_limit_context, ) - def test_billing_enabled_consumes_quota(self, mocker, monkeypatch): + def test_billing_enabled_consumes_quota(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", True) quota_charge = MagicMock() reserve_mock = mocker.patch( @@ -472,7 +473,9 @@ class TestGenerateBilling: reserve_mock.assert_called_once_with(QuotaType.WORKFLOW, "tenant-id") quota_charge.commit.assert_called_once() - def test_billing_quota_exceeded_raises_rate_limit_error(self, mocker, monkeypatch): + def test_billing_quota_exceeded_raises_rate_limit_error( + self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch + ): from services.errors.app import QuotaExceededError from services.errors.llm import InvokeRateLimitError @@ -491,7 +494,7 @@ class TestGenerateBilling: streaming=False, ) - def test_exception_refunds_quota_and_exits_rate_limit(self, mocker, monkeypatch): + def test_exception_refunds_quota_and_exits_rate_limit(self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch): monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", True) quota_charge = MagicMock() mocker.patch( @@ -517,7 +520,9 @@ class TestGenerateBilling: ) quota_charge.refund.assert_called_once() - def test_rate_limit_exit_called_in_finally_for_blocking(self, mocker, monkeypatch): + def test_rate_limit_exit_called_in_finally_for_blocking( + self, mocker: MockerFixture, monkeypatch: pytest.MonkeyPatch + ): """For non-streaming (blocking) calls, rate_limit.exit should be called in finally.""" monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", False) @@ -552,7 +557,7 @@ class TestGenerateBilling: # _get_workflow # --------------------------------------------------------------------------- class 
TestGetWorkflow: - def test_debugger_fetches_draft(self, mocker): + def test_debugger_fetches_draft(self, mocker: MockerFixture): draft_wf = _make_workflow() ws = MagicMock() ws.get_draft_workflow.return_value = draft_wf @@ -562,7 +567,7 @@ class TestGetWorkflow: assert result is draft_wf ws.get_draft_workflow.assert_called_once() - def test_debugger_raises_when_no_draft(self, mocker): + def test_debugger_raises_when_no_draft(self, mocker: MockerFixture): ws = MagicMock() ws.get_draft_workflow.return_value = None mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -570,7 +575,7 @@ class TestGetWorkflow: with pytest.raises(ValueError, match="Workflow not initialized"): AppGenerateService._get_workflow(_make_app(AppMode.WORKFLOW), InvokeFrom.DEBUGGER) - def test_non_debugger_fetches_published(self, mocker): + def test_non_debugger_fetches_published(self, mocker: MockerFixture): pub_wf = _make_workflow() ws = MagicMock() ws.get_published_workflow.return_value = pub_wf @@ -580,7 +585,7 @@ class TestGetWorkflow: assert result is pub_wf ws.get_published_workflow.assert_called_once() - def test_non_debugger_raises_when_no_published(self, mocker): + def test_non_debugger_raises_when_no_published(self, mocker: MockerFixture): ws = MagicMock() ws.get_published_workflow.return_value = None mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -588,7 +593,7 @@ class TestGetWorkflow: with pytest.raises(ValueError, match="Workflow not published"): AppGenerateService._get_workflow(_make_app(AppMode.WORKFLOW), InvokeFrom.SERVICE_API) - def test_specific_workflow_id_valid_uuid(self, mocker): + def test_specific_workflow_id_valid_uuid(self, mocker: MockerFixture): valid_uuid = str(uuid.uuid4()) specific_wf = _make_workflow(workflow_id=valid_uuid) ws = MagicMock() @@ -601,7 +606,7 @@ class TestGetWorkflow: assert result is specific_wf ws.get_published_workflow_by_id.assert_called_once() - def 
test_specific_workflow_id_invalid_uuid(self, mocker): + def test_specific_workflow_id_invalid_uuid(self, mocker: MockerFixture): ws = MagicMock() mocker.patch("services.app_generate_service.WorkflowService", return_value=ws) @@ -610,7 +615,7 @@ class TestGetWorkflow: _make_app(AppMode.WORKFLOW), InvokeFrom.SERVICE_API, workflow_id="not-a-uuid" ) - def test_specific_workflow_id_not_found(self, mocker): + def test_specific_workflow_id_not_found(self, mocker: MockerFixture): valid_uuid = str(uuid.uuid4()) ws = MagicMock() ws.get_published_workflow_by_id.return_value = None @@ -626,7 +631,7 @@ class TestGetWorkflow: # generate_single_iteration # --------------------------------------------------------------------------- class TestGenerateSingleIteration: - def test_advanced_chat_mode(self, mocker): + def test_advanced_chat_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) gen_spy = mocker.patch( @@ -644,7 +649,7 @@ class TestGenerateSingleIteration: iter_spy.assert_called_once() assert result == {"event": "iteration"} - def test_workflow_mode(self, mocker): + def test_workflow_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -662,7 +667,7 @@ class TestGenerateSingleIteration: iter_spy.assert_called_once() assert result == {"event": "wf-iteration"} - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app(AppMode.CHAT) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate_single_iteration(app_model=app, user=_make_user(), node_id="n1", args={}) @@ -672,7 +677,7 @@ class TestGenerateSingleIteration: # generate_single_loop # --------------------------------------------------------------------------- class TestGenerateSingleLoop: - def test_advanced_chat_mode(self, mocker): + 
def test_advanced_chat_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -690,7 +695,7 @@ class TestGenerateSingleLoop: loop_spy.assert_called_once() assert result == {"event": "loop"} - def test_workflow_mode(self, mocker): + def test_workflow_mode(self, mocker: MockerFixture): workflow = _make_workflow() mocker.patch.object(AppGenerateService, "_get_workflow", return_value=workflow) mocker.patch( @@ -708,7 +713,7 @@ class TestGenerateSingleLoop: loop_spy.assert_called_once() assert result == {"event": "wf-loop"} - def test_invalid_mode_raises(self, mocker): + def test_invalid_mode_raises(self, mocker: MockerFixture): app = _make_app(AppMode.COMPLETION) with pytest.raises(ValueError, match="Invalid app mode"): AppGenerateService.generate_single_loop(app_model=app, user=_make_user(), node_id="n1", args=MagicMock()) @@ -718,7 +723,7 @@ class TestGenerateSingleLoop: # generate_more_like_this # --------------------------------------------------------------------------- class TestGenerateMoreLikeThis: - def test_delegates_to_completion_generator(self, mocker): + def test_delegates_to_completion_generator(self, mocker: MockerFixture): gen_spy = mocker.patch( "services.app_generate_service.CompletionAppGenerator.generate_more_like_this", return_value={"result": "similar"}, @@ -739,7 +744,7 @@ class TestGenerateMoreLikeThis: # get_response_generator # --------------------------------------------------------------------------- class TestGetResponseGenerator: - def test_non_ended_workflow_run(self, mocker): + def test_non_ended_workflow_run(self, mocker: MockerFixture): app = _make_app(AppMode.ADVANCED_CHAT) workflow_run = MagicMock() workflow_run.id = "run-1" @@ -756,7 +761,7 @@ class TestGetResponseGenerator: result = AppGenerateService.get_response_generator(app_model=app, workflow_run=workflow_run) gen_instance.retrieve_events.assert_called_once() - def 
test_ended_workflow_run_still_returns_generator(self, mocker): + def test_ended_workflow_run_still_returns_generator(self, mocker: MockerFixture): """Even when the run is ended, the current code still returns a generator (TODO branch).""" app = _make_app(AppMode.WORKFLOW) workflow_run = MagicMock() diff --git a/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py b/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py index 30aa359b45..4293be8f72 100644 --- a/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py +++ b/api/tests/unit_tests/services/test_app_generate_service_streaming_integration.py @@ -89,7 +89,7 @@ class _FakeStreams: @pytest.fixture -def _patch_get_channel_streams(monkeypatch): +def _patch_get_channel_streams(monkeypatch: pytest.MonkeyPatch): from libs.broadcast_channel.redis.streams_channel import StreamsBroadcastChannel fake = _FakeStreams() @@ -108,7 +108,7 @@ def _patch_get_channel_streams(monkeypatch): @pytest.fixture -def _patch_get_channel_pubsub(monkeypatch): +def _patch_get_channel_pubsub(monkeypatch: pytest.MonkeyPatch): from libs.broadcast_channel.redis.channel import BroadcastChannel as RedisBroadcastChannel store: dict[str, deque[bytes]] = defaultdict(deque) @@ -163,7 +163,7 @@ def test_streams_full_flow_prepublish_and_replay(): @pytest.mark.usefixtures("_patch_get_channel_pubsub") -def test_pubsub_full_flow_start_on_subscribe_gated(monkeypatch): +def test_pubsub_full_flow_start_on_subscribe_gated(monkeypatch: pytest.MonkeyPatch): # Speed up any potential timer if it accidentally triggers monkeypatch.setattr("services.app_generate_service.SSE_TASK_START_FALLBACK_MS", 50) diff --git a/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py b/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py index 9a513c3fe6..f5879d973d 100644 --- a/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py +++ 
b/api/tests/unit_tests/services/test_dataset_service_lock_not_owned.py @@ -22,7 +22,7 @@ class FakeLock: @pytest.fixture -def fake_current_user(monkeypatch): +def fake_current_user(monkeypatch: pytest.MonkeyPatch): user = create_autospec(Account, instance=True) user.id = "user-1" user.current_tenant_id = "tenant-1" @@ -31,7 +31,7 @@ def fake_current_user(monkeypatch): @pytest.fixture -def fake_features(monkeypatch): +def fake_features(monkeypatch: pytest.MonkeyPatch): """Features.billing.enabled == False to skip quota logic.""" features = types.SimpleNamespace( billing=types.SimpleNamespace(enabled=False, subscription=types.SimpleNamespace(plan="ENTERPRISE")), @@ -45,7 +45,7 @@ def fake_features(monkeypatch): @pytest.fixture -def fake_lock(monkeypatch): +def fake_lock(monkeypatch: pytest.MonkeyPatch): """Patch redis_client.lock to always raise LockNotOwnedError on enter.""" def _fake_lock(name, timeout=None, *args, **kwargs): @@ -61,7 +61,7 @@ def fake_lock(monkeypatch): def test_save_document_with_dataset_id_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_features, fake_lock, @@ -118,7 +118,7 @@ def test_save_document_with_dataset_id_ignores_lock_not_owned( def test_add_segment_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_lock, ): @@ -161,7 +161,7 @@ def test_add_segment_ignores_lock_not_owned( def test_multi_create_segment_ignores_lock_not_owned( - monkeypatch, + monkeypatch: pytest.MonkeyPatch, fake_current_user, fake_lock, ): diff --git a/api/tests/unit_tests/services/test_human_input_service.py b/api/tests/unit_tests/services/test_human_input_service.py index 55af564821..9fc818f789 100644 --- a/api/tests/unit_tests/services/test_human_input_service.py +++ b/api/tests/unit_tests/services/test_human_input_service.py @@ -3,6 +3,7 @@ from datetime import datetime, timedelta from unittest.mock import MagicMock import pytest +from pytest_mock import MockerFixture 
import services.human_input_service as human_input_service_module from core.repositories.human_input_repository import ( @@ -177,7 +178,9 @@ def test_get_form_definition_by_token_for_console_uses_repository(sample_form_re assert form.get_definition() == console_record.definition -def test_submit_form_by_token_calls_repository_and_enqueue(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_calls_repository_and_enqueue( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record @@ -204,7 +207,9 @@ def test_submit_form_by_token_calls_repository_and_enqueue(sample_form_record, m enqueue_spy.assert_called_once_with(sample_form_record.workflow_run_id) -def test_submit_form_by_token_skips_enqueue_for_delivery_test(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_skips_enqueue_for_delivery_test( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) test_record = dataclasses.replace( @@ -227,7 +232,9 @@ def test_submit_form_by_token_skips_enqueue_for_delivery_test(sample_form_record enqueue_spy.assert_not_called() -def test_submit_form_by_token_passes_submission_user_id(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_passes_submission_user_id( + sample_form_record, mock_session_factory, mocker: MockerFixture +): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record @@ -314,7 +321,7 @@ def test_form_submitted_error_init(): assert error.code == 412 -def test_human_input_service_init_with_engine(mocker): +def test_human_input_service_init_with_engine(mocker: MockerFixture): engine = 
MagicMock(spec=human_input_service_module.Engine) sessionmaker_mock = mocker.patch("services.human_input_service.sessionmaker") @@ -371,7 +378,7 @@ def test_submit_form_by_token_delivery_not_enabled(mock_session_factory): service.submit_form_by_token(RecipientType.STANDALONE_WEB_APP, "token", "action", {}) -def test_submit_form_by_token_no_workflow_run_id(sample_form_record, mock_session_factory, mocker): +def test_submit_form_by_token_no_workflow_run_id(sample_form_record, mock_session_factory, mocker: MockerFixture): session_factory, _ = mock_session_factory repo = MagicMock(spec=HumanInputFormSubmissionRepository) repo.get_by_token.return_value = sample_form_record diff --git a/api/tests/unit_tests/services/test_message_service.py b/api/tests/unit_tests/services/test_message_service.py index 7adc15d63e..51f8b3ef5b 100644 --- a/api/tests/unit_tests/services/test_message_service.py +++ b/api/tests/unit_tests/services/test_message_service.py @@ -906,7 +906,7 @@ class TestMessageServiceSuggestedQuestions: ): """Test successful suggested questions generation in basic Chat mode.""" # Arrange - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = factory.create_app_mock(mode=AppMode.CHAT) user = factory.create_end_user_mock() message = factory.create_message_mock() mock_get_message.return_value = message @@ -953,7 +953,7 @@ class TestMessageServiceSuggestedQuestions: """Test suggested question generation uses frontend configured model and prompt.""" from core.app.entities.app_invoke_entities import InvokeFrom - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = factory.create_app_mock(mode=AppMode.CHAT) app.tenant_id = "tenant-123" user = factory.create_end_user_mock() message = factory.create_message_mock() @@ -1024,7 +1024,7 @@ class TestMessageServiceSuggestedQuestions: factory, ): """Test invalid frontend configured model falls back to tenant default model.""" - app = factory.create_app_mock(mode=AppMode.CHAT.value) + app = 
factory.create_app_mock(mode=AppMode.CHAT) app.tenant_id = "tenant-123" user = factory.create_end_user_mock() message = factory.create_message_mock() diff --git a/api/tests/unit_tests/services/test_model_load_balancing_service.py b/api/tests/unit_tests/services/test_model_load_balancing_service.py index 3119af40a2..beecf73caa 100644 --- a/api/tests/unit_tests/services/test_model_load_balancing_service.py +++ b/api/tests/unit_tests/services/test_model_load_balancing_service.py @@ -104,7 +104,7 @@ def test_enable_disable_model_load_balancing_should_call_provider_configuration_ service.provider_manager.get_configurations.return_value = {"openai": provider_configuration} # Act - getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) # Assert getattr(provider_configuration, expected_provider_method).assert_called_once_with( @@ -125,7 +125,7 @@ def test_enable_disable_model_load_balancing_should_raise_value_error_when_provi # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + getattr(service, method_name)("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) def test_get_load_balancing_configs_should_raise_value_error_when_provider_missing( @@ -136,7 +136,7 @@ def test_get_load_balancing_configs_should_raise_value_error_when_provider_missi # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - service.get_load_balancing_configs("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value) + service.get_load_balancing_configs("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM) def test_get_load_balancing_configs_should_insert_inherit_config_when_missing_for_custom_provider( @@ -177,7 +177,7 @@ def test_get_load_balancing_configs_should_insert_inherit_config_when_missing_fo "tenant-1", "openai", 
"gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, ) # Assert @@ -238,7 +238,7 @@ def test_get_load_balancing_configs_should_reorder_existing_inherit_and_tolerate "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, config_from="predefined-model", ) @@ -259,7 +259,7 @@ def test_get_load_balancing_config_should_raise_value_error_when_provider_missin # Act + Assert with pytest.raises(ValueError, match="Provider openai does not exist"): - service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") def test_get_load_balancing_config_should_return_none_when_config_not_found( @@ -272,7 +272,7 @@ def test_get_load_balancing_config_should_return_none_when_config_not_found( mock_db.session.scalar.return_value = None # Act - result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") # Assert assert result is None @@ -292,7 +292,7 @@ def test_get_load_balancing_config_should_return_obfuscated_payload_when_config_ mock_db.session.scalar.return_value = config # Act - result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM.value, "cfg-1") + result = service.get_load_balancing_config("tenant-1", "openai", "gpt-4o-mini", ModelType.LLM, "cfg-1") # Assert assert result == { @@ -335,7 +335,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_provider_mi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [], "custom-model", ) @@ -354,7 +354,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_configs_is_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, cast(list[dict[str, object]], "invalid-configs"), "custom-model", ) @@ -375,7 +375,7 
@@ def test_update_load_balancing_configs_should_raise_value_error_when_config_item "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, cast(list[dict[str, object]], ["bad-item"]), "custom-model", ) @@ -397,7 +397,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credential_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"credential_id": "cred-1", "enabled": True}], "predefined-model", ) @@ -418,7 +418,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_name_or_ena "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"enabled": True}], "custom-model", ) @@ -428,7 +428,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_name_or_ena "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "cfg-without-enabled"}], "custom-model", ) @@ -450,7 +450,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_existing_co "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"id": "cfg-2", "name": "invalid", "enabled": True}], "custom-model", ) @@ -472,7 +472,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credentials "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"id": "cfg-1", "name": "new", "enabled": True, "credentials": "bad"}], "custom-model", ) @@ -482,7 +482,7 @@ def test_update_load_balancing_configs_should_raise_value_error_when_credentials "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "new-config", "enabled": True, "credentials": "bad"}], "custom-model", ) @@ -519,7 +519,7 @@ def test_update_load_balancing_configs_should_update_existing_create_new_and_del "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [ {"id": "cfg-1", "name": "updated-name", "enabled": False, "credentials": {"api_key": "plain"}}, {"name": "new-config", 
"enabled": True, "credentials": {"api_key": "plain"}}, @@ -553,7 +553,7 @@ def test_update_load_balancing_configs_should_raise_value_error_for_invalid_new_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "__inherit__", "enabled": True, "credentials": {"api_key": "x"}}], "custom-model", ) @@ -563,7 +563,7 @@ def test_update_load_balancing_configs_should_raise_value_error_for_invalid_new_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"name": "new", "enabled": True}], "custom-model", ) @@ -585,7 +585,7 @@ def test_update_load_balancing_configs_should_create_from_existing_provider_cred "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, [{"credential_id": "cred-1", "enabled": True}], "predefined-model", ) @@ -611,7 +611,7 @@ def test_validate_load_balancing_credentials_should_raise_value_error_when_provi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, ) @@ -631,7 +631,7 @@ def test_validate_load_balancing_credentials_should_raise_value_error_when_confi "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, config_id="cfg-1", ) @@ -654,7 +654,7 @@ def test_validate_load_balancing_credentials_should_delegate_to_custom_validate_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, config_id="cfg-1", ) @@ -662,7 +662,7 @@ def test_validate_load_balancing_credentials_should_delegate_to_custom_validate_ "tenant-1", "openai", "gpt-4o-mini", - ModelType.LLM.value, + ModelType.LLM, {"api_key": "plain"}, ) diff --git a/api/tests/unit_tests/services/test_model_provider_service.py b/api/tests/unit_tests/services/test_model_provider_service.py index 28d459eac9..9e4eeb2d6e 100644 --- a/api/tests/unit_tests/services/test_model_provider_service.py +++ b/api/tests/unit_tests/services/test_model_provider_service.py @@ -90,7 +90,7 @@ class 
TestModelProviderServiceConfiguration: ) manager.get_configurations.return_value = {"openai": allowed, "embedding": filtered} - result = service.get_provider_list(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_provider_list(tenant_id="tenant-1", model_type=ModelType.LLM) assert len(result) == 1 assert result[0].provider == "openai" @@ -232,7 +232,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -245,7 +245,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, }, @@ -258,7 +258,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, "credential_name": "cred-a", @@ -277,7 +277,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credentials": {"api_key": "x"}, "credential_id": "cred-1", @@ -298,7 +298,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -311,7 +311,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", "credential_id": "cred-1", }, @@ -324,7 +324,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", 
"credential_id": "cred-1", }, @@ -337,7 +337,7 @@ class TestModelProviderServiceDelegation: { "tenant_id": "tenant-1", "provider": "openai", - "model_type": ModelType.LLM.value, + "model_type": ModelType.LLM, "model": "gpt-4o", }, "delete_custom_model", @@ -425,7 +425,7 @@ class TestModelProviderServiceListingsAndDefaults: provider_configurations = SimpleNamespace(get_models=MagicMock(return_value=models)) manager.get_configurations.return_value = provider_configurations - result = service.get_models_by_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_models_by_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) provider_configurations.get_models.assert_called_once_with(model_type=ModelType.LLM, only_active=True) assert len(result) == 1 @@ -495,7 +495,7 @@ class TestModelProviderServiceListingsAndDefaults: ), ) - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is not None assert result.model == "gpt-4o" @@ -506,7 +506,7 @@ class TestModelProviderServiceListingsAndDefaults: service, manager = _create_service_with_mocked_manager() manager.get_default_model.return_value = None - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is None @@ -514,7 +514,7 @@ class TestModelProviderServiceListingsAndDefaults: service, manager = _create_service_with_mocked_manager() manager.get_default_model.side_effect = RuntimeError("boom") - result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM.value) + result = service.get_default_model_of_model_type(tenant_id="tenant-1", model_type=ModelType.LLM) assert result is None @@ -523,7 +523,7 @@ class 
TestModelProviderServiceListingsAndDefaults: service.update_default_model_of_model_type( tenant_id="tenant-1", - model_type=ModelType.LLM.value, + model_type=ModelType.LLM, provider="openai", model="gpt-4o", ) @@ -593,7 +593,7 @@ class TestModelProviderServiceListingsAndDefaults: tenant_id="tenant-1", provider="openai", model="gpt-4o", - model_type=ModelType.LLM.value, + model_type=ModelType.LLM, ) getattr(provider_configuration, provider_method_name).assert_called_once_with( diff --git a/api/tests/unit_tests/services/test_trigger_provider_service.py b/api/tests/unit_tests/services/test_trigger_provider_service.py index 6eba60e5f1..4da4af2d93 100644 --- a/api/tests/unit_tests/services/test_trigger_provider_service.py +++ b/api/tests/unit_tests/services/test_trigger_provider_service.py @@ -325,7 +325,7 @@ def test_update_trigger_subscription_should_raise_error_when_name_conflicts( id="sub-1", name="old", provider_id="langgenius/github/github", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, ) mock_session.scalar.side_effect = [subscription, object()] # found sub, name conflict _mock_get_trigger_provider(mocker, provider_controller) @@ -350,7 +350,7 @@ def test_update_trigger_subscription_should_update_fields_and_clear_cache( properties={"project": "enc-old"}, parameters={"event": "old"}, credentials={"api_key": "enc-old"}, - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credential_expires_at=0, expires_at=0, ) @@ -456,7 +456,7 @@ def test_delete_trigger_provider_should_delete_and_clear_cache_even_if_unsubscri id="sub-1", user_id="user-1", provider_id=str(provider_id), - credential_type=CredentialType.OAUTH2.value, + credential_type=CredentialType.OAUTH2, credentials={"token": "enc"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) @@ -492,7 +492,7 @@ def test_delete_trigger_provider_should_skip_unsubscribe_for_unauthorized( id="sub-2", user_id="user-1", provider_id=str(provider_id), 
- credential_type=CredentialType.UNAUTHORIZED.value, + credential_type=CredentialType.UNAUTHORIZED, credentials={}, to_entity=lambda: SimpleNamespace(id="sub-2"), ) @@ -527,7 +527,7 @@ def test_refresh_oauth_token_should_raise_error_for_non_oauth_credentials( mocker: MockerFixture, mock_session: MagicMock ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY) mock_session.scalar.return_value = subscription # Act + Assert @@ -545,7 +545,7 @@ def test_refresh_oauth_token_should_refresh_and_persist_new_credentials( subscription = SimpleNamespace( provider_id=str(provider_id), user_id="user-1", - credential_type=CredentialType.OAUTH2.value, + credential_type=CredentialType.OAUTH2, credentials={"access_token": "enc"}, credential_expires_at=0, ) @@ -613,7 +613,7 @@ def test_refresh_subscription_should_refresh_and_persist_properties( parameters={"event": "push"}, properties={"p": "enc"}, credentials={"c": "enc"}, - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, ) mock_session.scalar.return_value = subscription _mock_get_trigger_provider(mocker, provider_controller) @@ -989,7 +989,7 @@ def test_verify_subscription_credentials_should_raise_when_api_key_validation_fa provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value, credentials={"api_key": "old"}) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) provider_controller.validate_credentials.side_effect = RuntimeError("bad credentials") @@ -1012,7 +1012,7 @@ def test_verify_subscription_credentials_should_return_verified_when_api_key_val provider_controller: MagicMock, ) -> None: # Arrange 
- subscription = SimpleNamespace(credential_type=CredentialType.API_KEY.value, credentials={"api_key": "old"}) + subscription = SimpleNamespace(credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1036,7 +1036,7 @@ def test_verify_subscription_credentials_should_return_verified_for_non_api_key_ provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.OAUTH2.value, credentials={}) + subscription = SimpleNamespace(credential_type=CredentialType.OAUTH2, credentials={}) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1100,7 +1100,7 @@ def test_rebuild_trigger_subscription_should_raise_for_unsupported_credential_ty provider_controller: MagicMock, ) -> None: # Arrange - subscription = SimpleNamespace(credential_type=CredentialType.UNAUTHORIZED.value) + subscription = SimpleNamespace(credential_type=CredentialType.UNAUTHORIZED) _mock_get_trigger_provider(mocker, provider_controller) mocker.patch.object(TriggerProviderService, "get_subscription_by_id", return_value=subscription) @@ -1126,7 +1126,7 @@ def test_rebuild_trigger_subscription_should_raise_when_unsubscribe_fails( id="sub-1", user_id="user-1", endpoint_id="endpoint-1", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credentials={"api_key": "old"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) @@ -1159,7 +1159,7 @@ def test_rebuild_trigger_subscription_should_resubscribe_and_update_existing_sub id="sub-1", user_id="user-1", endpoint_id="endpoint-1", - credential_type=CredentialType.API_KEY.value, + credential_type=CredentialType.API_KEY, credentials={"api_key": "old-key"}, to_entity=lambda: SimpleNamespace(id="sub-1"), ) diff --git 
a/api/tests/unit_tests/services/test_webhook_service.py b/api/tests/unit_tests/services/test_webhook_service.py index ffdcc046f9..95edc436d7 100644 --- a/api/tests/unit_tests/services/test_webhook_service.py +++ b/api/tests/unit_tests/services/test_webhook_service.py @@ -140,7 +140,7 @@ class TestWebhookServiceUnit: assert args[1] == "text/plain" assert args[2] is webhook_trigger - def test_detect_binary_mimetype_uses_magic(self, monkeypatch): + def test_detect_binary_mimetype_uses_magic(self, monkeypatch: pytest.MonkeyPatch): """python-magic output should be used when available.""" fake_magic = MagicMock() fake_magic.from_buffer.return_value = "image/png" @@ -151,7 +151,7 @@ class TestWebhookServiceUnit: assert result == "image/png" fake_magic.from_buffer.assert_called_once() - def test_detect_binary_mimetype_fallback_without_magic(self, monkeypatch): + def test_detect_binary_mimetype_fallback_without_magic(self, monkeypatch: pytest.MonkeyPatch): """Fallback MIME type should be used when python-magic is unavailable.""" monkeypatch.setattr("services.trigger.webhook_service.magic", None) @@ -159,7 +159,7 @@ class TestWebhookServiceUnit: assert result == "application/octet-stream" - def test_detect_binary_mimetype_handles_magic_exception(self, monkeypatch): + def test_detect_binary_mimetype_handles_magic_exception(self, monkeypatch: pytest.MonkeyPatch): """Fallback MIME type should be used when python-magic raises an exception.""" try: import magic as real_magic diff --git a/api/tests/unit_tests/services/test_workflow_service.py b/api/tests/unit_tests/services/test_workflow_service.py index feafada59a..08c6ec76e2 100644 --- a/api/tests/unit_tests/services/test_workflow_service.py +++ b/api/tests/unit_tests/services/test_workflow_service.py @@ -61,7 +61,7 @@ class TestWorkflowAssociatedDataFactory: def create_app_mock( app_id: str = "app-123", tenant_id: str = "tenant-456", - mode: str = AppMode.WORKFLOW.value, + mode: str = AppMode.WORKFLOW, workflow_id: str | None = 
None, **kwargs, ) -> MagicMock: @@ -93,7 +93,7 @@ class TestWorkflowAssociatedDataFactory: tenant_id: str = "tenant-456", app_id: str = "app-123", version: str = Workflow.VERSION_DRAFT, - workflow_type: str = WorkflowType.WORKFLOW.value, + workflow_type: str = WorkflowType.WORKFLOW, graph: dict[str, Any] | None = None, features: dict[str, Any] | None = None, unique_hash: str | None = None, @@ -584,7 +584,7 @@ class TestWorkflowService: id="published-workflow-id", tenant_id=app.tenant_id, app_id=app.id, - type=WorkflowType.WORKFLOW.value, + type=WorkflowType.WORKFLOW, version="2026-03-19T00:00:00", graph=json.dumps(TestWorkflowAssociatedDataFactory.create_valid_workflow_graph()), features=json.dumps(legacy_features), @@ -597,7 +597,7 @@ class TestWorkflowService: id="draft-workflow-id", tenant_id=app.tenant_id, app_id=app.id, - type=WorkflowType.WORKFLOW.value, + type=WorkflowType.WORKFLOW, version=Workflow.VERSION_DRAFT, graph=json.dumps({"nodes": [], "edges": []}), features=json.dumps({}), @@ -685,7 +685,7 @@ class TestWorkflowService: Different app modes have different feature configurations. This ensures the features match the expected schema for workflow apps. 
""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) features = {"file_upload": {"enabled": False}} with patch("services.workflow_service.WorkflowAppConfigManager.config_validate") as mock_validate: @@ -696,7 +696,7 @@ class TestWorkflowService: def test_validate_features_structure_advanced_chat_mode(self, workflow_service): """Test validate_features_structure for advanced chat mode.""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.ADVANCED_CHAT.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.ADVANCED_CHAT) features = {"opening_statement": "Hello"} with patch("services.workflow_service.AdvancedChatAppConfigManager.config_validate") as mock_validate: @@ -707,7 +707,7 @@ class TestWorkflowService: def test_validate_features_structure_invalid_mode_raises_error(self, workflow_service): """Test validate_features_structure raises error for invalid mode.""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION) features = {} with pytest.raises(ValueError, match="Invalid app mode"): @@ -1326,7 +1326,7 @@ class TestWorkflowService: The conversion creates equivalent workflow nodes from the chat configuration, giving users more control and customization options. 
""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.CHAT.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.CHAT) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = { "name": "Converted Workflow", @@ -1337,7 +1337,7 @@ class TestWorkflowService: with patch("services.workflow_service.WorkflowConverter") as MockConverter: mock_converter = MockConverter.return_value - mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) mock_converter.convert_to_workflow.return_value = mock_new_app result = workflow_service.convert_to_workflow(app, account, args) @@ -1353,13 +1353,13 @@ class TestWorkflowService: Completion apps are simpler (single prompt-response), so the conversion creates a basic workflow with fewer nodes. """ - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.COMPLETION) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = {"name": "Converted Workflow"} with patch("services.workflow_service.WorkflowConverter") as MockConverter: mock_converter = MockConverter.return_value - mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + mock_new_app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) mock_converter.convert_to_workflow.return_value = mock_new_app result = workflow_service.convert_to_workflow(app, account, args) @@ -1373,7 +1373,7 @@ class TestWorkflowService: Only chat and completion apps can be converted to workflows. Apps that are already workflows or have other modes cannot be converted. 
""" - app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW.value) + app = TestWorkflowAssociatedDataFactory.create_app_mock(mode=AppMode.WORKFLOW) account = TestWorkflowAssociatedDataFactory.create_account_mock() args = {} @@ -2087,7 +2087,7 @@ class TestSetupVariablePool: This helper initialises the VariablePool used for single-step workflow execution. """ - def _make_workflow(self, workflow_type: str = WorkflowType.WORKFLOW.value) -> MagicMock: + def _make_workflow(self, workflow_type: str = WorkflowType.WORKFLOW) -> MagicMock: wf = MagicMock(spec=Workflow) wf.app_id = "app-1" wf.id = "wf-1" @@ -2176,7 +2176,7 @@ class TestSetupVariablePool: from models.workflow import WorkflowType # Arrange - workflow = self._make_workflow(workflow_type=WorkflowType.CHAT.value) + workflow = self._make_workflow(workflow_type=WorkflowType.CHAT) # Act with ( diff --git a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py index 663eec6a06..b5b9f0bd97 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_draft_variable_service.py @@ -398,7 +398,7 @@ class TestWorkflowDraftVariableService: self, mock_engine, mock_session, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, ): """Test resetting a node variable when execution record doesn't exist""" mock_repo_session = Mock(spec=Session) @@ -435,7 +435,7 @@ class TestWorkflowDraftVariableService: def test_reset_node_variable_with_valid_execution_record( self, mock_session, - monkeypatch, + monkeypatch: pytest.MonkeyPatch, ): """Test resetting a node variable with valid execution record - should restore from execution""" mock_repo_session = Mock(spec=Session) diff --git a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py 
b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py index dfdbd9acd6..17e9a077d6 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py @@ -414,8 +414,8 @@ def test_parse_event_message_should_parse_only_json_object( def test_is_terminal_event_should_recognize_finished_and_optional_paused_events() -> None: # Arrange - finished_event = {"event": StreamEvent.WORKFLOW_FINISHED.value} - paused_event = {"event": StreamEvent.WORKFLOW_PAUSED.value} + finished_event = {"event": StreamEvent.WORKFLOW_FINISHED} + paused_event = {"event": StreamEvent.WORKFLOW_PAUSED} # Act is_finished = service_module._is_terminal_event(finished_event, close_on_pause=False) @@ -426,7 +426,7 @@ def test_is_terminal_event_should_recognize_finished_and_optional_paused_events( assert is_finished is True assert paused_without_flag is False assert paused_with_flag is True - assert service_module._is_terminal_event(StreamEvent.PING.value, close_on_pause=True) is False + assert service_module._is_terminal_event(StreamEvent.PING, close_on_pause=True) is False def test_apply_message_context_should_update_payload_when_context_exists() -> None: @@ -569,7 +569,7 @@ def test_build_workflow_event_stream_should_emit_ping_and_terminal_snapshot_even monkeypatch.setattr( service_module, "_build_snapshot_events", - MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value, "task_id": "task-1"}]), + MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED, "task_id": "task-1"}]), ) # Act @@ -584,9 +584,9 @@ def test_build_workflow_event_stream_should_emit_ping_and_terminal_snapshot_even ) # Assert - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING finished_event = cast(Mapping[str, Any], events[1]) - assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED.value + assert finished_event["event"] == 
StreamEvent.WORKFLOW_FINISHED assert buffer_state.stop_event.is_set() is True node_repo.get_execution_snapshots_by_workflow_run.assert_called_once() called_kwargs = node_repo.get_execution_snapshots_by_workflow_run.call_args.kwargs @@ -643,7 +643,7 @@ def test_build_workflow_event_stream_should_emit_periodic_ping_and_stop_after_id ) # Assert - assert events == [StreamEvent.PING.value, StreamEvent.PING.value] + assert events == [StreamEvent.PING, StreamEvent.PING] assert buffer_state.stop_event.is_set() is True @@ -686,7 +686,7 @@ def test_build_workflow_event_stream_should_exit_when_buffer_done_and_empty( ) # Assert - assert events == [StreamEvent.PING.value] + assert events == [StreamEvent.PING] assert buffer_state.stop_event.is_set() is True @@ -706,7 +706,7 @@ def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) - snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value}]) + snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED}]) monkeypatch.setattr(service_module, "_build_snapshot_events", snapshot_builder) buffer_state = BufferState( queue=queue.Queue(), @@ -729,7 +729,7 @@ def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( ) # Assert - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING assert snapshot_builder.call_args.kwargs["pause_entity"] is None @@ -779,7 +779,7 @@ def test_build_snapshot_events_preserves_public_form_token(monkeypatch: pytest.M session_maker=cast(sessionmaker[Session], session_maker), ) - assert events[-2]["event"] == StreamEvent.HUMAN_INPUT_REQUIRED.value + assert events[-2]["event"] == 
StreamEvent.HUMAN_INPUT_REQUIRED assert events[-2]["data"]["form_token"] == "wtok" assert events[-2]["data"]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) pause_data = events[-1]["data"] @@ -837,6 +837,6 @@ def test_build_workflow_event_stream_loads_pause_tokens_without_flask_app_contex ) pause_event = cast(Mapping[str, Any], events[-1]) - assert pause_event["event"] == StreamEvent.WORKFLOW_PAUSED.value + assert pause_event["event"] == StreamEvent.WORKFLOW_PAUSED assert pause_event["data"]["reasons"][0]["form_token"] == "wtok" assert pause_event["data"]["reasons"][0]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) diff --git a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py index d2634d7d7b..4d711f1bf8 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service_additional.py @@ -215,8 +215,8 @@ class TestWorkflowEventSnapshotHelpers: assert result == expected def test_is_terminal_event_should_recognize_finished_and_optional_paused_events(self) -> None: - finished_event = {"event": StreamEvent.WORKFLOW_FINISHED.value} - paused_event = {"event": StreamEvent.WORKFLOW_PAUSED.value} + finished_event = {"event": StreamEvent.WORKFLOW_FINISHED} + paused_event = {"event": StreamEvent.WORKFLOW_PAUSED} is_finished = service_module._is_terminal_event(finished_event, include_paused=False) paused_without_flag = service_module._is_terminal_event(paused_event, include_paused=False) @@ -225,7 +225,7 @@ class TestWorkflowEventSnapshotHelpers: assert is_finished is True assert paused_without_flag is False assert paused_with_flag is True - assert service_module._is_terminal_event(StreamEvent.PING.value, include_paused=True) is False + assert 
service_module._is_terminal_event(StreamEvent.PING, include_paused=True) is False def test_apply_message_context_should_update_payload_when_context_exists(self) -> None: payload: dict[str, Any] = {"event": "workflow_started"} @@ -352,7 +352,7 @@ class TestBuildWorkflowEventStream: monkeypatch.setattr( service_module, "_build_snapshot_events", - MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value, "task_id": "task-1"}]), + MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED, "task_id": "task-1"}]), ) events = list( @@ -365,9 +365,9 @@ class TestBuildWorkflowEventStream: ) ) - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING finished_event = cast(Mapping[str, Any], events[1]) - assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED.value + assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED assert buffer_state.stop_event.is_set() is True node_repo.get_execution_snapshots_by_workflow_run.assert_called_once() called_kwargs = node_repo.get_execution_snapshots_by_workflow_run.call_args.kwargs @@ -421,7 +421,7 @@ class TestBuildWorkflowEventStream: ) ) - assert events == [StreamEvent.PING.value, StreamEvent.PING.value] + assert events == [StreamEvent.PING, StreamEvent.PING] assert buffer_state.stop_event.is_set() is True def test_build_workflow_event_stream_should_exit_when_buffer_done_and_empty( @@ -461,7 +461,7 @@ class TestBuildWorkflowEventStream: ) ) - assert events == [StreamEvent.PING.value] + assert events == [StreamEvent.PING] assert buffer_state.stop_event.is_set() is True def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( @@ -480,7 +480,7 @@ class TestBuildWorkflowEventStream: monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) monkeypatch.setattr(service_module, "_resolve_task_id", 
MagicMock(return_value="task-1")) - snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value}]) + snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED}]) monkeypatch.setattr(service_module, "_build_snapshot_events", snapshot_builder) buffer_state = BufferState( queue=queue.Queue(), @@ -501,5 +501,5 @@ class TestBuildWorkflowEventStream: ) ) - assert events[0] == StreamEvent.PING.value + assert events[0] == StreamEvent.PING assert snapshot_builder.call_args.kwargs["pause_entity"] is None diff --git a/api/tests/unit_tests/tasks/test_workflow_execute_task.py b/api/tests/unit_tests/tasks/test_workflow_execute_task.py index 72508bef52..2544c9d61a 100644 --- a/api/tests/unit_tests/tasks/test_workflow_execute_task.py +++ b/api/tests/unit_tests/tasks/test_workflow_execute_task.py @@ -122,7 +122,7 @@ def test_resume_app_execution_queries_message_by_conversation_and_workflow_run(m workflow_run = SimpleNamespace( workflow_id="wf-id", app_id="app-id", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by="account-id", tenant_id="tenant-id", ) @@ -208,7 +208,7 @@ def test_resume_app_execution_returns_early_when_advanced_chat_missing_conversat workflow_run = SimpleNamespace( workflow_id="wf-id", app_id="app-id", - created_by_role=CreatorUserRole.ACCOUNT.value, + created_by_role=CreatorUserRole.ACCOUNT, created_by="account-id", tenant_id="tenant-id", ) From c5ac191a7953d59cc772c0c9e3daa32b58e08ffa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 9 May 2026 13:11:09 +0900 Subject: [PATCH 06/13] chore(deps): bump gitpython from 3.1.49 to 3.1.50 in /api (#35958) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- api/uv.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/uv.lock b/api/uv.lock index 
c3db4b514c..10487f6bac 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -2660,14 +2660,14 @@ wheels = [ [[package]] name = "gitpython" -version = "3.1.49" +version = "3.1.50" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "gitdb" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e1/63/210aaa302d6a0a78daa67c5c15bbac2cad361722841278b0209b6da20855/gitpython-3.1.49.tar.gz", hash = "sha256:42f9399c9eb33fc581014bedd76049dfbaf6375aa2a5754575966387280315e1", size = 219367, upload-time = "2026-04-29T00:31:20.478Z" } +sdist = { url = "https://files.pythonhosted.org/packages/33/f6/354ae6491228b5eb40e10d89c4d13c651fe1cf7556e35ebdded50cff57ce/gitpython-3.1.50.tar.gz", hash = "sha256:80da2d12504d52e1f998772dc5baf6e553f8d2fcfe1fcc226c9d9a2ee3372dcc", size = 219798, upload-time = "2026-05-06T04:01:26.571Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fd/6f/b842bfa6f21d6f87c57f9abf7194225e55279d96d869775e19e9f7236fc5/gitpython-3.1.49-py3-none-any.whl", hash = "sha256:024b0422d7f84d15cd794844e029ffebd4c5d42a7eb9b936b458697ef550a02c", size = 212190, upload-time = "2026-04-29T00:31:18.412Z" }, + { url = "https://files.pythonhosted.org/packages/20/7a/1c6e3562dfd8950adbb11ffbc65d21e7c89d01a6e4f137fa981056de25c5/gitpython-3.1.50-py3-none-any.whl", hash = "sha256:d352abe2908d07355014abdd21ddf798c2a961469239afec4962e9da884858f9", size = 212507, upload-time = "2026-05-06T04:01:23.799Z" }, ] [[package]] From 5ebeb34feb56fba200c90a99ba0c3083e7bdac29 Mon Sep 17 00:00:00 2001 From: yyh <92089059+lyzno1@users.noreply.github.com> Date: Sat, 9 May 2026 12:35:29 +0800 Subject: [PATCH 07/13] fix(web): forward csp nonce to theme script (#35960) --- web/app/layout.tsx | 4 ++++ web/proxy.ts | 19 +++++++++++++------ 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/web/app/layout.tsx b/web/app/layout.tsx index 1ec9217296..8bb2069aaf 100644 --- a/web/app/layout.tsx +++ b/web/app/layout.tsx @@ -5,9 +5,11 @@ import { Provider as 
JotaiProvider } from 'jotai/react' import { ThemeProvider } from 'next-themes' import { NuqsAdapter } from 'nuqs/adapters/next/app' import AmplitudeProvider from '@/app/components/base/amplitude' +import { IS_PROD } from '@/config' import { TanstackQueryInitializer } from '@/context/query-client' import { getDatasetMap } from '@/env' import { getLocaleOnServer } from '@/i18n-config/server' +import { headers } from '@/next/headers' import PartnerStackCookieRecorder from './components/billing/partner-stack/cookie-recorder' import CreateAppAttributionBootstrap from './components/create-app-attribution-bootstrap' import { AgentationLoader } from './components/devtools/agentation-loader' @@ -32,6 +34,7 @@ const LocaleLayout = async ({ }) => { const locale = await getLocaleOnServer() const datasetMap = getDatasetMap() + const nonce = IS_PROD ? (await headers()).get('x-nonce') ?? undefined : undefined return ( @@ -64,6 +67,7 @@ const LocaleLayout = async ({ defaultTheme="system" enableSystem disableTransitionOnChange + nonce={nonce} > diff --git a/web/proxy.ts b/web/proxy.ts index 983713fd0e..d735c9f568 100644 --- a/web/proxy.ts +++ b/web/proxy.ts @@ -18,15 +18,16 @@ const wrapResponseWithXFrameOptions = (response: NextResponse, pathname: string) export function proxy(request: NextRequest) { const { pathname } = request.nextUrl const requestHeaders = new Headers(request.headers) - const response = NextResponse.next({ - request: { - headers: requestHeaders, - }, - }) const isWhiteListEnabled = !!env.NEXT_PUBLIC_CSP_WHITELIST && process.env.NODE_ENV === 'production' - if (!isWhiteListEnabled) + if (!isWhiteListEnabled) { + const response = NextResponse.next({ + request: { + headers: requestHeaders, + }, + }) return wrapResponseWithXFrameOptions(response, pathname) + } const whiteList = `${env.NEXT_PUBLIC_CSP_WHITELIST} ${NECESSARY_DOMAIN}` const nonce = Buffer.from(crypto.randomUUID()).toString('base64') @@ -60,6 +61,12 @@ export function proxy(request: NextRequest) { 
contentSecurityPolicyHeaderValue, ) + const response = NextResponse.next({ + request: { + headers: requestHeaders, + }, + }) + response.headers.set( 'Content-Security-Policy', contentSecurityPolicyHeaderValue, From d5ad6aedc0054162a95c4a7ac02050c48d74ea90 Mon Sep 17 00:00:00 2001 From: chariri Date: Sat, 9 May 2026 13:52:45 +0900 Subject: [PATCH 08/13] fix(swagger): add util to convert BaseModel to schema for query params (#35959) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/controllers/common/schema.py | 117 ++++++++++++++++++ .../console/explore/recommended_app.py | 6 +- api/openapi/markdown/console-swagger.md | 4 +- .../controllers/common/test_schema.py | 85 +++++++++++-- 4 files changed, 196 insertions(+), 16 deletions(-) diff --git a/api/controllers/common/schema.py b/api/controllers/common/schema.py index 0c5e23c29c..57070f1c80 100644 --- a/api/controllers/common/schema.py +++ b/api/controllers/common/schema.py @@ -6,7 +6,9 @@ These helpers keep that translation centralized so models registered through `register_schema_models` emit resolvable Swagger 2.0 references. 
""" +from collections.abc import Mapping from enum import StrEnum +from typing import Any, NotRequired, TypedDict from flask_restx import Namespace from pydantic import BaseModel, TypeAdapter @@ -14,6 +16,26 @@ from pydantic import BaseModel, TypeAdapter DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" +QueryParamDoc = TypedDict( + "QueryParamDoc", + { + "in": NotRequired[str], + "type": NotRequired[str], + "items": NotRequired[dict[str, object]], + "required": NotRequired[bool], + "description": NotRequired[str], + "enum": NotRequired[list[object]], + "default": NotRequired[object], + "minimum": NotRequired[int | float], + "maximum": NotRequired[int | float], + "minLength": NotRequired[int], + "maxLength": NotRequired[int], + "minItems": NotRequired[int], + "maxItems": NotRequired[int], + }, +) + + def _register_json_schema(namespace: Namespace, name: str, schema: dict) -> None: """Register a JSON schema and promote any nested Pydantic `$defs`.""" @@ -69,9 +91,104 @@ def register_enum_models(namespace: Namespace, *models: type[StrEnum]) -> None: ) +def query_params_from_model(model: type[BaseModel]) -> dict[str, QueryParamDoc]: + """Build Flask-RESTX query parameter docs from a flat Pydantic model. + + `Namespace.expect()` treats Pydantic schema models as request bodies, so GET + endpoints should keep runtime validation on the Pydantic model and feed this + derived mapping to `Namespace.doc(params=...)` for Swagger documentation. 
+ """ + + schema = model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0) + properties = schema.get("properties", {}) + if not isinstance(properties, Mapping): + return {} + + required = schema.get("required", []) + required_names = set(required) if isinstance(required, list) else set() + + params: dict[str, QueryParamDoc] = {} + for name, property_schema in properties.items(): + if not isinstance(name, str) or not isinstance(property_schema, Mapping): + continue + + params[name] = _query_param_from_property(property_schema, required=name in required_names) + + return params + + +def _query_param_from_property(property_schema: Mapping[str, Any], *, required: bool) -> QueryParamDoc: + param_schema = _nullable_property_schema(property_schema) + param_doc: QueryParamDoc = {"in": "query", "required": required} + + description = param_schema.get("description") + if isinstance(description, str): + param_doc["description"] = description + + schema_type = param_schema.get("type") + if isinstance(schema_type, str) and schema_type in {"array", "boolean", "integer", "number", "string"}: + param_doc["type"] = schema_type + if schema_type == "array": + items = param_schema.get("items") + if isinstance(items, Mapping): + item_type = items.get("type") + if isinstance(item_type, str): + param_doc["items"] = {"type": item_type} + + enum = param_schema.get("enum") + if isinstance(enum, list): + param_doc["enum"] = enum + + default = param_schema.get("default") + if default is not None: + param_doc["default"] = default + + minimum = param_schema.get("minimum") + if isinstance(minimum, int | float): + param_doc["minimum"] = minimum + + maximum = param_schema.get("maximum") + if isinstance(maximum, int | float): + param_doc["maximum"] = maximum + + min_length = param_schema.get("minLength") + if isinstance(min_length, int): + param_doc["minLength"] = min_length + + max_length = param_schema.get("maxLength") + if isinstance(max_length, int): + param_doc["maxLength"] = 
max_length + + min_items = param_schema.get("minItems") + if isinstance(min_items, int): + param_doc["minItems"] = min_items + + max_items = param_schema.get("maxItems") + if isinstance(max_items, int): + param_doc["maxItems"] = max_items + + return param_doc + + +def _nullable_property_schema(property_schema: Mapping[str, Any]) -> Mapping[str, Any]: + any_of = property_schema.get("anyOf") + if not isinstance(any_of, list): + return property_schema + + non_null_candidates = [ + candidate for candidate in any_of if isinstance(candidate, Mapping) and candidate.get("type") != "null" + ] + + if len(non_null_candidates) == 1: + return {**property_schema, **non_null_candidates[0]} + + return property_schema + + __all__ = [ "DEFAULT_REF_TEMPLATE_SWAGGER_2_0", "get_or_create_model", + "query_params_from_model", "register_enum_models", "register_schema_model", "register_schema_models", diff --git a/api/controllers/console/explore/recommended_app.py b/api/controllers/console/explore/recommended_app.py index fa65c8daf1..572f9773a1 100644 --- a/api/controllers/console/explore/recommended_app.py +++ b/api/controllers/console/explore/recommended_app.py @@ -5,7 +5,7 @@ from flask_restx import Resource from pydantic import BaseModel, Field, computed_field, field_validator from constants.languages import languages -from controllers.common.schema import register_schema_models +from controllers.common.schema import query_params_from_model, register_schema_models from controllers.console import console_ns from controllers.console.wraps import account_initialization_required from fields.base import ResponseModel @@ -15,7 +15,7 @@ from services.recommended_app_service import RecommendedAppService class RecommendedAppsQuery(BaseModel): - language: str | None = Field(default=None) + language: str | None = Field(default=None, description="Language code for recommended app localization") class RecommendedAppInfoResponse(ResponseModel): @@ -74,7 +74,7 @@ register_schema_models( 
@console_ns.route("/explore/apps") class RecommendedAppListApi(Resource): - @console_ns.expect(console_ns.models[RecommendedAppsQuery.__name__]) + @console_ns.doc(params=query_params_from_model(RecommendedAppsQuery)) @console_ns.response(200, "Success", console_ns.models[RecommendedAppListResponse.__name__]) @login_required @account_initialization_required diff --git a/api/openapi/markdown/console-swagger.md b/api/openapi/markdown/console-swagger.md index a69cecd83c..f4897e93c5 100644 --- a/api/openapi/markdown/console-swagger.md +++ b/api/openapi/markdown/console-swagger.md @@ -5507,7 +5507,7 @@ Delete an API key for a dataset | Name | Located in | Description | Required | Schema | | ---- | ---------- | ----------- | -------- | ------ | -| payload | body | | Yes | [RecommendedAppsQuery](#recommendedappsquery) | +| language | query | Language code for recommended app localization | No | string | ##### Responses @@ -13289,7 +13289,7 @@ Default value types for form inputs. | Name | Type | Description | Required | | ---- | ---- | ----------- | -------- | -| language | | | No | +| language | | Language code for recommended app localization | No | #### RelatedAppList diff --git a/api/tests/unit_tests/controllers/common/test_schema.py b/api/tests/unit_tests/controllers/common/test_schema.py index 6cf36e3bce..575f8c839c 100644 --- a/api/tests/unit_tests/controllers/common/test_schema.py +++ b/api/tests/unit_tests/controllers/common/test_schema.py @@ -1,10 +1,11 @@ import sys from enum import StrEnum +from typing import Literal from unittest.mock import MagicMock, patch import pytest from flask_restx import Namespace -from pydantic import BaseModel +from pydantic import BaseModel, ConfigDict, Field class UserModel(BaseModel): @@ -25,6 +26,27 @@ class ParentModel(BaseModel): child: ChildModel +class StatusEnum(StrEnum): + ACTIVE = "active" + INACTIVE = "inactive" + + +class PriorityEnum(StrEnum): + HIGH = "high" + LOW = "low" + + +class QueryModel(BaseModel): + model_config 
= ConfigDict(populate_by_name=True) + + page: int = Field(default=1, ge=1, le=100, description="Page number") + keyword: str | None = Field(default=None, min_length=1, max_length=50, description="Search keyword") + status: Literal["active", "inactive"] | None = Field(default=None, description="Status filter") + app_id: str = Field(..., alias="appId", description="Application ID") + tag_ids: list[str] = Field(default_factory=list, min_length=1, max_length=3, description="Tag IDs") + ambiguous: int | str | None = Field(default=None, description="Ambiguous query parameter") + + @pytest.fixture(autouse=True) def mock_console_ns(): """Mock the console_ns to avoid circular imports during test collection.""" @@ -124,16 +146,6 @@ def test_register_schema_models_calls_register_schema_model(monkeypatch: pytest. ] -class StatusEnum(StrEnum): - ACTIVE = "active" - INACTIVE = "inactive" - - -class PriorityEnum(StrEnum): - HIGH = "high" - LOW = "low" - - def test_get_or_create_model_returns_existing_model(mock_console_ns): from controllers.common.schema import get_or_create_model @@ -211,3 +223,54 @@ def test_register_enum_models_uses_correct_ref_template(): # Verify the schema contains enum values assert "enum" in schema or "anyOf" in schema + + +def test_query_params_from_model_builds_flask_restx_doc_params(): + from controllers.common.schema import query_params_from_model + + params = query_params_from_model(QueryModel) + + assert params["page"] == { + "in": "query", + "required": False, + "description": "Page number", + "type": "integer", + "default": 1, + "minimum": 1, + "maximum": 100, + } + assert params["keyword"] == { + "in": "query", + "required": False, + "description": "Search keyword", + "type": "string", + "minLength": 1, + "maxLength": 50, + } + assert params["status"] == { + "in": "query", + "required": False, + "description": "Status filter", + "type": "string", + "enum": ["active", "inactive"], + } + assert params["appId"] == { + "in": "query", + "required": 
True, + "description": "Application ID", + "type": "string", + } + assert params["tag_ids"] == { + "in": "query", + "required": False, + "description": "Tag IDs", + "type": "array", + "items": {"type": "string"}, + "minItems": 1, + "maxItems": 3, + } + assert params["ambiguous"] == { + "in": "query", + "required": False, + "description": "Ambiguous query parameter", + } From 2bb1f0906b67dc573aa3c2d4abb0fac62100c309 Mon Sep 17 00:00:00 2001 From: yyh <92089059+lyzno1@users.noreply.github.com> Date: Sat, 9 May 2026 13:26:21 +0800 Subject: [PATCH 09/13] refactor(web): migrate legacy tooltip callers (#35961) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- eslint-suppressions.json | 174 +-------- packages/dify-ui/README.md | 7 + .../billing/cloud-plan-payment-flow.test.tsx | 22 +- .../specific-groups-or-members.tsx | 15 +- .../app/configuration/config-var/index.tsx | 12 +- .../config/agent/agent-setting/item-panel.tsx | 13 +- .../app/log/__tests__/list.spec.tsx | 4 - .../app/overview/__tests__/app-card.spec.tsx | 11 +- .../components/app/workflow-log/detail.tsx | 32 +- .../apps/__tests__/app-card.spec.tsx | 5 - .../__tests__/progress-tooltip.spec.tsx | 8 +- .../chat/citation/__tests__/tooltip.spec.tsx | 16 +- .../chat/chat/citation/progress-tooltip.tsx | 18 +- .../base/chat/chat/citation/tooltip.tsx | 29 +- web/app/components/base/copy-icon/index.tsx | 29 +- .../annotation-reply/config-param.tsx | 10 +- .../__tests__/param-config-content.spec.tsx | 3 +- .../text-to-speech/param-config-content.tsx | 21 +- web/app/components/base/file-thumb/index.tsx | 68 ++-- .../form/components/__tests__/label.spec.tsx | 4 +- .../components/base/form/components/label.tsx | 12 +- .../components/base/input-with-copy/index.tsx | 32 +- .../components/base/tooltip/TooltipManager.ts | 27 -- .../tooltip/__tests__/TooltipManager.spec.ts | 129 ------- .../base/tooltip/__tests__/content.spec.tsx | 49 --- .../base/tooltip/__tests__/index.spec.tsx | 333 
------------------ web/app/components/base/tooltip/content.tsx | 22 -- .../components/base/tooltip/index.stories.tsx | 60 ---- web/app/components/base/tooltip/index.tsx | 231 ------------ .../cloud-plan-item/__tests__/index.spec.tsx | 2 +- .../list/__tests__/index.spec.tsx | 7 +- .../list/item/__tests__/index.spec.tsx | 7 +- .../list/item/__tests__/tooltip.spec.tsx | 7 +- .../cloud-plan-item/list/item/tooltip.tsx | 21 +- .../documents/components/operations.tsx | 42 ++- .../file-list/list/__tests__/item.spec.tsx | 6 - .../online-drive/file-list/list/item.tsx | 70 ++-- .../processing/embedding-process/index.tsx | 21 +- .../detail/completed/display-toggle.tsx | 41 ++- .../detail/completed/segment-card/index.tsx | 66 ++-- .../metadata/edit-metadata-batch/modal.tsx | 16 +- .../dataset-metadata-drawer.tsx | 6 +- ...itch-credential-in-load-balancing.spec.tsx | 8 +- .../model-auth/config-provider.tsx | 12 +- .../switch-credential-in-load-balancing.tsx | 12 +- .../__tests__/status-indicators.spec.tsx | 18 +- .../status-indicators.tsx | 50 ++- .../__tests__/popup-item.spec.tsx | 4 - .../provider-added-card/model-list-item.tsx | 16 +- .../subscription-list/subscription-card.tsx | 35 +- .../__tests__/reasoning-config-form.spec.tsx | 8 +- .../components/reasoning-config-form.tsx | 55 +-- .../tool-selector/components/tool-item.tsx | 38 +- .../components/plugins/plugin-item/index.tsx | 20 +- .../components/panel/input-field/index.tsx | 10 +- .../config-credentials.tsx | 32 +- .../workflow-tool/__tests__/index.spec.tsx | 16 - .../block-selector/__tests__/tabs.spec.tsx | 21 +- .../workflow/block-selector/tabs.tsx | 31 +- .../mcp-tool-not-support-tooltip.tsx | 21 +- .../components/switch-plugin-version.tsx | 157 +++++---- .../workflow/nodes/iteration-start/index.tsx | 22 +- .../search-method-option.tsx | 13 +- .../metadata/metadata-filter/index.tsx | 12 +- .../workflow/nodes/loop-start/index.tsx | 22 +- .../nodes/parameter-extractor/panel.tsx | 18 +- 
.../__tests__/integration.spec.tsx | 4 +- .../__tests__/node.spec.tsx | 23 +- .../components/advanced-setting.tsx | 18 +- .../nodes/question-classifier/node.tsx | 23 +- .../components/trigger-form/item.tsx | 18 +- .../trigger-webhook/__tests__/panel.spec.tsx | 4 - .../workflow/nodes/trigger-webhook/panel.tsx | 58 +-- .../panel/env-panel/variable-modal.tsx | 18 +- web/app/components/workflow/run/node.tsx | 15 +- .../workflow/variable-inspect/listening.tsx | 48 +-- .../components/nodes/base.tsx | 22 +- .../nodes/iteration-start/index.tsx | 12 +- .../components/nodes/loop-start/index.tsx | 12 +- web/docs/overlay-migration.md | 6 +- web/eslint.constants.mjs | 7 - 81 files changed, 841 insertions(+), 1806 deletions(-) delete mode 100644 web/app/components/base/tooltip/TooltipManager.ts delete mode 100644 web/app/components/base/tooltip/__tests__/TooltipManager.spec.ts delete mode 100644 web/app/components/base/tooltip/__tests__/content.spec.tsx delete mode 100644 web/app/components/base/tooltip/__tests__/index.spec.tsx delete mode 100644 web/app/components/base/tooltip/content.tsx delete mode 100644 web/app/components/base/tooltip/index.stories.tsx delete mode 100644 web/app/components/base/tooltip/index.tsx diff --git a/eslint-suppressions.json b/eslint-suppressions.json index cb41ef5f83..2326e92d2f 100644 --- a/eslint-suppressions.json +++ b/eslint-suppressions.json @@ -272,11 +272,6 @@ "count": 1 } }, - "web/app/components/app/app-access-control/specific-groups-or-members.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/app-publisher/features-wrapper.tsx": { "ts/no-explicit-any": { "count": 4 @@ -323,11 +318,6 @@ "count": 4 } }, - "web/app/components/app/configuration/config-var/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/config-var/select-var-type.tsx": { "ts/no-explicit-any": { "count": 1 @@ -341,11 +331,6 @@ "count": 1 } }, - 
"web/app/components/app/configuration/config/agent/agent-setting/item-panel.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/config/agent/agent-tools/index.tsx": { "ts/no-explicit-any": { "count": 9 @@ -593,11 +578,6 @@ "count": 2 } }, - "web/app/components/app/workflow-log/detail.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/workflow-log/filter.tsx": { "react-refresh/only-export-components": { "count": 1 @@ -967,11 +947,6 @@ "count": 1 } }, - "web/app/components/base/features/new-feature-panel/annotation-reply/config-param.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/features/new-feature-panel/annotation-reply/index.tsx": { "ts/no-explicit-any": { "count": 3 @@ -1005,11 +980,6 @@ "count": 2 } }, - "web/app/components/base/features/new-feature-panel/text-to-speech/param-config-content.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/features/types.ts": { "erasable-syntax-only/enums": { "count": 2 @@ -2096,11 +2066,6 @@ "count": 4 } }, - "web/app/components/datasets/documents/components/operations.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/components/rename-modal.tsx": { "no-restricted-imports": { "count": 1 @@ -2116,11 +2081,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/list/item.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/index.tsx": { "react/set-state-in-effect": { "count": 5 @@ -2166,11 +2126,6 @@ "count": 2 } }, - "web/app/components/datasets/documents/create-from-pipeline/processing/embedding-process/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/create-from-pipeline/steps/index.ts": { "no-barrel-files/no-barrel-files": { 
"count": 3 @@ -2196,11 +2151,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/completed/display-toggle.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/completed/hooks/index.ts": { "no-barrel-files/no-barrel-files": { "count": 5 @@ -2217,11 +2167,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/completed/segment-card/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/context.ts": { "ts/no-explicit-any": { "count": 1 @@ -2310,7 +2255,7 @@ }, "web/app/components/datasets/metadata/edit-metadata-batch/modal.tsx": { "no-restricted-imports": { - "count": 2 + "count": 1 } }, "web/app/components/datasets/metadata/hooks/use-edit-dataset-metadata.ts": { @@ -2338,7 +2283,7 @@ }, "web/app/components/datasets/metadata/metadata-dataset/dataset-metadata-drawer.tsx": { "no-restricted-imports": { - "count": 3 + "count": 2 } }, "web/app/components/datasets/metadata/metadata-dataset/select-metadata-modal.tsx": { @@ -2571,11 +2516,6 @@ "count": 4 } }, - "web/app/components/header/account-setting/model-provider-page/model-auth/config-provider.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/header/account-setting/model-provider-page/model-auth/hooks/index.ts": { "no-barrel-files/no-barrel-files": { "count": 6 @@ -2602,9 +2542,6 @@ } }, "web/app/components/header/account-setting/model-provider-page/model-auth/switch-credential-in-load-balancing.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 3 } @@ -2630,9 +2567,6 @@ } }, "web/app/components/header/account-setting/model-provider-page/model-parameter-modal/status-indicators.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 2 } @@ -2647,11 +2581,6 @@ "count": 2 } }, - "web/app/components/header/account-setting/model-provider-page/provider-added-card/model-list-item.tsx": { - 
"no-restricted-imports": { - "count": 1 - } - }, "web/app/components/header/account-setting/model-provider-page/provider-added-card/model-load-balancing-configs.tsx": { "ts/no-explicit-any": { "count": 5 @@ -2900,11 +2829,6 @@ "count": 2 } }, - "web/app/components/plugins/plugin-detail-panel/subscription-list/subscription-card.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-detail-panel/subscription-list/types.ts": { "erasable-syntax-only/enums": { "count": 1 @@ -2915,11 +2839,6 @@ "count": 7 } }, - "web/app/components/plugins/plugin-detail-panel/tool-selector/components/tool-item.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-detail-panel/tool-selector/hooks/index.ts": { "no-barrel-files/no-barrel-files": { "count": 2 @@ -2934,9 +2853,6 @@ } }, "web/app/components/plugins/plugin-item/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -3028,11 +2944,6 @@ "count": 1 } }, - "web/app/components/rag-pipeline/components/panel/input-field/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/rag-pipeline/components/panel/test-run/preparation/document-processing/index.tsx": { "ts/no-explicit-any": { "count": 1 @@ -3193,7 +3104,7 @@ }, "web/app/components/tools/edit-custom-collection-modal/config-credentials.tsx": { "no-restricted-imports": { - "count": 2 + "count": 1 } }, "web/app/components/tools/edit-custom-collection-modal/get-schema.tsx": { @@ -3380,11 +3291,6 @@ "count": 1 } }, - "web/app/components/workflow/block-selector/tabs.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/block-selector/tool/tool-list-flat-view/list.tsx": { "ts/no-explicit-any": { "count": 1 @@ -3651,11 +3557,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/_base/components/mcp-tool-not-support-tooltip.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, 
"web/app/components/workflow/nodes/_base/components/memory-config.tsx": { "unicorn/prefer-number-properties": { "count": 1 @@ -3691,11 +3592,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/_base/components/switch-plugin-version.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/_base/components/variable/match-schema-type.ts": { "ts/no-explicit-any": { "count": 8 @@ -4050,11 +3946,6 @@ "count": 5 } }, - "web/app/components/workflow/nodes/iteration-start/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/iteration/default.ts": { "ts/no-explicit-any": { "count": 1 @@ -4075,11 +3966,6 @@ "count": 4 } }, - "web/app/components/workflow/nodes/knowledge-base/components/retrieval-setting/search-method-option.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/knowledge-base/components/retrieval-setting/type.ts": { "ts/no-explicit-any": { "count": 2 @@ -4113,11 +3999,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/metadata-filter/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/knowledge-retrieval/default.ts": { "ts/no-explicit-any": { "count": 1 @@ -4240,11 +4121,6 @@ "count": 7 } }, - "web/app/components/workflow/nodes/loop-start/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/loop/components/condition-list/condition-input.tsx": { "ts/no-explicit-any": { "count": 1 @@ -4306,11 +4182,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/parameter-extractor/panel.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/parameter-extractor/types.ts": { "erasable-syntax-only/enums": { "count": 2 @@ -4329,11 +4200,6 @@ "count": 9 } }, - "web/app/components/workflow/nodes/question-classifier/components/advanced-setting.tsx": { - 
"no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/question-classifier/components/class-item.tsx": { "react/set-state-in-effect": { "count": 1 @@ -4352,11 +4218,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/question-classifier/node.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/question-classifier/use-config.ts": { "react/set-state-in-effect": { "count": 2 @@ -4464,9 +4325,6 @@ } }, "web/app/components/workflow/nodes/trigger-plugin/components/trigger-form/item.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 1 } @@ -4522,11 +4380,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/trigger-webhook/panel.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/utils.ts": { "ts/no-explicit-any": { "count": 1 @@ -4637,9 +4490,6 @@ } }, "web/app/components/workflow/panel/env-panel/variable-modal.tsx": { - "no-restricted-imports": { - "count": 1 - }, "react/set-state-in-effect": { "count": 4 }, @@ -4870,9 +4720,6 @@ } }, "web/app/components/workflow/variable-inspect/listening.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 2 } @@ -4911,26 +4758,11 @@ "count": 5 } }, - "web/app/components/workflow/workflow-preview/components/nodes/base.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/workflow-preview/components/nodes/constants.ts": { "ts/no-explicit-any": { "count": 1 } }, - "web/app/components/workflow/workflow-preview/components/nodes/iteration-start/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/workflow-preview/components/nodes/loop-start/index.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/workflow-preview/components/zoom-in-out.tsx": { "erasable-syntax-only/enums": { "count": 1 diff --git a/packages/dify-ui/README.md 
b/packages/dify-ui/README.md index 2915fe5db7..c78faede89 100644 --- a/packages/dify-ui/README.md +++ b/packages/dify-ui/README.md @@ -99,6 +99,13 @@ See `[web/docs/overlay-migration.md](../../web/docs/overlay-migration.md)` for t - Never create an extra manual portal on top of our primitives — use the exported content / portal parts such as `DialogContent`, `PopoverContent`, and `DrawerPortal`. Base UI handles focus management, scroll-locking, and dismissal. - When a primitive needs additional presentation chrome (e.g. a custom backdrop), add it **inside** the exported component, not at call sites. +### Tooltip, infotip, and popover semantics + +- Use `Tooltip` only for short, non-interactive visual labels. The trigger must already have visible text or an `aria-label`; the tooltip is not the accessible name and must not contain links, buttons, forms, or structured prose. +- Use `Popover` for explanatory content, long text, rich layout, or anything users may need to reach on touch or with assistive technology. In `web/`, the `Infotip` wrapper is the preferred pattern for a `?` help glyph backed by `Popover`. +- Pick a `placement` and let the primitive own spacing. Avoid per-call-site offsets unless the component API explicitly needs a measured layout exception. +- When passing a Base UI trigger `render` prop, render a real `
)} diff --git a/web/app/components/app/configuration/config/agent/agent-setting/item-panel.tsx b/web/app/components/app/configuration/config/agent/agent-setting/item-panel.tsx index 56f1863ec1..54ffee0600 100644 --- a/web/app/components/app/configuration/config/agent/agent-setting/item-panel.tsx +++ b/web/app/components/app/configuration/config/agent/agent-setting/item-panel.tsx @@ -2,7 +2,7 @@ import type { FC } from 'react' import { cn } from '@langgenius/dify-ui/cn' import * as React from 'react' -import Tooltip from '@/app/components/base/tooltip' +import { Infotip } from '@/app/components/base/infotip' type Props = { className?: string @@ -24,14 +24,9 @@ const ItemPanel: FC = ({
{icon}
{name}
- - {description} -
- )} - > - + + {description} +
{children} diff --git a/web/app/components/app/log/__tests__/list.spec.tsx b/web/app/components/app/log/__tests__/list.spec.tsx index 25512ed689..fe589b599a 100644 --- a/web/app/components/app/log/__tests__/list.spec.tsx +++ b/web/app/components/app/log/__tests__/list.spec.tsx @@ -84,10 +84,6 @@ vi.mock('@/app/components/app/store', () => ({ }), })) -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ children }: { children: ReactNode }) => <>{children}, -})) - vi.mock('@/app/components/base/drawer', () => ({ default: ({ children, isOpen, onClose }: { children: ReactNode, isOpen: boolean, onClose: () => void }) => ( isOpen diff --git a/web/app/components/app/overview/__tests__/app-card.spec.tsx b/web/app/components/app/overview/__tests__/app-card.spec.tsx index a6bacce887..1e9ba71a4f 100644 --- a/web/app/components/app/overview/__tests__/app-card.spec.tsx +++ b/web/app/components/app/overview/__tests__/app-card.spec.tsx @@ -1,4 +1,4 @@ -import type { ReactElement, ReactNode } from 'react' +import type { ReactElement } from 'react' import type { AppDetailResponse } from '@/models/app' import { fireEvent, screen, waitFor } from '@testing-library/react' import { renderWithSystemFeatures } from '@/__tests__/utils/mock-system-features' @@ -98,15 +98,6 @@ vi.mock('../../app-access-control', () => ({ ), })) -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ children, popupContent }: { children: ReactNode, popupContent?: ReactNode }) => ( -
- {children} - {popupContent} -
- ), -})) - const mockWindowOpen = vi.fn() Object.defineProperty(window, 'open', { writable: true, diff --git a/web/app/components/app/workflow-log/detail.tsx b/web/app/components/app/workflow-log/detail.tsx index e191bfb794..05cd6f1676 100644 --- a/web/app/components/app/workflow-log/detail.tsx +++ b/web/app/components/app/workflow-log/detail.tsx @@ -1,9 +1,9 @@ 'use client' import type { FC } from 'react' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { RiCloseLine, RiPlayLargeLine } from '@remixicon/react' import { useTranslation } from 'react-i18next' import { useStore } from '@/app/components/app/store' -import TooltipPlus from '@/app/components/base/tooltip' import { WorkflowContextProvider } from '@/app/components/workflow/context' import Run from '@/app/components/workflow/run' import { useRouter } from '@/next/navigation' @@ -33,19 +33,23 @@ const DetailPanel: FC = ({ runID, onClose, canReplay = false }) => {

{t('runDetail.workflowTitle', { ns: 'appLog' })}

{canReplay && ( - - - + + + + + )} + /> + + {t('runDetail.testWithParams', { ns: 'appLog' })} + + )}
diff --git a/web/app/components/apps/__tests__/app-card.spec.tsx b/web/app/components/apps/__tests__/app-card.spec.tsx index c841617474..d61ca306ae 100644 --- a/web/app/components/apps/__tests__/app-card.spec.tsx +++ b/web/app/components/apps/__tests__/app-card.spec.tsx @@ -296,11 +296,6 @@ vi.mock('@langgenius/dify-ui/dropdown-menu', () => { } }) -// Tooltip uses portals - minimal mock preserving popup content as title attribute -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ children, popupContent }: { children: React.ReactNode, popupContent: React.ReactNode }) => React.createElement('div', { title: popupContent }, children), -})) - // AppCardTags has tag API dependencies - mock for isolated testing vi.mock('@/features/tag-management/components/app-card-tags', () => ({ AppCardTags: ({ tags }: { tags?: { id: string, name: string }[] }) => { diff --git a/web/app/components/base/chat/chat/citation/__tests__/progress-tooltip.spec.tsx b/web/app/components/base/chat/chat/citation/__tests__/progress-tooltip.spec.tsx index a47123aafd..f53e7d15c3 100644 --- a/web/app/components/base/chat/chat/citation/__tests__/progress-tooltip.spec.tsx +++ b/web/app/components/base/chat/chat/citation/__tests__/progress-tooltip.spec.tsx @@ -55,7 +55,7 @@ describe('ProgressTooltip', () => { await user.hover(screen.getByTestId('progress-trigger-content')) - expect(screen.getByTestId('progress-tooltip-popup')).toBeInTheDocument() + expect(await screen.findByTestId('progress-tooltip-popup')).toBeInTheDocument() }) it('should hide the tooltip popup on mouse leave', async () => { @@ -74,7 +74,7 @@ describe('ProgressTooltip', () => { await user.hover(screen.getByTestId('progress-trigger-content')) - expect(screen.getByTestId('progress-tooltip-popup')).toHaveTextContent(/hitScore/i) + expect(await screen.findByTestId('progress-tooltip-popup')).toHaveTextContent(/hitScore/i) }) it('should show the data value inside the tooltip popup', async () => { @@ -83,7 +83,7 @@ 
describe('ProgressTooltip', () => { await user.hover(screen.getByTestId('progress-trigger-content')) - expect(screen.getByTestId('progress-tooltip-popup')).toHaveTextContent('0.8') + expect(await screen.findByTestId('progress-tooltip-popup')).toHaveTextContent('0.8') }) }) @@ -126,7 +126,7 @@ describe('ProgressTooltip', () => { await user.unhover(screen.getByTestId('progress-trigger-content')) await user.hover(screen.getByTestId('progress-trigger-content')) - expect(screen.getByTestId('progress-tooltip-popup')).toBeInTheDocument() + expect(await screen.findByTestId('progress-tooltip-popup')).toBeInTheDocument() }) it('should keep tooltip closed without any interaction', () => { diff --git a/web/app/components/base/chat/chat/citation/__tests__/tooltip.spec.tsx b/web/app/components/base/chat/chat/citation/__tests__/tooltip.spec.tsx index 45ac4b4fb4..58a3c5c654 100644 --- a/web/app/components/base/chat/chat/citation/__tests__/tooltip.spec.tsx +++ b/web/app/components/base/chat/chat/citation/__tests__/tooltip.spec.tsx @@ -41,7 +41,7 @@ describe('Tooltip', () => { await user.hover(screen.getByTestId('tooltip-trigger-content')) - expect(screen.getByTestId('tooltip-popup')).toHaveTextContent('Word Count') + expect(await screen.findByTestId('tooltip-popup')).toHaveTextContent('Word Count') }) it('should render the data value inside the tooltip popup', async () => { @@ -50,7 +50,7 @@ describe('Tooltip', () => { await user.hover(screen.getByTestId('tooltip-trigger-content')) - expect(screen.getByTestId('tooltip-popup')).toHaveTextContent('99') + expect(await screen.findByTestId('tooltip-popup')).toHaveTextContent('99') }) it('should render a string data value inside the tooltip popup', async () => { @@ -59,7 +59,7 @@ describe('Tooltip', () => { await user.hover(screen.getByTestId('tooltip-trigger-content')) - expect(screen.getByTestId('tooltip-popup')).toHaveTextContent('abc1234') + expect(await screen.findByTestId('tooltip-popup')).toHaveTextContent('abc1234') }) it('should 
render both text and data together inside the tooltip popup', async () => { @@ -68,7 +68,7 @@ describe('Tooltip', () => { await user.hover(screen.getByTestId('tooltip-trigger-content')) - const popup = screen.getByTestId('tooltip-popup') + const popup = await screen.findByTestId('tooltip-popup') expect(popup).toHaveTextContent('Characters') expect(popup).toHaveTextContent('55') }) @@ -90,10 +90,10 @@ describe('Tooltip', () => { const user = userEvent.setup() const { rerender } = render(} />) await user.hover(screen.getByTestId('tooltip-trigger-content')) - expect(screen.getByTestId('tooltip-popup')).toHaveTextContent('Original') + expect(await screen.findByTestId('tooltip-popup')).toHaveTextContent('Original') rerender(} />) - expect(screen.getByTestId('tooltip-popup')).toHaveTextContent('Updated') + expect(await screen.findByTestId('tooltip-popup')).toHaveTextContent('Updated') }) }) @@ -104,7 +104,7 @@ describe('Tooltip', () => { await user.hover(screen.getByTestId('tooltip-trigger-content')) - expect(screen.getByTestId('tooltip-popup')).toBeInTheDocument() + expect(await screen.findByTestId('tooltip-popup')).toBeInTheDocument() }) it('should hide the tooltip popup on mouse leave', async () => { @@ -125,7 +125,7 @@ describe('Tooltip', () => { await user.unhover(screen.getByTestId('tooltip-trigger-content')) await user.hover(screen.getByTestId('tooltip-trigger-content')) - expect(screen.getByTestId('tooltip-popup')).toBeInTheDocument() + expect(await screen.findByTestId('tooltip-popup')).toBeInTheDocument() }) }) diff --git a/web/app/components/base/chat/chat/citation/progress-tooltip.tsx b/web/app/components/base/chat/chat/citation/progress-tooltip.tsx index 75211b706e..be9a4b2661 100644 --- a/web/app/components/base/chat/chat/citation/progress-tooltip.tsx +++ b/web/app/components/base/chat/chat/citation/progress-tooltip.tsx @@ -4,7 +4,6 @@ import { TooltipContent, TooltipTrigger, } from '@langgenius/dify-ui/tooltip' -import { useState } from 'react' import { 
useTranslation } from 'react-i18next' type ProgressTooltipProps = { @@ -15,22 +14,12 @@ const ProgressTooltip: FC = ({ data, }) => { const { t } = useTranslation() - const [open, setOpen] = useState(false) return ( - + setOpen(true)} - onMouseLeave={() => setOpen(false)} - /> - )} + data-testid="progress-trigger-content" + className="flex grow items-center border-0 bg-transparent p-0 text-left" >
= ({ {t('chat.citation.hitScore', { ns: 'common' })} diff --git a/web/app/components/base/chat/chat/citation/tooltip.tsx b/web/app/components/base/chat/chat/citation/tooltip.tsx index e1d76a9383..f3460abd22 100644 --- a/web/app/components/base/chat/chat/citation/tooltip.tsx +++ b/web/app/components/base/chat/chat/citation/tooltip.tsx @@ -1,39 +1,27 @@ import type { FC } from 'react' import { - Tooltip as DifyTooltip, + Tooltip, TooltipContent, TooltipTrigger, } from '@langgenius/dify-ui/tooltip' import * as React from 'react' -import { useState } from 'react' -type TooltipProps = { +type CitationTooltipProps = { data: number | string text: string icon: React.ReactNode } -const Tooltip: FC = ({ +const CitationTooltip: FC = ({ data, text, icon, }) => { - const [open, setOpen] = useState(false) - return ( - + setOpen(true)} - onMouseLeave={() => setOpen(false)} - /> - )} + data-testid="tooltip-trigger-content" + className="mr-6 flex items-center border-0 bg-transparent p-0 text-left" > {icon} {data} @@ -41,15 +29,14 @@ const Tooltip: FC = ({ {text} {' '} {data} - + ) } -export default Tooltip +export default CitationTooltip diff --git a/web/app/components/base/copy-icon/index.tsx b/web/app/components/base/copy-icon/index.tsx index b0b4635a39..a770430580 100644 --- a/web/app/components/base/copy-icon/index.tsx +++ b/web/app/components/base/copy-icon/index.tsx @@ -1,8 +1,8 @@ 'use client' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { useCallback } from 'react' import { useTranslation } from 'react-i18next' import { useClipboard } from '@/hooks/use-clipboard' -import Tooltip from '../tooltip' type Props = { content: string @@ -25,14 +25,25 @@ const CopyIcon = ({ content }: Props) => { const safeTooltipText = tooltipText || '' return ( - -
- {!copied - ? () - : ()} -
+ + + {!copied + ? () + : ()} + + )} + /> + + {safeTooltipText} + ) } diff --git a/web/app/components/base/features/new-feature-panel/annotation-reply/config-param.tsx b/web/app/components/base/features/new-feature-panel/annotation-reply/config-param.tsx index 0335587af0..16cbefe87a 100644 --- a/web/app/components/base/features/new-feature-panel/annotation-reply/config-param.tsx +++ b/web/app/components/base/features/new-feature-panel/annotation-reply/config-param.tsx @@ -1,7 +1,7 @@ 'use client' import type { FC } from 'react' import * as React from 'react' -import Tooltip from '@/app/components/base/tooltip' +import { Infotip } from '@/app/components/base/infotip' export const Item: FC<{ title: string, tooltip: string, children: React.JSX.Element }> = ({ title, @@ -12,11 +12,9 @@ export const Item: FC<{ title: string, tooltip: string, children: React.JSX.Elem
{title}
- {tooltip}
- } - /> + + {tooltip} +
{children}
diff --git a/web/app/components/base/features/new-feature-panel/text-to-speech/__tests__/param-config-content.spec.tsx b/web/app/components/base/features/new-feature-panel/text-to-speech/__tests__/param-config-content.spec.tsx index 754bde98a6..b4d5beefa6 100644 --- a/web/app/components/base/features/new-feature-panel/text-to-speech/__tests__/param-config-content.spec.tsx +++ b/web/app/components/base/features/new-feature-panel/text-to-speech/__tests__/param-config-content.spec.tsx @@ -110,8 +110,7 @@ describe('ParamConfigContent', () => { const languageLabel = screen.getByText(/voice\.voiceSettings\.language/) expect(languageLabel)!.toBeInTheDocument() - const tooltip = languageLabel.parentElement as HTMLElement - expect(tooltip.querySelector('svg'))!.toBeInTheDocument() + expect(screen.getByRole('button', { name: /voice\.voiceSettings\.resolutionTooltip/ }))!.toBeInTheDocument() }) it('should display language listbox button', () => { diff --git a/web/app/components/base/features/new-feature-panel/text-to-speech/param-config-content.tsx b/web/app/components/base/features/new-feature-panel/text-to-speech/param-config-content.tsx index 199cbecccb..f7c3b738a9 100644 --- a/web/app/components/base/features/new-feature-panel/text-to-speech/param-config-content.tsx +++ b/web/app/components/base/features/new-feature-panel/text-to-speech/param-config-content.tsx @@ -10,7 +10,7 @@ import { useTranslation } from 'react-i18next' import { replace } from 'string-ts' import AudioBtn from '@/app/components/base/audio-btn' import { useFeatures, useFeaturesStore } from '@/app/components/base/features/hooks' -import Tooltip from '@/app/components/base/tooltip' +import { Infotip } from '@/app/components/base/infotip' import { languages } from '@/i18n-config/language' import { usePathname } from '@/next/navigation' import { useAppVoices } from '@/service/use-apps' @@ -89,17 +89,16 @@ const VoiceParamConfig = ({
{t('voice.voiceSettings.language', { ns: 'appDebug' })} - - {t('voice.voiceSettings.resolutionTooltip', { ns: 'appDebug' }).split('\n').map(item => ( -
- {item} -
- ))} + + {t('voice.voiceSettings.resolutionTooltip', { ns: 'appDebug' }).split('\n').map(item => ( +
+ {item}
- )} - /> + ))} +
) => { + const handleClick = useCallback((e: React.MouseEvent) => { e.stopPropagation() e.preventDefault() onClick?.(file) }, [onClick, file]) return ( - -
+ + { + isImage + ? ( + + ) + : ( + + ) + } + )} - onClick={handleClick} - > - { - isImage - ? ( - - ) - : ( - - ) - } -
+ /> + + {name} +
) } diff --git a/web/app/components/base/form/components/__tests__/label.spec.tsx b/web/app/components/base/form/components/__tests__/label.spec.tsx index a3f564dafe..99471e5171 100644 --- a/web/app/components/base/form/components/__tests__/label.spec.tsx +++ b/web/app/components/base/form/components/__tests__/label.spec.tsx @@ -41,8 +41,8 @@ describe('Label', () => { const tooltipText = 'Test Tooltip' render(
- } - triggerClassName="ml-0.5 w-4 h-4" - triggerTestId={`${htmlFor}-tooltip`} - /> + + {tooltip} + )}
) diff --git a/web/app/components/base/input-with-copy/index.tsx b/web/app/components/base/input-with-copy/index.tsx index 2da2e547c2..38a7ceed9c 100644 --- a/web/app/components/base/input-with-copy/index.tsx +++ b/web/app/components/base/input-with-copy/index.tsx @@ -1,11 +1,11 @@ 'use client' import type { InputProps } from '../input' import { cn } from '@langgenius/dify-ui/cn' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import * as React from 'react' import { useTranslation } from 'react-i18next' import { useClipboard } from '@/hooks/use-clipboard' import ActionButton from '../action-button' -import Tooltip from '../tooltip' type InputWithCopyProps = { showCopyButton?: boolean @@ -64,18 +64,24 @@ const InputWithCopy = React.forwardRef(( onMouseLeave={reset} data-testid="copy-button-wrapper" > - - - {copied - ? () - : ()} - + + + {copied + ? () + : ()} + + )} + /> + + {safeTooltipText} +
)} diff --git a/web/app/components/base/tooltip/TooltipManager.ts b/web/app/components/base/tooltip/TooltipManager.ts deleted file mode 100644 index b0138af4b3..0000000000 --- a/web/app/components/base/tooltip/TooltipManager.ts +++ /dev/null @@ -1,27 +0,0 @@ -class TooltipManager { - private activeCloser: (() => void) | null = null - - register(closeFn: () => void) { - if (this.activeCloser) - this.activeCloser() - this.activeCloser = closeFn - } - - clear(closeFn: () => void) { - if (this.activeCloser === closeFn) - this.activeCloser = null - } - - /** - * Closes the currently active tooltip by calling its closer function - * and clearing the reference to it - */ - closeActiveTooltip() { - if (this.activeCloser) { - this.activeCloser() - this.activeCloser = null - } - } -} - -export const tooltipManager = new TooltipManager() diff --git a/web/app/components/base/tooltip/__tests__/TooltipManager.spec.ts b/web/app/components/base/tooltip/__tests__/TooltipManager.spec.ts deleted file mode 100644 index 406c48259a..0000000000 --- a/web/app/components/base/tooltip/__tests__/TooltipManager.spec.ts +++ /dev/null @@ -1,129 +0,0 @@ -import { tooltipManager } from '../TooltipManager' - -describe('TooltipManager', () => { - // Test the singleton instance directly - let manager: typeof tooltipManager - - beforeEach(() => { - // Get fresh reference to the singleton - manager = tooltipManager - // Clean up any active tooltip by calling closeActiveTooltip - // This ensures each test starts with a clean state - manager.closeActiveTooltip() - }) - - describe('register', () => { - it('should register a close function', () => { - const closeFn = vi.fn() - manager.register(closeFn) - expect(closeFn).not.toHaveBeenCalled() - }) - - it('should call the existing close function when registering a new one', () => { - const firstCloseFn = vi.fn() - const secondCloseFn = vi.fn() - - manager.register(firstCloseFn) - manager.register(secondCloseFn) - - 
expect(firstCloseFn).toHaveBeenCalledTimes(1) - expect(secondCloseFn).not.toHaveBeenCalled() - }) - - it('should replace the active closer with the new one', () => { - const firstCloseFn = vi.fn() - const secondCloseFn = vi.fn() - - // Register first function - manager.register(firstCloseFn) - - // Register second function - this should call firstCloseFn and replace it - manager.register(secondCloseFn) - - // Verify firstCloseFn was called during register (replacement behavior) - expect(firstCloseFn).toHaveBeenCalledTimes(1) - - // Now close the active tooltip - this should call secondCloseFn - manager.closeActiveTooltip() - - // Verify secondCloseFn was called, not firstCloseFn - expect(secondCloseFn).toHaveBeenCalledTimes(1) - }) - }) - - describe('clear', () => { - it('should not clear if the close function does not match', () => { - const closeFn = vi.fn() - const otherCloseFn = vi.fn() - - manager.register(closeFn) - manager.clear(otherCloseFn) - - manager.closeActiveTooltip() - expect(closeFn).toHaveBeenCalledTimes(1) - }) - - it('should clear the close function if it matches', () => { - const closeFn = vi.fn() - - manager.register(closeFn) - manager.clear(closeFn) - - manager.closeActiveTooltip() - expect(closeFn).not.toHaveBeenCalled() - }) - - it('should not call the close function when clearing', () => { - const closeFn = vi.fn() - - manager.register(closeFn) - manager.clear(closeFn) - - expect(closeFn).not.toHaveBeenCalled() - }) - }) - - describe('closeActiveTooltip', () => { - it('should do nothing when no active closer is registered', () => { - expect(() => manager.closeActiveTooltip()).not.toThrow() - }) - - it('should call the active closer function', () => { - const closeFn = vi.fn() - manager.register(closeFn) - - manager.closeActiveTooltip() - - expect(closeFn).toHaveBeenCalledTimes(1) - }) - - it('should clear the active closer after calling it', () => { - const closeFn = vi.fn() - manager.register(closeFn) - - manager.closeActiveTooltip() - 
manager.closeActiveTooltip() - - expect(closeFn).toHaveBeenCalledTimes(1) - }) - - it('should handle multiple register and close cycles', () => { - const closeFn1 = vi.fn() - const closeFn2 = vi.fn() - const closeFn3 = vi.fn() - - manager.register(closeFn1) - manager.closeActiveTooltip() - - manager.register(closeFn2) - manager.closeActiveTooltip() - - manager.register(closeFn3) - manager.closeActiveTooltip() - - expect(closeFn1).toHaveBeenCalledTimes(1) - expect(closeFn2).toHaveBeenCalledTimes(1) - expect(closeFn3).toHaveBeenCalledTimes(1) - }) - }) -}) diff --git a/web/app/components/base/tooltip/__tests__/content.spec.tsx b/web/app/components/base/tooltip/__tests__/content.spec.tsx deleted file mode 100644 index fa5d86756e..0000000000 --- a/web/app/components/base/tooltip/__tests__/content.spec.tsx +++ /dev/null @@ -1,49 +0,0 @@ -import { render, screen } from '@testing-library/react' -import userEvent from '@testing-library/user-event' -import { describe, expect, it, vi } from 'vitest' -import { ToolTipContent } from '../content' - -describe('ToolTipContent', () => { - it('should render children correctly', () => { - render( - - Tooltip body text - , - ) - expect(screen.getByTestId('tooltip-content')).toBeInTheDocument() - expect(screen.getByTestId('tooltip-content-body')).toHaveTextContent('Tooltip body text') - expect(screen.queryByTestId('tooltip-content-title')).not.toBeInTheDocument() - expect(screen.queryByTestId('tooltip-content-action')).not.toBeInTheDocument() - }) - - it('should render title when provided', () => { - render( - - Tooltip body text - , - ) - expect(screen.getByTestId('tooltip-content-title')).toHaveTextContent('Tooltip Title') - }) - - it('should render action when provided', () => { - render( - Action Text}> - Tooltip body text - , - ) - expect(screen.getByTestId('tooltip-content-action')).toHaveTextContent('Action Text') - }) - - it('should handle action click', async () => { - const user = userEvent.setup() - const handleActionClick 
= vi.fn() - render( - Action Text}> - Tooltip body text - , - ) - - await user.click(screen.getByText('Action Text')) - expect(handleActionClick).toHaveBeenCalledTimes(1) - }) -}) diff --git a/web/app/components/base/tooltip/__tests__/index.spec.tsx b/web/app/components/base/tooltip/__tests__/index.spec.tsx deleted file mode 100644 index 39f8f1b503..0000000000 --- a/web/app/components/base/tooltip/__tests__/index.spec.tsx +++ /dev/null @@ -1,333 +0,0 @@ -import { act, cleanup, fireEvent, render, screen } from '@testing-library/react' -import * as React from 'react' -import Tooltip from '../index' -import { tooltipManager } from '../TooltipManager' - -afterEach(() => { - cleanup() - vi.clearAllTimers() - vi.useRealTimers() -}) - -describe('Tooltip', () => { - describe('Rendering', () => { - it('should render default tooltip with question icon', () => { - const triggerClassName = 'custom-trigger' - const { container } = render() - const trigger = container.querySelector(`.${triggerClassName}`) - expect(trigger).not.toBeNull() - expect(trigger?.querySelector('svg')).not.toBeNull() // question icon - }) - - it('should render with custom children', () => { - const { getByText } = render( - - - , - ) - expect(getByText('Hover me').textContent).toBe('Hover me') - }) - - it('should render correctly when asChild is false', () => { - const { container } = render( - - Trigger - , - ) - const trigger = container.querySelector('.custom-parent-trigger') - expect(trigger).not.toBeNull() - }) - - it('should render with a fallback question icon when children are null', () => { - const { container } = render( - - {null} - , - ) - const trigger = container.querySelector('.custom-fallback-trigger') - expect(trigger).not.toBeNull() - expect(trigger?.querySelector('svg')).not.toBeNull() - }) - }) - - describe('Disabled state', () => { - it('should not show tooltip when disabled', () => { - const triggerClassName = 'custom-trigger' - const { container } = render() - const trigger = 
container.querySelector(`.${triggerClassName}`) - act(() => { - fireEvent.mouseEnter(trigger!) - }) - expect(screen.queryByText('Tooltip content')).not.toBeInTheDocument() - }) - }) - - describe('Trigger methods', () => { - beforeEach(() => { - vi.useFakeTimers() - }) - - it('should open on hover when triggerMethod is hover', () => { - const triggerClassName = 'custom-trigger' - const { container } = render() - const trigger = container.querySelector(`.${triggerClassName}`) - act(() => { - fireEvent.mouseEnter(trigger!) - }) - expect(screen.queryByText('Tooltip content')).toBeInTheDocument() - }) - - it('should close on mouse leave when triggerMethod is hover and needsDelay is false', () => { - const triggerClassName = 'custom-trigger' - const { container } = render() - const trigger = container.querySelector(`.${triggerClassName}`) - act(() => { - fireEvent.mouseEnter(trigger!) - fireEvent.mouseLeave(trigger!) - }) - expect(screen.queryByText('Tooltip content')).not.toBeInTheDocument() - }) - - it('should toggle on click when triggerMethod is click', () => { - const triggerClassName = 'custom-trigger' - const { container } = render() - const trigger = container.querySelector(`.${triggerClassName}`) - act(() => { - fireEvent.click(trigger!) - }) - expect(screen.queryByText('Tooltip content')).toBeInTheDocument() - - // Test toggle off - act(() => { - fireEvent.click(trigger!) - }) - expect(screen.queryByText('Tooltip content')).not.toBeInTheDocument() - }) - - it('should do nothing on mouse enter if triggerMethod is click', () => { - const triggerClassName = 'custom-trigger' - const { container } = render() - const trigger = container.querySelector(`.${triggerClassName}`) - act(() => { - fireEvent.mouseEnter(trigger!) 
- }) - expect(screen.queryByText('Tooltip content')).not.toBeInTheDocument() - }) - - it('should delay closing on mouse leave when needsDelay is true', () => { - const triggerClassName = 'custom-trigger' - const { container } = render() - const trigger = container.querySelector(`.${triggerClassName}`) - - act(() => { - fireEvent.mouseEnter(trigger!) - }) - expect(screen.getByText('Tooltip content')).toBeInTheDocument() - - act(() => { - fireEvent.mouseLeave(trigger!) - }) - // Shouldn't close immediately - expect(screen.getByText('Tooltip content')).toBeInTheDocument() - - act(() => { - vi.advanceTimersByTime(350) - }) - // Should close after delay - expect(screen.queryByText('Tooltip content')).not.toBeInTheDocument() - }) - - it('should not close if mouse enters popup before delay finishes', () => { - const triggerClassName = 'custom-trigger' - const { container } = render() - const trigger = container.querySelector(`.${triggerClassName}`) - - act(() => { - fireEvent.mouseEnter(trigger!) - }) - - const popup = screen.getByText('Tooltip content') - expect(popup).toBeInTheDocument() - - act(() => { - fireEvent.mouseLeave(trigger!) 
- }) - - act(() => { - vi.advanceTimersByTime(150) - // Simulate mouse entering popup area itself during the delay timeframe - fireEvent.mouseEnter(popup) - }) - - act(() => { - vi.advanceTimersByTime(200) // Complete the 300ms original delay - }) - - // Should still be open because we are hovering the popup - expect(screen.getByText('Tooltip content')).toBeInTheDocument() - - // Now mouse leaves popup - act(() => { - fireEvent.mouseLeave(popup) - }) - - act(() => { - vi.advanceTimersByTime(350) - }) - // Should now close - expect(screen.queryByText('Tooltip content')).not.toBeInTheDocument() - }) - - it('should do nothing on mouse enter/leave of popup when triggerMethod is not hover', () => { - const triggerClassName = 'custom-trigger' - const { container } = render() - const trigger = container.querySelector(`.${triggerClassName}`) - - act(() => { - fireEvent.click(trigger!) - }) - - const popup = screen.getByText('Tooltip content') - - act(() => { - fireEvent.mouseEnter(popup) - fireEvent.mouseLeave(popup) - vi.advanceTimersByTime(350) - }) - - // Should still be open because click method requires another click to close, not hover leave - expect(screen.getByText('Tooltip content')).toBeInTheDocument() - }) - - it('should clear close timeout if trigger is hovered again before delay finishes', () => { - const triggerClassName = 'custom-trigger' - const { container } = render() - const trigger = container.querySelector(`.${triggerClassName}`) - - act(() => { - fireEvent.mouseEnter(trigger!) - }) - expect(screen.getByText('Tooltip content')).toBeInTheDocument() - - act(() => { - fireEvent.mouseLeave(trigger!) - }) - - act(() => { - vi.advanceTimersByTime(150) - // Re-hover trigger before it closes - fireEvent.mouseEnter(trigger!) 
- }) - - act(() => { - vi.advanceTimersByTime(200) // Original 300ms would be up - }) - - // Should still be open because we reset it - expect(screen.getByText('Tooltip content')).toBeInTheDocument() - }) - - it('should test clear close timeout if trigger is hovered again before delay finishes and isHoverPopupRef is true', () => { - const triggerClassName = 'custom-trigger' - const { container } = render() - const trigger = container.querySelector(`.${triggerClassName}`) - - act(() => { - fireEvent.mouseEnter(trigger!) - }) - - const popup = screen.getByText('Tooltip content') - expect(popup).toBeInTheDocument() - - act(() => { - fireEvent.mouseEnter(popup) - fireEvent.mouseLeave(trigger!) - }) - - act(() => { - vi.advanceTimersByTime(350) - }) - - // Should still be open because we are hovering the popup - expect(screen.getByText('Tooltip content')).toBeInTheDocument() - }) - }) - - describe('TooltipManager', () => { - it('should close active tooltips when triggered centrally, overriding other closes', () => { - const triggerClassName1 = 'custom-trigger-1' - const triggerClassName2 = 'custom-trigger-2' - - const { container } = render( -
- - -
, - ) - - const trigger1 = container.querySelector(`.${triggerClassName1}`) - const trigger2 = container.querySelector(`.${triggerClassName2}`) - - expect(trigger2).not.toBeNull() - - // Open first tooltip - act(() => { - fireEvent.mouseEnter(trigger1!) - }) - expect(screen.queryByText('Tooltip content 1')).toBeInTheDocument() - - // TooltipManager should keep track of it - // Next, immediately open the second one without leaving first (e.g., via TooltipManager) - // TooltipManager registers the newest one and closes the old one when doing full external operations, but internally the manager allows direct closing - - act(() => { - tooltipManager.closeActiveTooltip() - }) - - expect(screen.queryByText('Tooltip content 1')).not.toBeInTheDocument() - - // Safe to call again - expect(() => tooltipManager.closeActiveTooltip()).not.toThrow() - }) - }) - - describe('Styling and positioning', () => { - it('should apply custom trigger className', () => { - const triggerClassName = 'custom-trigger' - const { container } = render() - const trigger = container.querySelector(`.${triggerClassName}`) - expect(trigger?.className).toContain('custom-trigger') - }) - - it('should pass triggerTestId to the fallback icon wrapper', () => { - render() - expect(screen.getByTestId('test-tooltip-icon')).toBeInTheDocument() - }) - - it('should apply custom popup className', async () => { - const triggerClassName = 'custom-trigger' - const { container } = render() - const trigger = container.querySelector(`.${triggerClassName}`) - act(() => { - fireEvent.mouseEnter(trigger!) - }) - expect((await screen.findByText('Tooltip content'))?.className).toContain('custom-popup') - }) - - it('should apply noDecoration when specified', async () => { - const triggerClassName = 'custom-trigger' - const { container } = render( - , - ) - const trigger = container.querySelector(`.${triggerClassName}`) - act(() => { - fireEvent.mouseEnter(trigger!) 
- }) - expect((await screen.findByText('Tooltip content'))?.className).not.toContain('bg-components-panel-bg') - }) - }) -}) diff --git a/web/app/components/base/tooltip/content.tsx b/web/app/components/base/tooltip/content.tsx deleted file mode 100644 index 191ee933f1..0000000000 --- a/web/app/components/base/tooltip/content.tsx +++ /dev/null @@ -1,22 +0,0 @@ -import type { FC, PropsWithChildren, ReactNode } from 'react' - -type ToolTipContentProps = { - title?: ReactNode - action?: ReactNode -} & PropsWithChildren - -export const ToolTipContent: FC = ({ - title, - action, - children, -}) => { - return ( -
- {!!title && ( -
{title}
- )} -
{children}
- {!!action &&
{action}
} -
- ) -} diff --git a/web/app/components/base/tooltip/index.stories.tsx b/web/app/components/base/tooltip/index.stories.tsx deleted file mode 100644 index 69d0c5d2b6..0000000000 --- a/web/app/components/base/tooltip/index.stories.tsx +++ /dev/null @@ -1,60 +0,0 @@ -import type { Meta, StoryObj } from '@storybook/nextjs-vite' -import Tooltip from '.' - -const TooltipGrid = () => { - return ( -
-
Hover tooltips
-
- - - - - - Right tooltip - - -
-
Click tooltips
-
- - - - - - Plain content - - -
-
- ) -} - -const meta = { - title: 'Base/Feedback/Tooltip', - component: TooltipGrid, - parameters: { - layout: 'centered', - docs: { - description: { - component: 'Portal-based tooltip component supporting hover and click triggers, custom placements, and decorated content.', - }, - }, - }, - tags: ['autodocs'], -} satisfies Meta - -export default meta -type Story = StoryObj - -export const Playground: Story = {} diff --git a/web/app/components/base/tooltip/index.tsx b/web/app/components/base/tooltip/index.tsx deleted file mode 100644 index 85c63cdeaf..0000000000 --- a/web/app/components/base/tooltip/index.tsx +++ /dev/null @@ -1,231 +0,0 @@ -'use client' -import type { Placement } from '@langgenius/dify-ui/popover' -/** - * @deprecated Use `@langgenius/dify-ui/tooltip` instead. - * This component will be removed after migration is complete. - * See: https://github.com/langgenius/dify/issues/32767 - */ -import type { FC } from 'react' -import { cn } from '@langgenius/dify-ui/cn' -import { - Popover, - PopoverContent, - PopoverTrigger, -} from '@langgenius/dify-ui/popover' -import { RiQuestionLine } from '@remixicon/react' -import { useBoolean } from 'ahooks' -import * as React from 'react' -import { useCallback, useEffect, useRef, useState } from 'react' -import { tooltipManager } from './TooltipManager' - -type TooltipOffset = number | { - mainAxis?: number - crossAxis?: number -} - -type TooltipProps = { - position?: Placement - triggerMethod?: 'hover' | 'click' - triggerClassName?: string - triggerTestId?: string - disabled?: boolean - popupContent?: React.ReactNode - children?: React.ReactNode - popupClassName?: string - portalContentClassName?: string - noDecoration?: boolean - offset?: TooltipOffset - needsDelay?: boolean - asChild?: boolean -} - -const Tooltip: FC = ({ - position = 'top', - triggerMethod = 'hover', - triggerClassName, - triggerTestId, - disabled = false, - popupContent, - children, - popupClassName, - portalContentClassName, - noDecoration, - 
offset, - asChild = true, - needsDelay = true, -}) => { - const [open, setOpen] = useState(false) - const resolvedOffset = offset ?? 8 - const sideOffset = typeof resolvedOffset === 'number' ? resolvedOffset : (resolvedOffset.mainAxis ?? 0) - const alignOffset = typeof resolvedOffset === 'number' ? 0 : (resolvedOffset.crossAxis ?? 0) - const [isHoverPopup, { - setTrue: setHoverPopup, - setFalse: setNotHoverPopup, - }] = useBoolean(false) - - const isHoverPopupRef = useRef(isHoverPopup) - useEffect(() => { - isHoverPopupRef.current = isHoverPopup - }, [isHoverPopup]) - - const [isHoverTrigger, { - setTrue: setHoverTrigger, - setFalse: setNotHoverTrigger, - }] = useBoolean(false) - - const isHoverTriggerRef = useRef(isHoverTrigger) - useEffect(() => { - isHoverTriggerRef.current = isHoverTrigger - }, [isHoverTrigger]) - - const closeTimeoutRef = useRef | null>(null) - const clearCloseTimeout = useCallback(() => { - if (closeTimeoutRef.current) { - clearTimeout(closeTimeoutRef.current) - closeTimeoutRef.current = null - } - }, []) - - useEffect(() => { - return () => { - clearCloseTimeout() - } - }, [clearCloseTimeout]) - - const close = () => setOpen(false) - const handleOpenChange = (nextOpen: boolean) => { - if (disabled) { - setOpen(false) - return - } - if (triggerMethod === 'click') - setOpen(nextOpen) - else if (!nextOpen) - setOpen(false) - } - - const handleLeave = (isTrigger: boolean) => { - if (isTrigger) - setNotHoverTrigger() - else - setNotHoverPopup() - - // give time to move to the popup - if (needsDelay) { - clearCloseTimeout() - closeTimeoutRef.current = setTimeout(() => { - closeTimeoutRef.current = null - if (!isHoverPopupRef.current && !isHoverTriggerRef.current) { - setOpen(false) - tooltipManager.clear(close) - } - }, 300) - } - else { - clearCloseTimeout() - setOpen(false) - tooltipManager.clear(close) - } - } - const handleTriggerMouseEnter = () => { - if (triggerMethod === 'hover') { - clearCloseTimeout() - setHoverTrigger() - 
tooltipManager.register(close) - setOpen(true) - } - } - const handleTriggerMouseLeave = () => { - if (triggerMethod === 'hover') - handleLeave(true) - } - const handlePopupMouseEnter = () => { - if (triggerMethod === 'hover') { - clearCloseTimeout() - setHoverPopup() - } - } - const handlePopupMouseLeave = () => { - if (triggerMethod === 'hover') - handleLeave(false) - } - - const fallbackTrigger = ( -
- -
- ) - const triggerContent = children || fallbackTrigger - const childElement = React.isValidElement>(triggerContent) - ? triggerContent - : fallbackTrigger - const nativeButton = typeof childElement.type !== 'string' || childElement.type === 'button' - - const renderAsChildTrigger = () => { - const childProps = childElement.props - return React.cloneElement(childElement, { - onMouseEnter: (event: React.MouseEvent) => { - childProps.onMouseEnter?.(event) - handleTriggerMouseEnter() - }, - onMouseLeave: (event: React.MouseEvent) => { - childProps.onMouseLeave?.(event) - handleTriggerMouseLeave() - }, - }) - } - const effectiveOpen = !disabled && open - - return ( - - {asChild - ? ( - - ) - : ( - - )} - > - {triggerContent} - - )} - {effectiveOpen && !!popupContent && ( - - {popupContent} - - )} - - ) -} - -export default React.memo(Tooltip) diff --git a/web/app/components/billing/pricing/plans/cloud-plan-item/__tests__/index.spec.tsx b/web/app/components/billing/pricing/plans/cloud-plan-item/__tests__/index.spec.tsx index 615579bc6c..568a2656ba 100644 --- a/web/app/components/billing/pricing/plans/cloud-plan-item/__tests__/index.spec.tsx +++ b/web/app/components/billing/pricing/plans/cloud-plan-item/__tests__/index.spec.tsx @@ -241,7 +241,7 @@ describe('CloudPlanItem', () => { ) // Sandbox viewed from a higher plan is disabled, but let's verify no API calls - const button = screen.getByRole('button') + const button = screen.getByRole('button', { name: 'billing.plansCommon.startForFree' }) fireEvent.click(button) await waitFor(() => { diff --git a/web/app/components/billing/pricing/plans/cloud-plan-item/list/__tests__/index.spec.tsx b/web/app/components/billing/pricing/plans/cloud-plan-item/list/__tests__/index.spec.tsx index 5a06509355..e6a0d78273 100644 --- a/web/app/components/billing/pricing/plans/cloud-plan-item/list/__tests__/index.spec.tsx +++ b/web/app/components/billing/pricing/plans/cloud-plan-item/list/__tests__/index.spec.tsx @@ -1,4 +1,5 @@ import { 
render, screen } from '@testing-library/react' +import userEvent from '@testing-library/user-event' import * as React from 'react' import { Plan } from '../../../../../type' import List from '../index' @@ -12,11 +13,13 @@ describe('CloudPlanItem/List', () => { expect(screen.getByText('billing.plansCommon.startNodes.limited:{"count":2}')).toBeInTheDocument() }) - it('should show professional monthly quotas and tooltips', () => { + it('should show professional monthly quotas and tooltips', async () => { + const user = userEvent.setup() render() expect(screen.getByText('billing.plansCommon.messageRequest.titlePerMonth:{"count":5000}')).toBeInTheDocument() - expect(screen.getByText('billing.plansCommon.vectorSpaceTooltip')).toBeInTheDocument() + await user.hover(screen.getByRole('button', { name: 'billing.plansCommon.vectorSpaceTooltip' })) + expect(await screen.findByText('billing.plansCommon.vectorSpaceTooltip')).toBeInTheDocument() expect(screen.getByText('billing.plansCommon.workflowExecution.faster')).toBeInTheDocument() }) diff --git a/web/app/components/billing/pricing/plans/cloud-plan-item/list/item/__tests__/index.spec.tsx b/web/app/components/billing/pricing/plans/cloud-plan-item/list/item/__tests__/index.spec.tsx index e1aada80f8..f75b334fd9 100644 --- a/web/app/components/billing/pricing/plans/cloud-plan-item/list/item/__tests__/index.spec.tsx +++ b/web/app/components/billing/pricing/plans/cloud-plan-item/list/item/__tests__/index.spec.tsx @@ -1,4 +1,5 @@ import { render, screen } from '@testing-library/react' +import userEvent from '@testing-library/user-event' import Item from '../index' describe('Item', () => { @@ -20,14 +21,16 @@ describe('Item', () => { // Toggling the optional tooltip indicator describe('Tooltip behavior', () => { - it('should render tooltip content when tooltip text is provided', () => { + it('should render tooltip content when tooltip text is provided', async () => { + const user = userEvent.setup() const label = 'Workspace seats' 
const tooltip = 'Seats define how many teammates can join the workspace.' const { container } = render() expect(screen.getByText(label)).toBeInTheDocument() - expect(screen.getByText(tooltip)).toBeInTheDocument() + await user.hover(screen.getByRole('button', { name: tooltip })) + expect(await screen.findByText(tooltip)).toBeInTheDocument() expect(container.querySelector('.group')).not.toBeNull() }) diff --git a/web/app/components/billing/pricing/plans/cloud-plan-item/list/item/__tests__/tooltip.spec.tsx b/web/app/components/billing/pricing/plans/cloud-plan-item/list/item/__tests__/tooltip.spec.tsx index 86e4cb1061..c744fdb60e 100644 --- a/web/app/components/billing/pricing/plans/cloud-plan-item/list/item/__tests__/tooltip.spec.tsx +++ b/web/app/components/billing/pricing/plans/cloud-plan-item/list/item/__tests__/tooltip.spec.tsx @@ -1,4 +1,5 @@ import { render, screen } from '@testing-library/react' +import userEvent from '@testing-library/user-event' import Tooltip from '../tooltip' describe('Tooltip', () => { @@ -8,12 +9,14 @@ describe('Tooltip', () => { // Rendering the info tooltip container describe('Rendering', () => { - it('should render the content panel when provide with text', () => { + it('should render the content panel when hovered', async () => { + const user = userEvent.setup() const content = 'Usage resets on the first day of every month.' 
render() + await user.hover(screen.getByRole('button', { name: content })) - expect(() => screen.getByText(content)).not.toThrow() + expect(await screen.findByText(content)).toBeInTheDocument() }) }) diff --git a/web/app/components/billing/pricing/plans/cloud-plan-item/list/item/tooltip.tsx b/web/app/components/billing/pricing/plans/cloud-plan-item/list/item/tooltip.tsx index fe6aa9c2cb..be53ef6b1b 100644 --- a/web/app/components/billing/pricing/plans/cloud-plan-item/list/item/tooltip.tsx +++ b/web/app/components/billing/pricing/plans/cloud-plan-item/list/item/tooltip.tsx @@ -1,3 +1,4 @@ +import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { RiInfoI } from '@remixicon/react' import * as React from 'react' @@ -11,14 +12,20 @@ const Tooltip = ({ if (!content) return null return ( -
-
- {content} -
-
+ + -
-
+ + + {content} + + ) } diff --git a/web/app/components/datasets/documents/components/operations.tsx b/web/app/components/datasets/documents/components/operations.tsx index 8692da927d..7dc184aee4 100644 --- a/web/app/components/datasets/documents/components/operations.tsx +++ b/web/app/components/datasets/documents/components/operations.tsx @@ -16,15 +16,16 @@ import { DropdownMenuContent, DropdownMenuTrigger, } from '@langgenius/dify-ui/dropdown-menu' +import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { Switch } from '@langgenius/dify-ui/switch' import { toast } from '@langgenius/dify-ui/toast' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { useBoolean, useDebounceFn } from 'ahooks' import { noop } from 'es-toolkit/function' import * as React from 'react' import { useCallback, useState } from 'react' import { useTranslation } from 'react-i18next' import Divider from '@/app/components/base/divider' -import Tooltip from '@/app/components/base/tooltip' import { IS_CE_EDITION } from '@/config' import { DataSourceType, DocumentActionType } from '@/models/datasets' import { useRouter } from '@/next/navigation' @@ -205,11 +206,12 @@ const Operations = ({ embeddingAvailable, datasetId, detail, selectedIds, onSele <> {archived ? ( - -
- -
-
+ + } /> + + {t('list.action.enableWarning', { ns: 'datasetDocuments' })} + + ) : handleSwitch(v ? 'enable' : 'disable')} size="md" />} @@ -217,16 +219,24 @@ const Operations = ({ embeddingAvailable, datasetId, detail, selectedIds, onSele )} {embeddingAvailable && ( <> - - + + router.push(`/datasets/${datasetId}/documents/${detail.id}/settings`)} + > + + + )} + /> + + {t('list.action.settings', { ns: 'datasetDocuments' })} + ({ ), })) -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ children, popupContent }: { children: React.ReactNode, popupContent: string }) => ( -
{children}
- ), -})) - vi.mock('../file-icon', () => ({ default: () => , })) diff --git a/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/list/item.tsx b/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/list/item.tsx index 5018806265..8d9bfe0dff 100644 --- a/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/list/item.tsx +++ b/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/list/item.tsx @@ -1,12 +1,10 @@ -import type { Placement } from '@floating-ui/react' import type { OnlineDriveFile } from '@/models/pipeline' -import { cn } from '@langgenius/dify-ui/cn' +import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import * as React from 'react' import { useCallback } from 'react' import { useTranslation } from 'react-i18next' import Checkbox from '@/app/components/base/checkbox' import Radio from '@/app/components/base/radio/ui' -import Tooltip from '@/app/components/base/tooltip' import { formatFileSize } from '@/utils/format' import FileIcon from './file-icon' @@ -33,14 +31,7 @@ const Item = ({ const isBucket = type === 'bucket' const isFolder = type === 'folder' - const Wrapper = disabled ? Tooltip : React.Fragment - const wrapperProps = disabled - ? { - popupContent: t('onlineDrive.notSupportedFileType', { ns: 'datasetPipeline' }), - position: 'top-end' as Placement, - offset: { mainAxis: 4, crossAxis: -104 }, - } - : {} + const disabledTip = t('onlineDrive.notSupportedFileType', { ns: 'datasetPipeline' }) const handleSelect = useCallback((e: React.MouseEvent | React.KeyboardEvent) => { e.stopPropagation() @@ -80,27 +71,44 @@ const Item = ({ onCheck={handleSelect} /> )} - -
+ + + + {name} + + {!isFolder && typeof size === 'number' && ( + {formatFileSize(size)} + )} + + + {disabledTip} + + + ) + : ( +
+ + + {name} + + {!isFolder && typeof size === 'number' && ( + {formatFileSize(size)} + )} +
)} - > - - - {name} - - {!isFolder && typeof size === 'number' && ( - {formatFileSize(size)} - )} -
-
) } diff --git a/web/app/components/datasets/documents/create-from-pipeline/processing/embedding-process/index.tsx b/web/app/components/datasets/documents/create-from-pipeline/processing/embedding-process/index.tsx index 797f7b296a..58d77b387c 100644 --- a/web/app/components/datasets/documents/create-from-pipeline/processing/embedding-process/index.tsx +++ b/web/app/components/datasets/documents/create-from-pipeline/processing/embedding-process/index.tsx @@ -4,6 +4,7 @@ import type { InitialDocumentDetail } from '@/models/pipeline' import type { RETRIEVE_METHOD } from '@/types/app' import { Button } from '@langgenius/dify-ui/button' import { cn } from '@langgenius/dify-ui/cn' +import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { RiAedFill, RiArrowRightLine, @@ -17,7 +18,6 @@ import { useEffect, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' import Divider from '@/app/components/base/divider' import NotionIcon from '@/app/components/base/notion-icon' -import Tooltip from '@/app/components/base/tooltip' import PriorityLabel from '@/app/components/billing/priority-label' import { Plan } from '@/app/components/billing/type' import UpgradeBtn from '@/app/components/billing/upgrade-btn' @@ -203,15 +203,18 @@ const EmbeddingProcess = ({
{`${getSourcePercent(indexingStatusDetail)}%`}
)} {indexingStatusDetail.indexing_status === 'error' && ( - - + + - - + + + {indexingStatusDetail.error} + + )} {indexingStatusDetail.indexing_status === 'completed' && ( diff --git a/web/app/components/datasets/documents/detail/completed/display-toggle.tsx b/web/app/components/datasets/documents/detail/completed/display-toggle.tsx index 6e961ac43f..6735399cd2 100644 --- a/web/app/components/datasets/documents/detail/completed/display-toggle.tsx +++ b/web/app/components/datasets/documents/detail/completed/display-toggle.tsx @@ -1,9 +1,9 @@ import type { FC } from 'react' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { RiLineHeight } from '@remixicon/react' import * as React from 'react' import { useTranslation } from 'react-i18next' import { Collapse } from '@/app/components/base/icons/src/vender/knowledge' -import Tooltip from '@/app/components/base/tooltip' type DisplayToggleProps = { isCollapsed: boolean @@ -15,25 +15,30 @@ const DisplayToggle: FC = ({ toggleCollapsed, }) => { const { t } = useTranslation() + const label = isCollapsed ? t('segment.expandChunks', { ns: 'datasetDocuments' }) : t('segment.collapseChunks', { ns: 'datasetDocuments' }) return ( - - - + + + { + isCollapsed + ? 
+ : + } + + )} + /> + + {label} + ) } diff --git a/web/app/components/datasets/documents/detail/completed/segment-card/index.tsx b/web/app/components/datasets/documents/detail/completed/segment-card/index.tsx index 1111bb6411..865ffbce15 100644 --- a/web/app/components/datasets/documents/detail/completed/segment-card/index.tsx +++ b/web/app/components/datasets/documents/detail/completed/segment-card/index.tsx @@ -10,13 +10,13 @@ import { } from '@langgenius/dify-ui/alert-dialog' import { cn } from '@langgenius/dify-ui/cn' import { Switch } from '@langgenius/dify-ui/switch' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { RiDeleteBinLine, RiEditLine } from '@remixicon/react' import * as React from 'react' import { useCallback, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' import Badge from '@/app/components/base/badge' import Divider from '@/app/components/base/divider' -import Tooltip from '@/app/components/base/tooltip' import ImageList from '@/app/components/datasets/common/image-list' import { ChunkingMode } from '@/models/datasets' import { formatNumber } from '@/utils/format' @@ -182,35 +182,43 @@ const SegmentCard: FC = ({ > {!archived && ( <> - -
{ - e.stopPropagation() - onClickEdit?.() - }} - > - -
+ + { + e.stopPropagation() + onClickEdit?.() + }} + > + + + )} + /> + Edit - -
{ - e.stopPropagation() - setShowModal(true) - }} - > - -
+ + { + e.stopPropagation() + setShowModal(true) + }} + > + + + )} + /> + Delete diff --git a/web/app/components/datasets/metadata/edit-metadata-batch/modal.tsx b/web/app/components/datasets/metadata/edit-metadata-batch/modal.tsx index 8253f7faf6..f76284b36f 100644 --- a/web/app/components/datasets/metadata/edit-metadata-batch/modal.tsx +++ b/web/app/components/datasets/metadata/edit-metadata-batch/modal.tsx @@ -3,7 +3,6 @@ import type { FC } from 'react' import type { BuiltInMetadataItem, MetadataItemInBatchEdit, MetadataItemWithEdit } from '../types' import { Button } from '@langgenius/dify-ui/button' import { toast } from '@langgenius/dify-ui/toast' -import { RiQuestionLine } from '@remixicon/react' import { produce } from 'immer' import * as React from 'react' import { useCallback, useState } from 'react' @@ -11,8 +10,8 @@ import { useTranslation } from 'react-i18next' import Divider from '@/app/components/base/divider' import { useCreateMetaData } from '@/service/knowledge/use-metadata' import Checkbox from '../../../base/checkbox' +import { Infotip } from '../../../base/infotip' import Modal from '../../../base/modal' -import Tooltip from '../../../base/tooltip' import AddMetadataButton from '../add-metadata-button' import useCheckMetadataName from '../hooks/use-check-metadata-name' import SelectMetadataModal from '../metadata-dataset/select-metadata-modal' @@ -115,11 +114,14 @@ const EditMetadataBatchModal: FC = ({ datasetId, documentNum, list, onSav
setIsApplyToAllSelectDocument(!isApplyToAllSelectDocument)} id="apply-to-all" />
{t(`${i18nPrefix}.applyToAllSelectDocument`, { ns: 'dataset' })}
- {t(`${i18nPrefix}.applyToAllSelectDocumentTip`, { ns: 'dataset' })}
}> -
- -
-
+ + {t(`${i18nPrefix}.applyToAllSelectDocumentTip`, { ns: 'dataset' })} +
} /> + + {t(`${i18nPrefix}.builtInDescription`, { ns: 'dataset' })} +
diff --git a/web/app/components/header/account-setting/model-provider-page/model-auth/__tests__/switch-credential-in-load-balancing.spec.tsx b/web/app/components/header/account-setting/model-provider-page/model-auth/__tests__/switch-credential-in-load-balancing.spec.tsx index f9c923e6a1..73aa8f9bfc 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-auth/__tests__/switch-credential-in-load-balancing.spec.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-auth/__tests__/switch-credential-in-load-balancing.spec.tsx @@ -1,5 +1,6 @@ import type { CustomModel, ModelProvider } from '@/app/components/header/account-setting/model-provider-page/declarations' import { fireEvent, render, screen } from '@testing-library/react' +import userEvent from '@testing-library/user-event' import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' import SwitchCredentialInLoadBalancing from '../switch-credential-in-load-balancing' @@ -105,7 +106,8 @@ describe('SwitchCredentialInLoadBalancing', () => { expect(mockSetCustomModelCredential).toHaveBeenCalledWith(mockCredentials[0]) }) - it('should show tooltip when empty and custom credentials not allowed', () => { + it('should show tooltip when empty and custom credentials not allowed', async () => { + const user = userEvent.setup() const restrictedProvider = { ...mockProvider, allow_custom_token: false } render( { />, ) - fireEvent.mouseEnter(screen.getByText(/auth.credentialUnavailableInButton/)) - expect(screen.getByText('plugin.auth.credentialUnavailable'))!.toBeInTheDocument() + await user.hover(screen.getByRole('button', { name: /auth.credentialUnavailableInButton/ })) + expect(await screen.findByText('plugin.auth.credentialUnavailable'))!.toBeInTheDocument() }) // Empty credentials with allowed custom: no tooltip but still shows unavailable text diff --git 
a/web/app/components/header/account-setting/model-provider-page/model-auth/config-provider.tsx b/web/app/components/header/account-setting/model-provider-page/model-auth/config-provider.tsx index 4a268168ba..7529ef9afb 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-auth/config-provider.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-auth/config-provider.tsx @@ -5,6 +5,7 @@ import type { import { Button, } from '@langgenius/dify-ui/button' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { RiEqualizer2Line, } from '@remixicon/react' @@ -13,7 +14,6 @@ import { useCallback, } from 'react' import { useTranslation } from 'react-i18next' -import Tooltip from '@/app/components/base/tooltip' import { ConfigurationMethodEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' import Authorized from './authorized' import { useCredentialStatus } from './hooks' @@ -53,11 +53,11 @@ const ConfigProvider = ({ ) if (notAllowCustomCredential && !hasCredential) { return ( - - {Item} + + + + {t('auth.credentialUnavailable', { ns: 'plugin' })} + ) } diff --git a/web/app/components/header/account-setting/model-provider-page/model-auth/switch-credential-in-load-balancing.tsx b/web/app/components/header/account-setting/model-provider-page/model-auth/switch-credential-in-load-balancing.tsx index 58ffc180dd..8ccfc0a640 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-auth/switch-credential-in-load-balancing.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-auth/switch-credential-in-load-balancing.tsx @@ -6,6 +6,7 @@ import type { } from '../declarations' import { Button } from '@langgenius/dify-ui/button' import { cn } from '@langgenius/dify-ui/cn' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { RiArrowDownSLine } from '@remixicon/react' import { 
memo, @@ -13,7 +14,6 @@ import { } from 'react' import { useTranslation } from 'react-i18next' import Badge from '@/app/components/base/badge' -import Tooltip from '@/app/components/base/tooltip' import { ConfigurationMethodEnum, ModelModalModeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' import Indicator from '@/app/components/header/indicator' import Authorized from './authorized' @@ -89,11 +89,11 @@ const SwitchCredentialInLoadBalancing = ({ ) if (empty && notAllowCustomCredential) { return ( - - {Item} + + + + {t('auth.credentialUnavailable', { ns: 'plugin' })} + ) } diff --git a/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/__tests__/status-indicators.spec.tsx b/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/__tests__/status-indicators.spec.tsx index dc7c512f78..b204462ab5 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/__tests__/status-indicators.spec.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/__tests__/status-indicators.spec.tsx @@ -21,10 +21,10 @@ describe('StatusIndicators', () => { installedPlugins = [{ name: 'demo-plugin', plugin_unique_identifier: 'demo@1.0.0' }] }) - const getTooltipTrigger = (container: HTMLElement) => { - const trigger = container.querySelector('[role="button"][aria-haspopup="dialog"]') + const getPopoverTrigger = (name: string) => { + const trigger = screen.getByRole('button', { name }) expect(trigger).toBeInTheDocument() - return trigger as HTMLElement + return trigger } it('should render nothing when model is available and enabled', () => { @@ -43,7 +43,7 @@ describe('StatusIndicators', () => { it('should render deprecated tooltip when provider model is disabled and in model list', async () => { const user = userEvent.setup() - const { container } = render( + render( { />, ) - await 
user.hover(getTooltipTrigger(container)) + await user.hover(getPopoverTrigger('nodes.agent.modelSelectorTooltips.deprecated')) expect(await screen.findByText('nodes.agent.modelSelectorTooltips.deprecated')).toBeInTheDocument() }) it('should render model-not-support tooltip when disabled model is not in model list and has no pluginInfo', async () => { const user = userEvent.setup() - const { container } = render( + render( { />, ) - await user.hover(getTooltipTrigger(container)) + await user.hover(getPopoverTrigger('nodes.agent.modelNotSupport.title')) expect(await screen.findByText('nodes.agent.modelNotSupport.title')).toBeInTheDocument() }) @@ -125,7 +125,7 @@ describe('StatusIndicators', () => { it('should render marketplace warning tooltip when provider is unavailable', async () => { const user = userEvent.setup() - const { container } = render( + render( { />, ) - await user.hover(getTooltipTrigger(container)) + await user.hover(getPopoverTrigger('nodes.agent.modelNotInMarketplace.title')) expect(await screen.findByText('nodes.agent.modelNotInMarketplace.title')).toBeInTheDocument() }) diff --git a/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/status-indicators.tsx b/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/status-indicators.tsx index cca5846390..bc505657e2 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/status-indicators.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-parameter-modal/status-indicators.tsx @@ -1,5 +1,6 @@ +import type { ReactNode } from 'react' +import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { RiErrorWarningFill } from '@remixicon/react' -import Tooltip from '@/app/components/base/tooltip' import { SwitchPluginVersion } from '@/app/components/workflow/nodes/_base/components/switch-plugin-version' import Link from '@/next/link' import { 
useInstalledPluginList } from '@/service/use-plugins' @@ -13,6 +14,28 @@ type StatusIndicatorsProps = { t: any } +type StatusPopoverProps = { + ariaLabel: string + content: ReactNode + children: ReactNode +} + +const StatusPopover = ({ ariaLabel, content, children }: StatusPopoverProps) => ( + + e.stopPropagation()} + > + {children} + + + {content} + + +) + const StatusIndicators = ({ needsConfiguration, modelProvider, inModelList, disabled, pluginInfo, t }: StatusIndicatorsProps) => { const { data: pluginList } = useInstalledPluginList() const renderTooltipContent = (title: string, description?: string, linkText?: string, linkHref?: string) => { @@ -48,27 +71,26 @@ const StatusIndicators = ({ needsConfiguration, modelProvider, inModelList, disa <> {inModelList ? ( - - + ) : !pluginInfo ? ( - - + ) : ( )} {!modelProvider && !pluginInfo && ( - - + )} ) diff --git a/web/app/components/header/account-setting/model-provider-page/model-selector/__tests__/popup-item.spec.tsx b/web/app/components/header/account-setting/model-provider-page/model-selector/__tests__/popup-item.spec.tsx index 3c4fea6f51..e198853ddd 100644 --- a/web/app/components/header/account-setting/model-provider-page/model-selector/__tests__/popup-item.spec.tsx +++ b/web/app/components/header/account-setting/model-provider-page/model-selector/__tests__/popup-item.spec.tsx @@ -42,10 +42,6 @@ vi.mock('../feature-icon', () => ({ default: ({ feature }: { feature: string }) => {feature}, })) -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ children }: { children: ReactNode }) =>
{children}
, -})) - const mockCredentialPanelState = vi.hoisted(() => vi.fn()) vi.mock('../../provider-added-card/use-credential-panel-state', () => ({ useCredentialPanelState: mockCredentialPanelState, diff --git a/web/app/components/header/account-setting/model-provider-page/provider-added-card/model-list-item.tsx b/web/app/components/header/account-setting/model-provider-page/provider-added-card/model-list-item.tsx index 305ef71c50..8ef0f11901 100644 --- a/web/app/components/header/account-setting/model-provider-page/provider-added-card/model-list-item.tsx +++ b/web/app/components/header/account-setting/model-provider-page/provider-added-card/model-list-item.tsx @@ -1,5 +1,6 @@ import type { ModelItem, ModelProvider } from '../declarations' import { cn } from '@langgenius/dify-ui/cn' +import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { Switch } from '@langgenius/dify-ui/switch' import { useQueryClient } from '@tanstack/react-query' import { useDebounceFn } from 'ahooks' @@ -7,7 +8,6 @@ import { memo, useCallback } from 'react' import { useTranslation } from 'react-i18next' import Badge from '@/app/components/base/badge' import { Balance } from '@/app/components/base/icons/src/vender/line/financeAndECommerce' -import Tooltip from '@/app/components/base/tooltip' import { Plan } from '@/app/components/billing/type' import { useAppContext } from '@/context/app-context' import { useProviderContext, useProviderContextSelector } from '@/context/provider-context' @@ -102,14 +102,12 @@ const ModelListItem = ({ model, provider, isConfigurable, onChange, onModifyLoad { model.deprecated ? ( - {t('modelProvider.modelHasBeenDeprecated', { ns: 'common' })} - } - offset={{ mainAxis: 4 }} - > - - + + } /> + + {t('modelProvider.modelHasBeenDeprecated', { ns: 'common' })} + + ) : (isCurrentWorkspaceManager && ( {
- - {data.endpoint} -
- )} - position="left" - > -
- {data.endpoint} -
-
+ {data.endpoint + ? ( + + + {data.endpoint} + + + {data.endpoint} + + + ) + : ( +
+ {data.endpoint} +
+ )}
·
{data.workflows_in_use > 0 ? t('subscription.list.item.usedByNum', { ns: 'pluginTrigger', num: data.workflows_in_use }) : t('subscription.list.item.noUsed', { ns: 'pluginTrigger' })} diff --git a/web/app/components/plugins/plugin-detail-panel/tool-selector/components/__tests__/reasoning-config-form.spec.tsx b/web/app/components/plugins/plugin-detail-panel/tool-selector/components/__tests__/reasoning-config-form.spec.tsx index 016eda373d..50db3887b0 100644 --- a/web/app/components/plugins/plugin-detail-panel/tool-selector/components/__tests__/reasoning-config-form.spec.tsx +++ b/web/app/components/plugins/plugin-detail-panel/tool-selector/components/__tests__/reasoning-config-form.spec.tsx @@ -54,10 +54,6 @@ vi.mock('@langgenius/dify-ui/switch', () => ({ ), })) -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ children }: { children?: React.ReactNode }) => <>{children}, -})) - vi.mock('@/app/components/header/account-setting/model-provider-page/hooks', () => ({ useLanguage: () => 'en_US', })) @@ -233,7 +229,7 @@ describe('ReasoningConfigForm', () => { it('should open schema modal for object fields and support app selection', () => { const onChange = vi.fn() - const { container } = render( + render( { />, ) - fireEvent.click(container.querySelector('div.ml-0\\.5.cursor-pointer')!) 
+ fireEvent.click(screen.getByRole('button', { name: 'workflow.nodes.agent.clickToViewParameterSchema' })) expect(screen.getByTestId('schema-modal')).toHaveTextContent('Config') fireEvent.click(screen.getByTestId('close-schema')) diff --git a/web/app/components/plugins/plugin-detail-panel/tool-selector/components/reasoning-config-form.tsx b/web/app/components/plugins/plugin-detail-panel/tool-selector/components/reasoning-config-form.tsx index e6af05065f..1baae6d3ca 100644 --- a/web/app/components/plugins/plugin-detail-panel/tool-selector/components/reasoning-config-form.tsx +++ b/web/app/components/plugins/plugin-detail-panel/tool-selector/components/reasoning-config-form.tsx @@ -9,6 +9,7 @@ import type { import { cn } from '@langgenius/dify-ui/cn' import { Select, SelectContent, SelectItem, SelectItemIndicator, SelectItemText, SelectTrigger } from '@langgenius/dify-ui/select' import { Switch } from '@langgenius/dify-ui/switch' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { RiArrowRightUpLine, RiBracesLine, @@ -16,9 +17,8 @@ import { import { useBoolean } from 'ahooks' import { useCallback, useState } from 'react' import { useTranslation } from 'react-i18next' +import { Infotip } from '@/app/components/base/infotip' import Input from '@/app/components/base/input' -// eslint-disable-next-line no-restricted-imports -- legacy tooltip migration is handled separately from this change -import Tooltip from '@/app/components/base/tooltip' import { FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' import { useLanguage } from '@/app/components/header/account-setting/model-provider-page/hooks' import { AppSelector } from '@/app/components/plugins/plugin-detail-panel/app-selector' @@ -127,17 +127,16 @@ const ReasoningConfigForm: React.FC = ({ } = schema const auto = value[variable]?.auto const fieldTitle = getFieldTitle(label, language) - const tooltipContent = (tooltip && ( - - 
{tooltip[language] || tooltip.en_US} -
- )} - triggerClassName="ml-0.5 w-4 h-4" - asChild={false} - /> - )) + const tooltipText = tooltip?.[language] || tooltip?.en_US + const tooltipContent = tooltipText && ( + + {tooltipText} + + ) const varInput = value[variable]!.value const { isString, @@ -173,20 +172,22 @@ const ReasoningConfigForm: React.FC = ({ · {resolveTargetVarType(type)} {isShowJSONEditor && ( - - {t('nodes.agent.clickToViewParameterSchema', { ns: 'workflow' })} - - )} - asChild={false} - > -
showSchema(input_schema as SchemaRoot, fieldTitle!)} - > - -
+ + showSchema(input_schema as SchemaRoot, fieldTitle!)} + > + + + )} + /> + + {t('nodes.agent.clickToViewParameterSchema', { ns: 'workflow' })} + )} diff --git a/web/app/components/plugins/plugin-detail-panel/tool-selector/components/tool-item.tsx b/web/app/components/plugins/plugin-detail-panel/tool-selector/components/tool-item.tsx index ba85957108..d92c59b457 100644 --- a/web/app/components/plugins/plugin-detail-panel/tool-selector/components/tool-item.tsx +++ b/web/app/components/plugins/plugin-detail-panel/tool-selector/components/tool-item.tsx @@ -1,8 +1,8 @@ 'use client' import { Button } from '@langgenius/dify-ui/button' import { cn } from '@langgenius/dify-ui/cn' +import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { Switch } from '@langgenius/dify-ui/switch' -import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { RiDeleteBinLine, RiEqualizer2Line, @@ -14,7 +14,6 @@ import { useTranslation } from 'react-i18next' import ActionButton from '@/app/components/base/action-button' import AppIcon from '@/app/components/base/app-icon' import { Group } from '@/app/components/base/icons/src/vender/other' -import { ToolTipContent } from '@/app/components/base/tooltip/content' import Indicator from '@/app/components/header/indicator' import { InstallPluginButton } from '@/app/components/workflow/nodes/_base/components/install-plugin-button' import { useMCPToolAvailability } from '@/app/components/workflow/nodes/_base/components/mcp-tool-availability' @@ -144,11 +143,14 @@ const ToolItem = ({ className="-mt-1" uniqueIdentifier={installInfo} tooltip={( - - {`${t('detailPanel.toolSelector.unsupportedContent', { ns: 'plugin' })} ${t('detailPanel.toolSelector.unsupportedContent2', { ns: 'plugin' })}`} - +
+
+ {t('detailPanel.toolSelector.unsupportedTitle', { ns: 'plugin' })} +
+
+ {`${t('detailPanel.toolSelector.unsupportedContent', { ns: 'plugin' })} ${t('detailPanel.toolSelector.unsupportedContent2', { ns: 'plugin' })}`} +
+
)} onChange={() => { onInstall?.() @@ -167,18 +169,18 @@ const ToolItem = ({ /> )} {isError && ( - - - - - )} - /> - + + + + + {errorTip} - - + + )} ) diff --git a/web/app/components/plugins/plugin-item/index.tsx b/web/app/components/plugins/plugin-item/index.tsx index 5843dffbe9..6e6aaf88c9 100644 --- a/web/app/components/plugins/plugin-item/index.tsx +++ b/web/app/components/plugins/plugin-item/index.tsx @@ -2,6 +2,7 @@ import type { FC } from 'react' import type { PluginDetail } from '../types' import { cn } from '@langgenius/dify-ui/cn' +import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { RiArrowRightUpLine, RiBugLine, @@ -13,7 +14,6 @@ import { useSuspenseQuery } from '@tanstack/react-query' import * as React from 'react' import { useCallback, useMemo } from 'react' import { useTranslation } from 'react-i18next' -import Tooltip from '@/app/components/base/tooltip' import useRefreshPluginList from '@/app/components/plugins/install-plugin/hooks/use-refresh-plugin-list' import { API_PREFIX } from '@/config' import { useAppContext } from '@/context/app-context' @@ -124,12 +124,18 @@ const PluginItem: FC = ({ {verified && <Verified className="ml-0.5 h-4 w-4" text={t('marketplace.verifiedTip', { ns: 'plugin' })} />} {!isDifyVersionCompatible && ( - <Tooltip popupContent={ - t('difyVersionNotCompatible', { ns: 'plugin', minimalDifyVersion: declarationMeta.minimum_dify_version }) - } - > - <RiErrorWarningLine color="red" className="ml-0.5 h-4 w-4 shrink-0 text-text-accent" /> - </Tooltip> + <Popover> + <PopoverTrigger + openOnHover + aria-label={t('difyVersionNotCompatible', { ns: 'plugin', minimalDifyVersion: declarationMeta.minimum_dify_version })} + className="ml-0.5 inline-flex h-4 w-4 shrink-0 border-0 bg-transparent p-0" + > + <RiErrorWarningLine color="red" className="h-4 w-4 text-text-accent" /> + </PopoverTrigger> + <PopoverContent popupClassName="px-3 py-2 system-xs-regular text-text-tertiary"> + 
{t('difyVersionNotCompatible', { ns: 'plugin', minimalDifyVersion: declarationMeta.minimum_dify_version })} + </PopoverContent> + </Popover> )} <Badge className="ml-1 shrink-0" diff --git a/web/app/components/rag-pipeline/components/panel/input-field/index.tsx b/web/app/components/rag-pipeline/components/panel/input-field/index.tsx index 95a76d5e86..3572d6012f 100644 --- a/web/app/components/rag-pipeline/components/panel/input-field/index.tsx +++ b/web/app/components/rag-pipeline/components/panel/input-field/index.tsx @@ -13,7 +13,7 @@ import { import { useTranslation } from 'react-i18next' import { useNodes } from 'reactflow' import Divider from '@/app/components/base/divider' -import Tooltip from '@/app/components/base/tooltip' +import { Infotip } from '@/app/components/base/infotip' import { useInputFieldPanel } from '@/app/components/rag-pipeline/hooks' import { useNodesSyncDraft } from '@/app/components/workflow/hooks' import { useStore } from '@/app/components/workflow/store' @@ -137,10 +137,12 @@ const InputFieldPanel = () => { <span className="system-sm-semibold-uppercase text-text-secondary"> {t('inputFieldPanel.uniqueInputs.title', { ns: 'datasetPipeline' })} </span> - <Tooltip - popupContent={t('inputFieldPanel.uniqueInputs.tooltip', { ns: 'datasetPipeline' })} + <Infotip + aria-label={t('inputFieldPanel.uniqueInputs.tooltip', { ns: 'datasetPipeline' })} popupClassName="max-w-[240px]" - /> + > + {t('inputFieldPanel.uniqueInputs.tooltip', { ns: 'datasetPipeline' })} + </Infotip> </div> <div className="flex flex-col gap-y-1 py-1"> { diff --git a/web/app/components/tools/edit-custom-collection-modal/config-credentials.tsx b/web/app/components/tools/edit-custom-collection-modal/config-credentials.tsx index bee775e80b..9ed7c45165 100644 --- a/web/app/components/tools/edit-custom-collection-modal/config-credentials.tsx +++ b/web/app/components/tools/edit-custom-collection-modal/config-credentials.tsx @@ -6,9 +6,9 @@ import { cn } from '@langgenius/dify-ui/cn' 
import * as React from 'react' import { useTranslation } from 'react-i18next' import Drawer from '@/app/components/base/drawer-plus' +import { Infotip } from '@/app/components/base/infotip' import Input from '@/app/components/base/input' import Radio from '@/app/components/base/radio/ui' -import Tooltip from '@/app/components/base/tooltip' import { AuthHeaderPrefix, AuthType } from '@/app/components/tools/types' type Props = { @@ -123,14 +123,13 @@ const ConfigCredential: FC<Props> = ({ <div> <div className="flex items-center py-2 system-sm-medium text-text-primary"> {t('createTool.authMethod.key', { ns: 'tools' })} - <Tooltip - popupContent={( - <div className="w-[261px] text-text-tertiary"> - {t('createTool.authMethod.keyTooltip', { ns: 'tools' })} - </div> - )} - triggerClassName="ml-0.5 w-4 h-4" - /> + <Infotip + aria-label={t('createTool.authMethod.keyTooltip', { ns: 'tools' })} + className="ml-0.5 h-4 w-4" + popupClassName="w-[261px] text-text-tertiary" + > + {t('createTool.authMethod.keyTooltip', { ns: 'tools' })} + </Infotip> </div> <Input value={tempCredential.api_key_header} @@ -153,14 +152,13 @@ const ConfigCredential: FC<Props> = ({ <div> <div className="flex items-center py-2 system-sm-medium text-text-primary"> {t('createTool.authMethod.queryParam', { ns: 'tools' })} - <Tooltip - popupContent={( - <div className="w-[261px] text-text-tertiary"> - {t('createTool.authMethod.queryParamTooltip', { ns: 'tools' })} - </div> - )} - triggerClassName="ml-0.5 w-4 h-4" - /> + <Infotip + aria-label={t('createTool.authMethod.queryParamTooltip', { ns: 'tools' })} + className="ml-0.5 h-4 w-4" + popupClassName="w-[261px] text-text-tertiary" + > + {t('createTool.authMethod.queryParamTooltip', { ns: 'tools' })} + </Infotip> </div> <Input value={tempCredential.api_key_query_param} diff --git a/web/app/components/tools/workflow-tool/__tests__/index.spec.tsx b/web/app/components/tools/workflow-tool/__tests__/index.spec.tsx index 8c35232d35..3a8e3a539b 100644 --- 
a/web/app/components/tools/workflow-tool/__tests__/index.spec.tsx +++ b/web/app/components/tools/workflow-tool/__tests__/index.spec.tsx @@ -1,4 +1,3 @@ -import type { ReactNode } from 'react' import type { WorkflowToolDrawerPayload } from '../index' import { render, screen, waitFor } from '@testing-library/react' import userEvent from '@testing-library/user-event' @@ -28,21 +27,6 @@ vi.mock('@/app/components/tools/labels/selector', () => ({ ), })) -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ - children, - popupContent, - }: { - children?: ReactNode - popupContent?: ReactNode - }) => ( - <div> - {children} - {popupContent} - </div> - ), -})) - vi.mock('../confirm-modal', () => ({ default: ({ show, onClose, onConfirm }: { show: boolean, onClose: () => void, onConfirm: () => void }) => ( show diff --git a/web/app/components/workflow/block-selector/__tests__/tabs.spec.tsx b/web/app/components/workflow/block-selector/__tests__/tabs.spec.tsx index 3002cafa0a..208d87c23a 100644 --- a/web/app/components/workflow/block-selector/__tests__/tabs.spec.tsx +++ b/web/app/components/workflow/block-selector/__tests__/tabs.spec.tsx @@ -23,21 +23,6 @@ const { }, })) -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ - children, - popupContent, - }: { - children: React.ReactNode - popupContent: React.ReactNode - }) => ( - <div> - <span>{popupContent}</span> - {children} - </div> - ), -})) - vi.mock('@/service/use-plugins', () => ({ useFeaturedToolsRecommendations: () => ({ plugins: [], @@ -121,11 +106,13 @@ describe('Tabs', () => { filterElem: <div>filter</div>, } - it('should render start content and disabled tab tooltip text', () => { + it('should render start content and disabled tab tooltip text', async () => { + const user = userEvent.setup() render(<Tabs {...baseProps} />) expect(screen.getByText('start-content'))!.toBeInTheDocument() - expect(screen.getByText('workflow.tabs.startDisabledTip'))!.toBeInTheDocument() + await 
user.hover(screen.getByText('Blocks')) + expect(await screen.findByText('workflow.tabs.startDisabledTip'))!.toBeInTheDocument() }) it('should switch tabs through click handlers and render tools content with normalized icons', () => { diff --git a/web/app/components/workflow/block-selector/tabs.tsx b/web/app/components/workflow/block-selector/tabs.tsx index 48af942df7..0b38a2df2c 100644 --- a/web/app/components/workflow/block-selector/tabs.tsx +++ b/web/app/components/workflow/block-selector/tabs.tsx @@ -6,10 +6,10 @@ import type { ToolWithProvider, } from '../types' import { cn } from '@langgenius/dify-ui/cn' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { useSuspenseQuery } from '@tanstack/react-query' import { memo, useEffect, useMemo } from 'react' import { useTranslation } from 'react-i18next' -import Tooltip from '@/app/components/base/tooltip' import { systemFeaturesQueryOptions } from '@/service/system-features' import { useFeaturedToolsRecommendations } from '@/service/use-plugins' import { useAllBuiltInTools, useAllCustomTools, useAllMCPTools, useAllWorkflowTools, useInvalidateAllBuiltInTools } from '@/service/use-tools' @@ -129,19 +129,22 @@ const TabHeaderItem = ({ if (tab.disabled) { return ( - <Tooltip - key={tab.key} - position="top" - popupClassName="max-w-[200px]" - popupContent={disabledTip} - > - <div - className={className} - aria-disabled={tab.disabled} - onClick={handleClick} - > - {tab.name} - </div> + <Tooltip key={tab.key}> + <TooltipTrigger + render={( + <button + type="button" + className={className} + aria-disabled={tab.disabled} + onClick={handleClick} + > + {tab.name} + </button> + )} + /> + <TooltipContent placement="top" className="max-w-[200px]"> + {disabledTip} + </TooltipContent> </Tooltip> ) } diff --git a/web/app/components/workflow/nodes/_base/components/mcp-tool-not-support-tooltip.tsx b/web/app/components/workflow/nodes/_base/components/mcp-tool-not-support-tooltip.tsx index 
671459bbbd..8c2ee7f976 100644 --- a/web/app/components/workflow/nodes/_base/components/mcp-tool-not-support-tooltip.tsx +++ b/web/app/components/workflow/nodes/_base/components/mcp-tool-not-support-tooltip.tsx @@ -1,22 +1,23 @@ 'use client' import type { FC } from 'react' +import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { RiAlertFill } from '@remixicon/react' import * as React from 'react' import { useTranslation } from 'react-i18next' -import Tooltip from '@/app/components/base/tooltip' const McpToolNotSupportTooltip: FC = () => { const { t } = useTranslation() + const tip = t('detailPanel.toolSelector.unsupportedMCPTool', { ns: 'plugin' }) + return ( - <Tooltip - popupContent={( - <div className="w-[256px]"> - {t('detailPanel.toolSelector.unsupportedMCPTool', { ns: 'plugin' })} - </div> - )} - > - <RiAlertFill className="size-4 text-text-warning-secondary" /> - </Tooltip> + <Popover> + <PopoverTrigger openOnHover aria-label={tip} className="inline-flex border-0 bg-transparent p-0"> + <RiAlertFill className="size-4 text-text-warning-secondary" /> + </PopoverTrigger> + <PopoverContent popupClassName="w-[256px] px-3 py-2 system-xs-regular text-text-tertiary"> + {tip} + </PopoverContent> + </Popover> ) } export default React.memo(McpToolNotSupportTooltip) diff --git a/web/app/components/workflow/nodes/_base/components/switch-plugin-version.tsx b/web/app/components/workflow/nodes/_base/components/switch-plugin-version.tsx index 141323e5b3..fc2b328950 100644 --- a/web/app/components/workflow/nodes/_base/components/switch-plugin-version.tsx +++ b/web/app/components/workflow/nodes/_base/components/switch-plugin-version.tsx @@ -2,13 +2,13 @@ import type { FC, ReactNode } from 'react' import { cn } from '@langgenius/dify-ui/cn' +import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { RiArrowLeftRightLine, RiExternalLinkLine } from '@remixicon/react' import { useBoolean } from 'ahooks' 
import { useCallback, useState } from 'react' import { useTranslation } from 'react-i18next' import Badge from '@/app/components/base/badge' import { Badge as Badge2, BadgeState } from '@/app/components/base/badge/index' -import Tooltip from '@/app/components/base/tooltip' import useGetIcon from '@/app/components/plugins/install-plugin/base/use-get-icon' import { pluginManifestToCardPluginProps } from '@/app/components/plugins/install-plugin/utils' import PluginMutationModel from '@/app/components/plugins/plugin-mutation-model' @@ -67,76 +67,91 @@ export const SwitchPluginVersion: FC<SwitchPluginVersionProps> = (props) => { if (!uniqueIdentifier || !pluginId) return null + const content = ( + <div className={cn('flex w-fit items-center justify-center', className)} onClick={e => e.stopPropagation()}> + {isShowUpdateModal && pluginDetail && ( + <PluginMutationModel + onCancel={hideUpdateModal} + plugin={pluginManifestToCardPluginProps({ + ...pluginDetail.declaration, + icon: icon!, + })} + mutation={mutation} + mutate={install} + confirmButtonText={t('nodes.agent.installPlugin.install', { ns: 'workflow' })} + cancelButtonText={t('nodes.agent.installPlugin.cancel', { ns: 'workflow' })} + modelTitle={t('nodes.agent.installPlugin.title', { ns: 'workflow' })} + description={t('nodes.agent.installPlugin.desc', { ns: 'workflow' })} + cardTitleLeft={( + <> + <Badge2 className="mx-1" size="s" state={BadgeState.Warning}> + {`${pluginDetail.version} -> ${target!.version}`} + </Badge2> + </> + )} + modalBottomLeft={( + <Link + className="flex items-center justify-center gap-1" + href={getMarketplaceUrl(`/plugins/${pluginDetail.declaration.author}/${pluginDetail.declaration.name}`)} + target="_blank" + rel="noopener noreferrer" + > + <span className="system-xs-regular text-xs text-text-accent"> + {t('nodes.agent.installPlugin.changelog', { ns: 'workflow' })} + </span> + <RiExternalLinkLine className="size-3 text-text-accent" /> + </Link> + )} + /> + )} + {pluginDetail && ( + 
<PluginVersionPicker + isShow={isShow} + onShowChange={setIsShow} + pluginID={pluginId} + currentVersion={pluginDetail.version} + onSelect={(state) => { + setTarget({ + pluginUniqueIden: state.unique_identifier, + version: state.version, + }) + showUpdateModal() + }} + trigger={( + <Badge + className={cn( + 'mx-1 flex hover:bg-state-base-hover', + isShow && 'bg-state-base-hover', + )} + uppercase={true} + text={( + <> + <div>{pluginDetail.version}</div> + <RiArrowLeftRightLine className="ml-1 h-3 w-3 text-text-tertiary" /> + </> + )} + hasRedCornerMark={true} + /> + )} + /> + )} + </div> + ) + + if (!tooltip || isShow || isShowUpdateModal) + return content + return ( - <Tooltip popupContent={!isShow && !isShowUpdateModal && tooltip} triggerMethod="hover"> - <div className={cn('flex w-fit items-center justify-center', className)} onClick={e => e.stopPropagation()}> - {isShowUpdateModal && pluginDetail && ( - <PluginMutationModel - onCancel={hideUpdateModal} - plugin={pluginManifestToCardPluginProps({ - ...pluginDetail.declaration, - icon: icon!, - })} - mutation={mutation} - mutate={install} - confirmButtonText={t('nodes.agent.installPlugin.install', { ns: 'workflow' })} - cancelButtonText={t('nodes.agent.installPlugin.cancel', { ns: 'workflow' })} - modelTitle={t('nodes.agent.installPlugin.title', { ns: 'workflow' })} - description={t('nodes.agent.installPlugin.desc', { ns: 'workflow' })} - cardTitleLeft={( - <> - <Badge2 className="mx-1" size="s" state={BadgeState.Warning}> - {`${pluginDetail.version} -> ${target!.version}`} - </Badge2> - </> - )} - modalBottomLeft={( - <Link - className="flex items-center justify-center gap-1" - href={getMarketplaceUrl(`/plugins/${pluginDetail.declaration.author}/${pluginDetail.declaration.name}`)} - target="_blank" - rel="noopener noreferrer" - > - <span className="system-xs-regular text-xs text-text-accent"> - {t('nodes.agent.installPlugin.changelog', { ns: 'workflow' })} - </span> - <RiExternalLinkLine className="size-3 
text-text-accent" /> - </Link> - )} - /> - )} - {pluginDetail && ( - <PluginVersionPicker - isShow={isShow} - onShowChange={setIsShow} - pluginID={pluginId} - currentVersion={pluginDetail.version} - onSelect={(state) => { - setTarget({ - pluginUniqueIden: state.unique_identifier, - version: state.version, - }) - showUpdateModal() - }} - trigger={( - <Badge - className={cn( - 'mx-1 flex hover:bg-state-base-hover', - isShow && 'bg-state-base-hover', - )} - uppercase={true} - text={( - <> - <div>{pluginDetail.version}</div> - <RiArrowLeftRightLine className="ml-1 h-3 w-3 text-text-tertiary" /> - </> - )} - hasRedCornerMark={true} - /> - )} - /> - )} - </div> - </Tooltip> + <Popover> + <PopoverTrigger + openOnHover + nativeButton={false} + aria-label={typeof tooltip === 'string' ? tooltip : t('nodes.agent.installPlugin.title', { ns: 'workflow' })} + render={content} + /> + <PopoverContent popupClassName="px-3 py-2 system-xs-regular text-text-tertiary"> + {tooltip} + </PopoverContent> + </Popover> ) } diff --git a/web/app/components/workflow/nodes/iteration-start/index.tsx b/web/app/components/workflow/nodes/iteration-start/index.tsx index 90a57bef26..ec4f1bfa7f 100644 --- a/web/app/components/workflow/nodes/iteration-start/index.tsx +++ b/web/app/components/workflow/nodes/iteration-start/index.tsx @@ -1,8 +1,8 @@ import type { NodeProps } from 'reactflow' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { RiHome5Fill } from '@remixicon/react' import { memo } from 'react' import { useTranslation } from 'react-i18next' -import Tooltip from '@/app/components/base/tooltip' import { NodeSourceHandle } from '@/app/components/workflow/nodes/_base/components/node-handle' const IterationStartNode = ({ id, data }: NodeProps) => { @@ -10,10 +10,14 @@ const IterationStartNode = ({ id, data }: NodeProps) => { return ( <div className="nodrag group mt-1 flex h-11 w-11 items-center justify-center rounded-2xl border 
border-workflow-block-border bg-workflow-block-bg shadow-xs"> - <Tooltip popupContent={t('blocks.iteration-start', { ns: 'workflow' })} asChild={false}> - <div className="flex h-6 w-6 items-center justify-center rounded-full border-[0.5px] border-components-panel-border-subtle bg-util-colors-blue-brand-blue-brand-500"> + <Tooltip> + <TooltipTrigger + aria-label={t('blocks.iteration-start', { ns: 'workflow' })} + className="flex h-6 w-6 items-center justify-center rounded-full border-[0.5px] border-components-panel-border-subtle bg-util-colors-blue-brand-blue-brand-500 p-0" + > <RiHome5Fill className="h-3 w-3 text-text-primary-on-surface" /> - </div> + </TooltipTrigger> + <TooltipContent>{t('blocks.iteration-start', { ns: 'workflow' })}</TooltipContent> </Tooltip> <NodeSourceHandle id={id} @@ -30,10 +34,14 @@ export const IterationStartNodeDumb = () => { return ( <div className="nodrag relative top-[21px] left-[17px] z-11 flex h-11 w-11 items-center justify-center rounded-2xl border border-workflow-block-border bg-workflow-block-bg"> - <Tooltip popupContent={t('blocks.iteration-start', { ns: 'workflow' })} asChild={false}> - <div className="flex h-6 w-6 items-center justify-center rounded-full border-[0.5px] border-components-panel-border-subtle bg-util-colors-blue-brand-blue-brand-500"> + <Tooltip> + <TooltipTrigger + aria-label={t('blocks.iteration-start', { ns: 'workflow' })} + className="flex h-6 w-6 items-center justify-center rounded-full border-[0.5px] border-components-panel-border-subtle bg-util-colors-blue-brand-blue-brand-500 p-0" + > <RiHome5Fill className="h-3 w-3 text-text-primary-on-surface" /> - </div> + </TooltipTrigger> + <TooltipContent>{t('blocks.iteration-start', { ns: 'workflow' })}</TooltipContent> </Tooltip> </div> ) diff --git a/web/app/components/workflow/nodes/knowledge-base/components/retrieval-setting/search-method-option.tsx b/web/app/components/workflow/nodes/knowledge-base/components/retrieval-setting/search-method-option.tsx index 
a1f601cce9..54b37f9b52 100644 --- a/web/app/components/workflow/nodes/knowledge-base/components/retrieval-setting/search-method-option.tsx +++ b/web/app/components/workflow/nodes/knowledge-base/components/retrieval-setting/search-method-option.tsx @@ -17,7 +17,7 @@ import { import { useTranslation } from 'react-i18next' import WeightedScoreComponent from '@/app/components/app/configuration/dataset-config/params-config/weighted-score' import { AlertTriangle } from '@/app/components/base/icons/src/vender/solid/alertsAndFeedback' -import Tooltip from '@/app/components/base/tooltip' +import { Infotip } from '@/app/components/base/infotip' import { DEFAULT_WEIGHTED_SCORE } from '@/models/datasets' import { HybridSearchModeEnum, @@ -174,10 +174,13 @@ const SearchMethodOption = ({ disabled={readonly} /> {t('modelProvider.rerankModel.key', { ns: 'common' })} - <Tooltip - triggerClassName="ml-0.5 shrink-0 w-3.5 h-3.5" - popupContent={t('modelProvider.rerankModel.tip', { ns: 'common' })} - /> + <Infotip + aria-label={t('modelProvider.rerankModel.tip', { ns: 'common' })} + className="ml-0.5 h-3.5 w-3.5 shrink-0" + iconClassName="h-3.5 w-3.5" + > + {t('modelProvider.rerankModel.tip', { ns: 'common' })} + </Infotip> </div> ) } diff --git a/web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/metadata-filter/index.tsx b/web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/metadata-filter/index.tsx index 2f5baeb089..61d693de29 100644 --- a/web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/metadata-filter/index.tsx +++ b/web/app/components/workflow/nodes/knowledge-retrieval/components/metadata/metadata-filter/index.tsx @@ -5,7 +5,7 @@ import { useState, } from 'react' import { useTranslation } from 'react-i18next' -import Tooltip from '@/app/components/base/tooltip' +import { Infotip } from '@/app/components/base/infotip' import ModelParameterModal from 
'@/app/components/header/account-setting/model-provider-page/model-parameter-modal' import Collapse from '@/app/components/workflow/nodes/_base/components/collapse' import { MetadataFilteringModeEnum } from '@/app/components/workflow/nodes/knowledge-retrieval/types' @@ -46,13 +46,9 @@ const MetadataFilter = ({ <div className="mr-0.5 system-sm-semibold-uppercase text-text-secondary"> {t('nodes.knowledgeRetrieval.metadata.title', { ns: 'workflow' })} </div> - <Tooltip - popupContent={( - <div className="w-[200px]"> - {t('nodes.knowledgeRetrieval.metadata.tip', { ns: 'workflow' })} - </div> - )} - /> + <Infotip aria-label={t('nodes.knowledgeRetrieval.metadata.tip', { ns: 'workflow' })} popupClassName="w-[200px]"> + {t('nodes.knowledgeRetrieval.metadata.tip', { ns: 'workflow' })} + </Infotip> {collapseIcon} </div> <div className="flex items-center"> diff --git a/web/app/components/workflow/nodes/loop-start/index.tsx b/web/app/components/workflow/nodes/loop-start/index.tsx index a7bd18c7a5..9900b84856 100644 --- a/web/app/components/workflow/nodes/loop-start/index.tsx +++ b/web/app/components/workflow/nodes/loop-start/index.tsx @@ -1,8 +1,8 @@ import type { NodeProps } from 'reactflow' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { RiHome5Fill } from '@remixicon/react' import { memo } from 'react' import { useTranslation } from 'react-i18next' -import Tooltip from '@/app/components/base/tooltip' import { NodeSourceHandle } from '@/app/components/workflow/nodes/_base/components/node-handle' const LoopStartNode = ({ id, data }: NodeProps) => { @@ -10,10 +10,14 @@ const LoopStartNode = ({ id, data }: NodeProps) => { return ( <div className="nodrag group mt-1 flex h-11 w-11 items-center justify-center rounded-2xl border border-workflow-block-border bg-workflow-block-bg"> - <Tooltip popupContent={t('blocks.loop-start', { ns: 'workflow' })} asChild={false}> - <div className="flex h-6 w-6 items-center justify-center rounded-full 
border-[0.5px] border-components-panel-border-subtle bg-util-colors-blue-brand-blue-brand-500"> + <Tooltip> + <TooltipTrigger + aria-label={t('blocks.loop-start', { ns: 'workflow' })} + className="flex h-6 w-6 items-center justify-center rounded-full border-[0.5px] border-components-panel-border-subtle bg-util-colors-blue-brand-blue-brand-500 p-0" + > <RiHome5Fill className="h-3 w-3 text-text-primary-on-surface" /> - </div> + </TooltipTrigger> + <TooltipContent>{t('blocks.loop-start', { ns: 'workflow' })}</TooltipContent> </Tooltip> <NodeSourceHandle id={id} @@ -30,10 +34,14 @@ export const LoopStartNodeDumb = () => { return ( <div className="nodrag relative top-[21px] left-[17px] z-11 flex h-11 w-11 items-center justify-center rounded-2xl border border-workflow-block-border bg-workflow-block-bg"> - <Tooltip popupContent={t('blocks.loop-start', { ns: 'workflow' })} asChild={false}> - <div className="flex h-6 w-6 items-center justify-center rounded-full border-[0.5px] border-components-panel-border-subtle bg-util-colors-blue-brand-blue-brand-500"> + <Tooltip> + <TooltipTrigger + aria-label={t('blocks.loop-start', { ns: 'workflow' })} + className="flex h-6 w-6 items-center justify-center rounded-full border-[0.5px] border-components-panel-border-subtle bg-util-colors-blue-brand-blue-brand-500 p-0" + > <RiHome5Fill className="h-3 w-3 text-text-primary-on-surface" /> - </div> + </TooltipTrigger> + <TooltipContent>{t('blocks.loop-start', { ns: 'workflow' })}</TooltipContent> </Tooltip> </div> ) diff --git a/web/app/components/workflow/nodes/parameter-extractor/panel.tsx b/web/app/components/workflow/nodes/parameter-extractor/panel.tsx index a116a6303d..9165d53394 100644 --- a/web/app/components/workflow/nodes/parameter-extractor/panel.tsx +++ b/web/app/components/workflow/nodes/parameter-extractor/panel.tsx @@ -3,7 +3,7 @@ import type { ParameterExtractorNodeType } from './types' import type { NodePanelProps } from '@/app/components/workflow/types' import * as React 
from 'react' import { useTranslation } from 'react-i18next' -import Tooltip from '@/app/components/base/tooltip' +import { Infotip } from '@/app/components/base/infotip' import ModelParameterModal from '@/app/components/header/account-setting/model-provider-page/model-parameter-modal' import { FieldCollapse } from '@/app/components/workflow/nodes/_base/components/collapse' import Field from '@/app/components/workflow/nodes/_base/components/field' @@ -131,14 +131,14 @@ const Panel: FC<NodePanelProps<ParameterExtractorNodeType>> = ({ title={( <div className="flex items-center space-x-1"> <span className="uppercase">{t(`${i18nPrefix}.instruction`, { ns: 'workflow' })}</span> - <Tooltip - popupContent={( - <div className="w-[120px]"> - {t(`${i18nPrefix}.instructionTip`, { ns: 'workflow' })} - </div> - )} - triggerClassName="w-3.5 h-3.5 ml-0.5" - /> + <Infotip + aria-label={t(`${i18nPrefix}.instructionTip`, { ns: 'workflow' })} + className="ml-0.5 h-3.5 w-3.5" + iconClassName="h-3.5 w-3.5" + popupClassName="w-[120px]" + > + {t(`${i18nPrefix}.instructionTip`, { ns: 'workflow' })} + </Infotip> </div> )} value={inputs.instruction} diff --git a/web/app/components/workflow/nodes/question-classifier/__tests__/integration.spec.tsx b/web/app/components/workflow/nodes/question-classifier/__tests__/integration.spec.tsx index c11f78bc08..ada3fc43cc 100644 --- a/web/app/components/workflow/nodes/question-classifier/__tests__/integration.spec.tsx +++ b/web/app/components/workflow/nodes/question-classifier/__tests__/integration.spec.tsx @@ -344,8 +344,8 @@ describe('question-classifier path', () => { ) expect(screen.getByText(`${longName.slice(0, 50)}...`)).toBeInTheDocument() - await user.hover(screen.getByText(`${longName.slice(0, 50)}...`)) - expect(screen.getByText(longName)).toBeInTheDocument() + await user.hover(screen.getByRole('button', { name: longName })) + expect(await screen.findByText(longName)).toBeInTheDocument() rerender( <Node diff --git 
a/web/app/components/workflow/nodes/question-classifier/__tests__/node.spec.tsx b/web/app/components/workflow/nodes/question-classifier/__tests__/node.spec.tsx index a7e72c343c..ad411639e9 100644 --- a/web/app/components/workflow/nodes/question-classifier/__tests__/node.spec.tsx +++ b/web/app/components/workflow/nodes/question-classifier/__tests__/node.spec.tsx @@ -1,25 +1,10 @@ import type { QuestionClassifierNodeType, Topic } from '../types' import { render, screen } from '@testing-library/react' +import userEvent from '@testing-library/user-event' import { useTextGenerationCurrentProviderAndModelAndModelList } from '@/app/components/header/account-setting/model-provider-page/hooks' import { BlockEnum } from '@/app/components/workflow/types' import Node from '../node' -vi.mock('@/app/components/base/tooltip', () => ({ - __esModule: true, - default: ({ - children, - popupContent, - }: { - children: React.ReactNode - popupContent: React.ReactNode - }) => ( - <div> - {children} - {popupContent} - </div> - ), -})) - vi.mock('@/app/components/header/account-setting/model-provider-page/hooks', () => ({ useTextGenerationCurrentProviderAndModelAndModelList: vi.fn(), })) @@ -101,7 +86,8 @@ describe('question-classifier/node', () => { expect(screen.getByText('handle-topic-2')).toBeInTheDocument() }) - it('returns nothing when neither model nor classes are configured and truncates long class names', () => { + it('returns nothing when neither model nor classes are configured and truncates long class names', async () => { + const user = userEvent.setup() const longName = 'L'.repeat(60) const { container, rerender } = render( <Node @@ -119,7 +105,8 @@ describe('question-classifier/node', () => { ) expect(screen.getByText(`${longName.slice(0, 50)}...`)).toBeInTheDocument() - expect(screen.getByText(longName)).toBeInTheDocument() + await user.hover(screen.getByRole('button', { name: longName })) + expect(await screen.findByText(longName)).toBeInTheDocument() rerender( <Node diff 
--git a/web/app/components/workflow/nodes/question-classifier/components/advanced-setting.tsx b/web/app/components/workflow/nodes/question-classifier/components/advanced-setting.tsx index 90d53f7271..d788d2518f 100644 --- a/web/app/components/workflow/nodes/question-classifier/components/advanced-setting.tsx +++ b/web/app/components/workflow/nodes/question-classifier/components/advanced-setting.tsx @@ -3,7 +3,7 @@ import type { FC } from 'react' import type { Memory, Node, NodeOutPutVar } from '@/app/components/workflow/types' import * as React from 'react' import { useTranslation } from 'react-i18next' -import Tooltip from '@/app/components/base/tooltip' +import { Infotip } from '@/app/components/base/infotip' import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor' import MemoryConfig from '../../_base/components/memory-config' @@ -48,14 +48,14 @@ const AdvancedSetting: FC<Props> = ({ title={( <div className="flex items-center space-x-1"> <span className="uppercase">{t(`${i18nPrefix}.instruction`, { ns: 'workflow' })}</span> - <Tooltip - popupContent={( - <div className="w-[120px]"> - {t(`${i18nPrefix}.instructionTip`, { ns: 'workflow' })} - </div> - )} - triggerClassName="w-3.5 h-3.5 ml-0.5" - /> + <Infotip + aria-label={t(`${i18nPrefix}.instructionTip`, { ns: 'workflow' })} + className="ml-0.5 h-3.5 w-3.5" + iconClassName="h-3.5 w-3.5" + popupClassName="w-[120px]" + > + {t(`${i18nPrefix}.instructionTip`, { ns: 'workflow' })} + </Infotip> </div> )} value={instruction} diff --git a/web/app/components/workflow/nodes/question-classifier/node.tsx b/web/app/components/workflow/nodes/question-classifier/node.tsx index 2aae8debcf..305eacc204 100644 --- a/web/app/components/workflow/nodes/question-classifier/node.tsx +++ b/web/app/components/workflow/nodes/question-classifier/node.tsx @@ -2,9 +2,9 @@ import type { TFunction } from 'i18next' import type { FC } from 'react' import type { NodeProps } from 'reactflow' import type { 
QuestionClassifierNodeType } from './types' +import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import * as React from 'react' import { useTranslation } from 'react-i18next' -import Tooltip from '@/app/components/base/tooltip' import { useTextGenerationCurrentProviderAndModelAndModelList, } from '@/app/components/header/account-setting/model-provider-page/hooks' @@ -47,15 +47,18 @@ const TruncatedClassItem: FC<TruncatedClassItemProps> = ({ topic, index, nodeId, </div> {shouldShowTooltip ? ( - <Tooltip - popupContent={( - <div className="max-w-[300px] wrap-break-word"> - <ReadonlyInputWithSelectVar value={topic.name} nodeId={nodeId} /> - </div> - )} - > - {content} - </Tooltip> + <Popover> + <PopoverTrigger + openOnHover + aria-label={topic.name} + className="w-full border-0 bg-transparent p-0 text-left" + > + {content} + </PopoverTrigger> + <PopoverContent popupClassName="max-w-[300px] px-3 py-2 system-xs-regular wrap-break-word text-text-tertiary"> + <ReadonlyInputWithSelectVar value={topic.name} nodeId={nodeId} /> + </PopoverContent> + </Popover> ) : content} </div> diff --git a/web/app/components/workflow/nodes/trigger-plugin/components/trigger-form/item.tsx b/web/app/components/workflow/nodes/trigger-plugin/components/trigger-form/item.tsx index 44eb9d44c6..3cae373d48 100644 --- a/web/app/components/workflow/nodes/trigger-plugin/components/trigger-form/item.tsx +++ b/web/app/components/workflow/nodes/trigger-plugin/components/trigger-form/item.tsx @@ -9,7 +9,7 @@ import { RiBracesLine, } from '@remixicon/react' import { useBoolean } from 'ahooks' -import Tooltip from '@/app/components/base/tooltip' +import { Infotip } from '@/app/components/base/infotip' import { FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' import { useLanguage } from '@/app/components/header/account-setting/model-provider-page/hooks' import { SchemaModal } from 
'@/app/components/plugins/plugin-detail-panel/tool-selector/components' @@ -57,15 +57,13 @@ const TriggerFormItem: FC<Props> = ({ <div className="ml-1 system-xs-regular text-text-destructive-secondary">*</div> )} {!showDescription && tooltip && ( - <Tooltip - popupContent={( - <div className="w-[200px]"> - {tooltip[language] || tooltip.en_US} - </div> - )} - triggerClassName="ml-1 w-4 h-4" - asChild={false} - /> + <Infotip + aria-label={tooltip[language] || tooltip.en_US} + className="ml-1 h-4 w-4" + popupClassName="w-[200px]" + > + {tooltip[language] || tooltip.en_US} + </Infotip> )} {showSchemaButton && ( <> diff --git a/web/app/components/workflow/nodes/trigger-webhook/__tests__/panel.spec.tsx b/web/app/components/workflow/nodes/trigger-webhook/__tests__/panel.spec.tsx index a1f5f1e2c8..4d60e77ba2 100644 --- a/web/app/components/workflow/nodes/trigger-webhook/__tests__/panel.spec.tsx +++ b/web/app/components/workflow/nodes/trigger-webhook/__tests__/panel.spec.tsx @@ -94,10 +94,6 @@ vi.mock('@/app/components/base/input-with-copy', () => ({ ), })) -vi.mock('@/app/components/base/tooltip', () => ({ - default: ({ children }: { children: React.ReactNode }) => <>{children}</>, -})) - vi.mock('@/app/components/workflow/nodes/_base/components/field', () => ({ default: ({ title, children }: { title: string, children: React.ReactNode }) => ( <div> diff --git a/web/app/components/workflow/nodes/trigger-webhook/panel.tsx b/web/app/components/workflow/nodes/trigger-webhook/panel.tsx index fb6bfacf38..53498c52f2 100644 --- a/web/app/components/workflow/nodes/trigger-webhook/panel.tsx +++ b/web/app/components/workflow/nodes/trigger-webhook/panel.tsx @@ -11,12 +11,12 @@ import { } from '@langgenius/dify-ui/number-field' import { Select, SelectContent, SelectItem, SelectItemIndicator, SelectItemText, SelectTrigger } from '@langgenius/dify-ui/select' import { toast } from '@langgenius/dify-ui/toast' +import { Tooltip, TooltipContent, TooltipTrigger } from 
'@langgenius/dify-ui/tooltip' import copy from 'copy-to-clipboard' import * as React from 'react' import { useEffect, useRef, useState } from 'react' import { useTranslation } from 'react-i18next' import InputWithCopy from '@/app/components/base/input-with-copy' -import Tooltip from '@/app/components/base/tooltip' import Field from '@/app/components/workflow/nodes/_base/components/field' import OutputVars from '@/app/components/workflow/nodes/_base/components/output-vars' import Split from '@/app/components/workflow/nodes/_base/components/split' @@ -118,32 +118,38 @@ const Panel: FC<NodePanelProps<WebhookTriggerNodeType>> = ({ </div> {inputs.webhook_debug_url && ( <div className="space-y-2"> - <Tooltip - popupContent={debugUrlCopied ? t(`${i18nPrefix}.debugUrlCopied`, { ns: 'workflow' }) : t(`${i18nPrefix}.debugUrlCopy`, { ns: 'workflow' })} - popupClassName="system-xs-regular text-text-primary bg-components-tooltip-bg border border-components-panel-border shadow-lg backdrop-blur-xs rounded-md px-1.5 py-1" - position="top" - offset={{ mainAxis: -20 }} - needsDelay={true} - > - <div - className="flex cursor-pointer gap-1.5 rounded-lg px-1 py-1.5 transition-colors" - style={{ width: '368px', height: '38px' }} - onClick={() => { - copy(inputs.webhook_debug_url || '') - setDebugUrlCopied(true) - setTimeout(() => setDebugUrlCopied(false), 2000) - }} + <Tooltip> + <TooltipTrigger + render={( + <button + type="button" + aria-label={t(`${i18nPrefix}.debugUrlCopy`, { ns: 'workflow' })} + className="flex cursor-pointer gap-1.5 rounded-lg px-1 py-1.5 text-left transition-colors" + style={{ width: '368px', height: '38px' }} + onClick={() => { + copy(inputs.webhook_debug_url || '') + setDebugUrlCopied(true) + setTimeout(() => setDebugUrlCopied(false), 2000) + }} + > + <span className="mt-0.5 w-0.5 bg-divider-regular" style={{ height: '28px' }} /> + <span className="flex-1" style={{ width: '352px', height: '32px' }}> + <span className="block text-xs leading-4 
text-text-tertiary"> + {t(`${i18nPrefix}.debugUrlTitle`, { ns: 'workflow' })} + </span> + <span className="block truncate text-xs leading-4 text-text-primary"> + {inputs.webhook_debug_url} + </span> + </span> + </button> + )} + /> + <TooltipContent + placement="top" + className="rounded-md border border-components-panel-border bg-components-tooltip-bg px-1.5 py-1 system-xs-regular text-text-primary shadow-lg backdrop-blur-xs" > - <div className="mt-0.5 w-0.5 bg-divider-regular" style={{ height: '28px' }}></div> - <div className="flex-1" style={{ width: '352px', height: '32px' }}> - <div className="text-xs leading-4 text-text-tertiary"> - {t(`${i18nPrefix}.debugUrlTitle`, { ns: 'workflow' })} - </div> - <div className="truncate text-xs leading-4 text-text-primary"> - {inputs.webhook_debug_url} - </div> - </div> - </div> + {debugUrlCopied ? t(`${i18nPrefix}.debugUrlCopied`, { ns: 'workflow' }) : t(`${i18nPrefix}.debugUrlCopy`, { ns: 'workflow' })} + </TooltipContent> </Tooltip> {isPrivateOrLocalAddress(inputs.webhook_debug_url) && ( <div className="mt-1 px-0 py-[2px] system-xs-regular text-text-warning"> diff --git a/web/app/components/workflow/panel/env-panel/variable-modal.tsx b/web/app/components/workflow/panel/env-panel/variable-modal.tsx index 267c014e1d..2560ab968e 100644 --- a/web/app/components/workflow/panel/env-panel/variable-modal.tsx +++ b/web/app/components/workflow/panel/env-panel/variable-modal.tsx @@ -7,8 +7,8 @@ import * as React from 'react' import { useEffect } from 'react' import { useTranslation } from 'react-i18next' import { v4 as uuid4 } from 'uuid' +import { Infotip } from '@/app/components/base/infotip' import Input from '@/app/components/base/input' -import Tooltip from '@/app/components/base/tooltip' import { useWorkflowStore } from '@/app/components/workflow/store' import { checkKeys, replaceSpaceWithUnderscoreInVarNameInput } from '@/utils/var' @@ -129,14 +129,14 @@ const VariableModal = ({ onClick={() => setType('secret')} > 
<span>Secret</span> - <Tooltip - popupContent={( - <div className="w-[240px]"> - {t('env.modal.secretTip', { ns: 'workflow' })} - </div> - )} - triggerClassName="ml-0.5 w-3.5 h-3.5" - /> + <Infotip + aria-label={t('env.modal.secretTip', { ns: 'workflow' })} + className="ml-0.5 h-3.5 w-3.5" + iconClassName="h-3.5 w-3.5" + popupClassName="w-[240px]" + > + {t('env.modal.secretTip', { ns: 'workflow' })} + </Infotip> </div> </div> </div> diff --git a/web/app/components/workflow/run/node.tsx b/web/app/components/workflow/run/node.tsx index a87922eb54..85607a1342 100644 --- a/web/app/components/workflow/run/node.tsx +++ b/web/app/components/workflow/run/node.tsx @@ -17,7 +17,7 @@ import { RiLoader2Line, RiPauseCircleFill, } from '@remixicon/react' -import { useCallback, useEffect, useMemo, useRef, useState } from 'react' +import { useCallback, useEffect, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor' import ErrorHandleTip from '@/app/components/workflow/nodes/_base/components/error-handle/error-handle-tip' @@ -68,16 +68,6 @@ const NodePanel: FC<Props> = ({ return doSetCollapseState(state) }, [hideProcessDetail]) - const titleRef = useRef<HTMLDivElement>(null) - const [isTooltipOpen, setIsTooltipOpen] = useState(false) - const handleTooltipOpenChange = useCallback((open: boolean) => { - if (open) { - const el = titleRef.current - if (!el || el.scrollWidth <= el.clientWidth) - return - } - setIsTooltipOpen(open) - }, []) const { t } = useTranslation() const docLink = useDocLink() @@ -142,11 +132,10 @@ const NodePanel: FC<Props> = ({ /> )} <BlockIcon size={inMessage ? 
'xs' : 'sm'} className={cn('mr-2 shrink-0', inMessage && 'mr-1!')} type={nodeInfo.node_type} toolIcon={nodeInfo.extras?.icon || nodeInfo.extras} /> - <Tooltip open={isTooltipOpen} onOpenChange={handleTooltipOpenChange}> + <Tooltip> <TooltipTrigger render={( <div - ref={titleRef} className={cn( 'min-w-0 grow truncate system-xs-semibold-uppercase text-text-secondary', hideInfo && 'text-xs!', diff --git a/web/app/components/workflow/variable-inspect/listening.tsx b/web/app/components/workflow/variable-inspect/listening.tsx index 3994355d58..cf702a623a 100644 --- a/web/app/components/workflow/variable-inspect/listening.tsx +++ b/web/app/components/workflow/variable-inspect/listening.tsx @@ -4,12 +4,12 @@ import type { Node } from 'reactflow' import type { ScheduleTriggerNodeType } from '@/app/components/workflow/nodes/trigger-schedule/types' import type { WebhookTriggerNodeType } from '@/app/components/workflow/nodes/trigger-webhook/types' import { Button } from '@langgenius/dify-ui/button' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import copy from 'copy-to-clipboard' import { useEffect, useState } from 'react' import { useTranslation } from 'react-i18next' import { useStoreApi } from 'reactflow' import { StopCircle } from '@/app/components/base/icons/src/vender/line/mediaAndDevices' -import Tooltip from '@/app/components/base/tooltip' import BlockIcon from '@/app/components/workflow/block-icon' import { useGetToolIcon } from '@/app/components/workflow/hooks/use-tool-icon' import { getNextExecutionTime } from '@/app/components/workflow/nodes/trigger-schedule/utils/execution-time-calculator' @@ -179,28 +179,32 @@ const Listening: FC<ListeningProps> = ({ <div className="shrink-0 system-xs-regular whitespace-pre-line text-text-tertiary"> {t('nodes.triggerWebhook.debugUrlTitle', { ns: 'workflow' })} </div> - <Tooltip - popupContent={debugUrlCopied - ? 
t('nodes.triggerWebhook.debugUrlCopied', { ns: 'workflow' }) - : t('nodes.triggerWebhook.debugUrlCopy', { ns: 'workflow' })} - popupClassName="system-xs-regular text-text-primary bg-components-tooltip-bg border border-components-panel-border shadow-lg backdrop-blur-xs rounded-md px-1.5 py-1" - position="top" - offset={{ mainAxis: -4 }} - needsDelay={true} - > - <button - type="button" - aria-label={t('nodes.triggerWebhook.debugUrlCopy', { ns: 'workflow' }) || ''} - className={`inline-flex items-center rounded-md border border-divider-regular bg-components-badge-white-to-dark px-1.5 py-[2px] font-mono text-[13px] leading-[18px] text-text-secondary transition-colors hover:bg-components-panel-on-panel-item-bg-hover focus:outline-hidden focus-visible:outline-2 focus-visible:outline-components-panel-border focus-visible:outline-solid ${debugUrlCopied ? 'bg-components-panel-on-panel-item-bg-hover text-text-primary' : ''}`} - onClick={() => { - copy(webhookDebugUrl) - setDebugUrlCopied(true) - }} + <Tooltip> + <TooltipTrigger + render={( + <button + type="button" + aria-label={t('nodes.triggerWebhook.debugUrlCopy', { ns: 'workflow' }) || ''} + className={`inline-flex items-center rounded-md border border-divider-regular bg-components-badge-white-to-dark px-1.5 py-[2px] font-mono text-[13px] leading-[18px] text-text-secondary transition-colors hover:bg-components-panel-on-panel-item-bg-hover focus:outline-hidden focus-visible:outline-2 focus-visible:outline-components-panel-border focus-visible:outline-solid ${debugUrlCopied ? 
'bg-components-panel-on-panel-item-bg-hover text-text-primary' : ''}`} + onClick={() => { + copy(webhookDebugUrl) + setDebugUrlCopied(true) + }} + > + <span className="whitespace-nowrap text-text-primary"> + {webhookDebugUrl} + </span> + </button> + )} + /> + <TooltipContent + placement="top" + className="rounded-md border border-components-panel-border bg-components-tooltip-bg px-1.5 py-1 system-xs-regular text-text-primary shadow-lg backdrop-blur-xs" > - <span className="whitespace-nowrap text-text-primary"> - {webhookDebugUrl} - </span> - </button> + {debugUrlCopied + ? t('nodes.triggerWebhook.debugUrlCopied', { ns: 'workflow' }) + : t('nodes.triggerWebhook.debugUrlCopy', { ns: 'workflow' })} + </TooltipContent> </Tooltip> </div> )} diff --git a/web/app/components/workflow/workflow-preview/components/nodes/base.tsx b/web/app/components/workflow/workflow-preview/components/nodes/base.tsx index 34c8d753ce..5bfbd52561 100644 --- a/web/app/components/workflow/workflow-preview/components/nodes/base.tsx +++ b/web/app/components/workflow/workflow-preview/components/nodes/base.tsx @@ -6,12 +6,12 @@ import type { NodeProps, } from '@/app/components/workflow/types' import { cn } from '@langgenius/dify-ui/cn' +import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { cloneElement, memo, } from 'react' import { useTranslation } from 'react-i18next' -import Tooltip from '@/app/components/base/tooltip' import BlockIcon from '@/app/components/workflow/block-icon' import { BlockEnum, @@ -91,19 +91,21 @@ const BaseCard = ({ </div> { data.type === BlockEnum.Iteration && (data as IterationNodeType).is_parallel && ( - <Tooltip popupContent={( - <div className="w-[180px]"> + <Popover> + <PopoverTrigger + openOnHover + aria-label={t('nodes.iteration.parallelModeEnableTitle', { ns: 'workflow' })} + className="ml-1 flex items-center justify-center rounded-[5px] border border-text-warning bg-transparent px-[5px] py-[3px] system-2xs-medium-uppercase 
text-text-warning" + > + {t('nodes.iteration.parallelModeUpper', { ns: 'workflow' })} + </PopoverTrigger> + <PopoverContent popupClassName="w-[180px] px-3 py-2 system-xs-regular text-text-tertiary"> <div className="font-extrabold"> {t('nodes.iteration.parallelModeEnableTitle', { ns: 'workflow' })} </div> {t('nodes.iteration.parallelModeEnableDesc', { ns: 'workflow' })} - </div> - )} - > - <div className="ml-1 flex items-center justify-center rounded-[5px] border border-text-warning px-[5px] py-[3px] system-2xs-medium-uppercase text-text-warning"> - {t('nodes.iteration.parallelModeUpper', { ns: 'workflow' })} - </div> - </Tooltip> + </PopoverContent> + </Popover> ) } </div> diff --git a/web/app/components/workflow/workflow-preview/components/nodes/iteration-start/index.tsx b/web/app/components/workflow/workflow-preview/components/nodes/iteration-start/index.tsx index 6a69e5f2aa..391150649f 100644 --- a/web/app/components/workflow/workflow-preview/components/nodes/iteration-start/index.tsx +++ b/web/app/components/workflow/workflow-preview/components/nodes/iteration-start/index.tsx @@ -1,8 +1,8 @@ import type { NodeProps } from 'reactflow' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { RiHome5Fill } from '@remixicon/react' import { memo } from 'react' import { useTranslation } from 'react-i18next' -import Tooltip from '@/app/components/base/tooltip' import { NodeSourceHandle } from '../../node-handle' const IterationStartNode = ({ id, data }: NodeProps) => { @@ -10,10 +10,14 @@ const IterationStartNode = ({ id, data }: NodeProps) => { return ( <div className="nodrag group mt-1 flex h-11 w-11 items-center justify-center rounded-2xl border border-workflow-block-border bg-workflow-block-bg shadow-xs"> - <Tooltip popupContent={t('blocks.iteration-start', { ns: 'workflow' })} asChild={false}> - <div className="flex h-6 w-6 items-center justify-center rounded-full border-[0.5px] border-components-panel-border-subtle 
bg-util-colors-blue-brand-blue-brand-500"> + <Tooltip> + <TooltipTrigger + aria-label={t('blocks.iteration-start', { ns: 'workflow' })} + className="flex h-6 w-6 items-center justify-center rounded-full border-[0.5px] border-components-panel-border-subtle bg-util-colors-blue-brand-blue-brand-500 p-0" + > <RiHome5Fill className="h-3 w-3 text-text-primary-on-surface" /> - </div> + </TooltipTrigger> + <TooltipContent>{t('blocks.iteration-start', { ns: 'workflow' })}</TooltipContent> </Tooltip> <NodeSourceHandle id={id} diff --git a/web/app/components/workflow/workflow-preview/components/nodes/loop-start/index.tsx b/web/app/components/workflow/workflow-preview/components/nodes/loop-start/index.tsx index e67c0d9f10..67865be470 100644 --- a/web/app/components/workflow/workflow-preview/components/nodes/loop-start/index.tsx +++ b/web/app/components/workflow/workflow-preview/components/nodes/loop-start/index.tsx @@ -1,8 +1,8 @@ import type { NodeProps } from 'reactflow' +import { Tooltip, TooltipContent, TooltipTrigger } from '@langgenius/dify-ui/tooltip' import { RiHome5Fill } from '@remixicon/react' import { memo } from 'react' import { useTranslation } from 'react-i18next' -import Tooltip from '@/app/components/base/tooltip' import { NodeSourceHandle } from '../../node-handle' const LoopStartNode = ({ id, data }: NodeProps) => { @@ -10,10 +10,14 @@ const LoopStartNode = ({ id, data }: NodeProps) => { return ( <div className="nodrag group mt-1 flex h-11 w-11 items-center justify-center rounded-2xl border border-workflow-block-border bg-workflow-block-bg"> - <Tooltip popupContent={t('blocks.loop-start', { ns: 'workflow' })} asChild={false}> - <div className="flex h-6 w-6 items-center justify-center rounded-full border-[0.5px] border-components-panel-border-subtle bg-util-colors-blue-brand-blue-brand-500"> + <Tooltip> + <TooltipTrigger + aria-label={t('blocks.loop-start', { ns: 'workflow' })} + className="flex h-6 w-6 items-center justify-center rounded-full border-[0.5px] 
border-components-panel-border-subtle bg-util-colors-blue-brand-blue-brand-500 p-0" + > <RiHome5Fill className="h-3 w-3 text-text-primary-on-surface" /> - </div> + </TooltipTrigger> + <TooltipContent>{t('blocks.loop-start', { ns: 'workflow' })}</TooltipContent> </Tooltip> <NodeSourceHandle id={id} diff --git a/web/docs/overlay-migration.md b/web/docs/overlay-migration.md index b849159867..3d94d82e64 100644 --- a/web/docs/overlay-migration.md +++ b/web/docs/overlay-migration.md @@ -7,7 +7,6 @@ This document tracks the Dify-web migration away from legacy overlay APIs. ## Scope - Deprecated imports: - - `@/app/components/base/tooltip` - `@/app/components/base/modal` - `@/app/components/base/dialog` - `@/app/components/base/drawer` @@ -36,6 +35,8 @@ This document tracks the Dify-web migration away from legacy overlay APIs. 1. Business/UI features outside `app/components/base/**` - Migrate old calls to semantic primitives from `@langgenius/dify-ui/*`. - Keep deprecated imports out of newly touched files. + - Use `@langgenius/dify-ui/tooltip` only for short, non-interactive labels where the trigger already has its own accessible name. + - Use `@langgenius/dify-ui/popover` or the web `Infotip` wrapper for explanatory, long-form, structured, or interactive content. 1. Legacy base components - Migrate legacy base callers gradually. - Keep deprecated imports out of newly touched files. @@ -75,6 +76,9 @@ back to `z-9999`. parent legacy overlay should be migrated instead. - When migrating a legacy overlay that has a high z-index, remove the z-index entirely — the new primitive's default `z-1002` handles it. +- When using Base UI trigger `render`, render a real `button` for button-like + triggers. If the trigger must render a non-button element, the primitive must + explicitly opt out of the native button behavior where that API is available. 
### Post-migration cleanup diff --git a/web/eslint.constants.mjs b/web/eslint.constants.mjs index f74c5c9115..eb85d5d902 100644 --- a/web/eslint.constants.mjs +++ b/web/eslint.constants.mjs @@ -45,13 +45,6 @@ export const WEB_RESTRICTED_IMPORT_PATTERNS = [ ] export const OVERLAY_RESTRICTED_IMPORT_PATTERNS = [ - { - group: [ - '**/base/tooltip', - '**/base/tooltip/index', - ], - message: 'Deprecated: use @langgenius/dify-ui/tooltip instead. See issue #32767.', - }, { group: [ '**/base/modal', From 2c9e30426d9049305dbc661e148bf54fb8f49066 Mon Sep 17 00:00:00 2001 From: yyh <92089059+lyzno1@users.noreply.github.com> Date: Sat, 9 May 2026 14:49:26 +0800 Subject: [PATCH 10/13] refactor(web): migrate headless-ui components to dify-ui (#35962) --- web/__tests__/header/nav-flow.test.tsx | 27 +- web/app/account/(commonLayout)/avatar.tsx | 119 ++++---- .../header-opts/__tests__/index.spec.tsx | 152 +++++++---- .../app/annotation/header-opts/index.tsx | 131 ++++----- .../__tests__/access-control-dialog.spec.tsx | 3 +- .../__tests__/access-control.spec.tsx | 6 +- .../access-control-dialog.tsx | 49 +--- .../app/app-access-control/index.tsx | 2 +- .../__tests__/param-config-content.spec.tsx | 48 +--- .../text-to-speech/param-config-content.tsx | 182 ++++--------- .../credential-selector/index.tsx | 142 ++++------ .../operation/transfer-ownership.tsx | 64 ++--- .../__tests__/priority-selector.spec.tsx | 29 -- .../provider-added-card/priority-selector.tsx | 77 ------ .../app-selector/__tests__/index.spec.tsx | 172 ------------ .../components/header/app-selector/index.tsx | 117 -------- .../header/nav/__tests__/index.spec.tsx | 52 ++-- .../nav/nav-selector/__tests__/index.spec.tsx | 17 +- .../header/nav/nav-selector/index.tsx | 256 +++++++++--------- .../form-input-item.branches.spec.tsx | 8 +- .../form-input-item.sections.spec.tsx | 12 +- .../components/form-input-item.sections.tsx | 81 +++--- 22 files changed, 583 insertions(+), 1163 deletions(-) delete mode 100644 
web/app/components/header/account-setting/model-provider-page/provider-added-card/__tests__/priority-selector.spec.tsx delete mode 100644 web/app/components/header/account-setting/model-provider-page/provider-added-card/priority-selector.tsx delete mode 100644 web/app/components/header/app-selector/__tests__/index.spec.tsx delete mode 100644 web/app/components/header/app-selector/index.tsx diff --git a/web/__tests__/header/nav-flow.test.tsx b/web/__tests__/header/nav-flow.test.tsx index 667f1e36b7..58c95f0a01 100644 --- a/web/__tests__/header/nav-flow.test.tsx +++ b/web/__tests__/header/nav-flow.test.tsx @@ -1,4 +1,5 @@ import { fireEvent, render, screen, waitFor } from '@testing-library/react' +import userEvent from '@testing-library/user-event' import * as React from 'react' import { beforeEach, describe, expect, it, vi } from 'vitest' import Nav from '@/app/components/header/nav' @@ -192,27 +193,23 @@ describe('Header Nav Flow', () => { }) it('opens the nested create menu and emits all app creation branches', async () => { - renderNav() - - fireEvent.click(screen.getByRole('button', { name: /Alpha/i })) - - const openCreateMenu = async () => { - fireEvent.click(await screen.findByText('menus.newApp')) - return screen.findByText('newApp.startFromBlank') + const user = userEvent.setup() + const clickCreateBranch = async (optionName: string) => { + const { unmount } = renderNav() + await user.click(screen.getByRole('button', { name: /Alpha/i })) + await user.hover(await screen.findByRole('menuitem', { name: /menus\.newApp/i })) + fireEvent.click(await screen.findByRole('menuitem', { name: optionName })) + unmount() } - await openCreateMenu() - fireEvent.click(await screen.findByText('newApp.startFromBlank')) - - await openCreateMenu() - fireEvent.click(await screen.findByText('newApp.startFromTemplate')) - - await openCreateMenu() - fireEvent.click(await screen.findByText('importDSL')) + await clickCreateBranch('newApp.startFromBlank') + await 
clickCreateBranch('newApp.startFromTemplate') + await clickCreateBranch('importDSL') expect(mockOnCreate).toHaveBeenNthCalledWith(1, 'blank') expect(mockOnCreate).toHaveBeenNthCalledWith(2, 'template') expect(mockOnCreate).toHaveBeenNthCalledWith(3, 'dsl') + expect(mockOnCreate).toHaveBeenCalledTimes(3) }) it('keeps the current nav label in sync with prop updates', async () => { diff --git a/web/app/account/(commonLayout)/avatar.tsx b/web/app/account/(commonLayout)/avatar.tsx index ccae182c9a..3fefb8a319 100644 --- a/web/app/account/(commonLayout)/avatar.tsx +++ b/web/app/account/(commonLayout)/avatar.tsx @@ -1,11 +1,13 @@ 'use client' -import { Menu, MenuButton, MenuItem, MenuItems, Transition } from '@headlessui/react' import { Avatar } from '@langgenius/dify-ui/avatar' +import { cn } from '@langgenius/dify-ui/cn' import { - RiGraduationCapFill, -} from '@remixicon/react' + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from '@langgenius/dify-ui/dropdown-menu' import { useSuspenseQuery } from '@tanstack/react-query' -import { Fragment } from 'react' import { useTranslation } from 'react-i18next' import { resetUser } from '@/app/components/base/amplitude/utils' import { LogOut01 } from '@/app/components/base/icons/src/vender/line/general' @@ -38,73 +40,48 @@ export default function AppSelector() { } return ( - <Menu as="div" className="relative inline-block text-left"> - { - ({ open }) => ( - <> - <div> - <MenuButton - className={` - p-1x inline-flex - items-center rounded-[20px] text-sm - text-text-primary - mobile:px-1 - ${open && 'bg-components-panel-bg-blur'} - `} - > - <Avatar avatar={userProfile.avatar_url} name={userProfile.name} /> - </MenuButton> + <DropdownMenu modal={false}> + <DropdownMenuTrigger + aria-label={userProfile.name} + className={cn( + 'inline-flex items-center rounded-[20px] text-sm text-text-primary outline-hidden mobile:px-1', + 'hover:bg-components-panel-bg-blur 
focus-visible:bg-components-panel-bg-blur focus-visible:ring-1 focus-visible:ring-components-input-border-hover data-popup-open:bg-components-panel-bg-blur', + )} + > + <Avatar avatar={userProfile.avatar_url} name={userProfile.name} /> + </DropdownMenuTrigger> + <DropdownMenuContent + placement="bottom-end" + sideOffset={4} + popupClassName="w-60 max-w-80 divide-y divide-divider-subtle bg-components-panel-bg-blur p-0" + > + <div className="p-1"> + <div className="flex flex-nowrap items-center px-3 py-2"> + <div className="min-w-0 grow"> + <div className="system-md-medium break-all text-text-primary"> + {userProfile.name} + {isEducationAccount && ( + <PremiumBadge size="s" color="blue" className="ml-1 px-2!"> + <span className="mr-1 i-ri-graduation-cap-fill h-3 w-3" /> + <span className="system-2xs-medium">EDU</span> + </PremiumBadge> + )} + </div> + <div className="system-xs-regular break-all text-text-tertiary">{userProfile.email}</div> </div> - <Transition - as={Fragment} - enter="transition ease-out duration-100" - enterFrom="transform opacity-0 scale-95" - enterTo="transform opacity-100 scale-100" - leave="transition ease-in duration-75" - leaveFrom="transform opacity-100 scale-100" - leaveTo="transform opacity-0 scale-95" - > - <MenuItems - className=" - absolute -top-1 -right-2 w-60 max-w-80 - origin-top-right divide-y divide-divider-subtle rounded-lg bg-components-panel-bg-blur - shadow-lg - " - > - <MenuItem> - <div className="p-1"> - <div className="flex flex-nowrap items-center px-3 py-2"> - <div className="grow"> - <div className="system-md-medium break-all text-text-primary"> - {userProfile.name} - {isEducationAccount && ( - <PremiumBadge size="s" color="blue" className="ml-1 px-2!"> - <RiGraduationCapFill className="mr-1 h-3 w-3" /> - <span className="system-2xs-medium">EDU</span> - </PremiumBadge> - )} - </div> - <div className="system-xs-regular break-all text-text-tertiary">{userProfile.email}</div> - </div> - <Avatar avatar={userProfile.avatar_url} 
name={userProfile.name} /> - </div> - </div> - </MenuItem> - <MenuItem> - <div className="p-1" onClick={() => handleLogout()}> - <div - className="group flex h-9 cursor-pointer items-center justify-start rounded-lg px-3 hover:bg-state-base-hover" - > - <LogOut01 className="mr-1 flex h-4 w-4 text-text-tertiary" /> - <div className="text-[14px] font-normal text-text-secondary">{t('userProfile.logout', { ns: 'common' })}</div> - </div> - </div> - </MenuItem> - </MenuItems> - </Transition> - </> - ) - } - </Menu> + <Avatar avatar={userProfile.avatar_url} name={userProfile.name} /> + </div> + </div> + <div className="p-1"> + <DropdownMenuItem + className="h-9 justify-start px-3" + onClick={handleLogout} + > + <LogOut01 className="mr-1 flex h-4 w-4 text-text-tertiary" /> + <span className="text-[14px] font-normal text-text-secondary">{t('userProfile.logout', { ns: 'common' })}</span> + </DropdownMenuItem> + </div> + </DropdownMenuContent> + </DropdownMenu> ) } diff --git a/web/app/components/app/annotation/header-opts/__tests__/index.spec.tsx b/web/app/components/app/annotation/header-opts/__tests__/index.spec.tsx index 944a8563eb..5e7b2dc1d0 100644 --- a/web/app/components/app/annotation/header-opts/__tests__/index.spec.tsx +++ b/web/app/components/app/annotation/header-opts/__tests__/index.spec.tsx @@ -3,7 +3,7 @@ import type { ComponentProps } from 'react' import type { Mock } from 'vitest' import type { AnnotationItemBasic } from '../../type' import type { Locale } from '@/i18n-config' -import { render, screen, waitFor } from '@testing-library/react' +import { act, render, screen, waitFor } from '@testing-library/react' import userEvent from '@testing-library/user-event' import * as React from 'react' import { useLocale } from '@/context/i18n' @@ -128,21 +128,15 @@ vi.mock('@headlessui/react', () => { } }) -let lastCSVDownloaderProps: Record<string, unknown> | undefined -const mockCSVDownloader = vi.fn(({ children, ...props }) => { - lastCSVDownloaderProps = props - 
return ( - <div data-testid="csv-downloader"> - {children} - </div> - ) -}) +const mockJsonToCSV = vi.fn((_: unknown) => 'csv-content') +const mockCSVDownloader = vi.fn(({ children }) => <>{children}</>) vi.mock('react-papaparse', () => ({ useCSVDownloader: () => ({ CSVDownloader: (props: any) => mockCSVDownloader(props), Type: { Link: 'link' }, }), + jsonToCSV: (data: unknown) => mockJsonToCSV(data), })) vi.mock('@/service/annotation', () => ({ @@ -194,33 +188,28 @@ const openOperationsPopover = async (user: ReturnType<typeof userEvent.setup>) = const expandExportMenu = async (user: ReturnType<typeof userEvent.setup>) => { await openOperationsPopover(user) - const exportLabel = await screen.findByText('appAnnotation.table.header.bulkExport') - const exportButton = exportLabel.closest('button') as HTMLButtonElement - expect(exportButton).toBeTruthy() - await user.click(exportButton) + const exportItem = await screen.findByRole('menuitem', { name: /appAnnotation\.table\.header\.bulkExport/i }) + await user.hover(exportItem) } -const getExportButtons = async () => { - const csvLabel = await screen.findByText('CSV') - const jsonLabel = await screen.findByText('JSONL') - const csvButton = csvLabel.closest('button') as HTMLButtonElement - const jsonButton = jsonLabel.closest('button') as HTMLButtonElement - expect(csvButton).toBeTruthy() - expect(jsonButton).toBeTruthy() +const getExportItems = async () => { + const csvItem = await screen.findByRole('menuitem', { name: 'CSV' }) + const jsonItem = await screen.findByRole('menuitem', { name: 'JSONL' }) return { - csvButton, - jsonButton, + csvItem, + jsonItem, } } -const clickOperationAction = async ( - user: ReturnType<typeof userEvent.setup>, - translationKey: string, -) => { - const label = await screen.findByText(translationKey) - const button = label.closest('button') as HTMLButtonElement - expect(button).toBeTruthy() - await user.click(button) +const clickMenuItem = async (item: HTMLElement) => { + await act(async 
() => { + item.click() + }) +} + +const clickOperationAction = async (translationKey: string) => { + const item = await screen.findByRole('menuitem', { name: translationKey }) + await clickMenuItem(item) } const mockAnnotations: AnnotationItemBasic[] = [ @@ -237,11 +226,14 @@ describe('HeaderOptions', () => { beforeEach(() => { vi.clearAllMocks() vi.useRealTimers() - mockCSVDownloader.mockClear() - lastCSVDownloaderProps = undefined + mockJsonToCSV.mockReturnValue('csv-content') mockedFetchAnnotations.mockResolvedValue({ data: [] }) }) + afterEach(() => { + vi.restoreAllMocks() + }) + it('should fetch annotations on mount and render enabled export actions when data exist', async () => { mockedFetchAnnotations.mockResolvedValue({ data: mockAnnotations }) const user = userEvent.setup() @@ -253,22 +245,69 @@ describe('HeaderOptions', () => { await expandExportMenu(user) - const { csvButton, jsonButton } = await getExportButtons() + const { csvItem, jsonItem } = await getExportItems() - expect(csvButton).not.toBeDisabled() - expect(jsonButton).not.toBeDisabled() + expect(csvItem).not.toHaveAttribute('data-disabled') + expect(jsonItem).not.toHaveAttribute('data-disabled') - await waitFor(() => { - expect(lastCSVDownloaderProps).toMatchObject({ - bom: true, - filename: 'annotations-en-US', - type: 'link', - data: [ - ['Question', 'Answer'], - ['Question 1', 'Answer 1'], - ], + await clickMenuItem(csvItem) + + expect(mockJsonToCSV).toHaveBeenCalledWith([ + ['Question', 'Answer'], + ['Question 1', 'Answer 1'], + ]) + }) + + it('should trigger CSV download with locale-specific filename', async () => { + mockedFetchAnnotations.mockResolvedValue({ data: mockAnnotations }) + const user = userEvent.setup() + const originalCreateElement = document.createElement.bind(document) + const anchor = originalCreateElement('a') as HTMLAnchorElement + const clickSpy = vi.spyOn(anchor, 'click').mockImplementation(vi.fn()) + const createElementSpy = vi.spyOn(document, 'createElement') + 
.mockImplementation((tagName: Parameters<Document['createElement']>[0]) => { + if (tagName === 'a') + return anchor + return originalCreateElement(tagName) }) + let capturedBlob: Blob | null = null + const objectURLSpy = vi.spyOn(URL, 'createObjectURL') + .mockImplementation((blob) => { + capturedBlob = blob as Blob + return 'blob://mock-url' + }) + const revokeSpy = vi.spyOn(URL, 'revokeObjectURL').mockImplementation(vi.fn()) + + renderComponent({}, LanguagesSupported[1]) + + await expandExportMenu(user) + + const { csvItem } = await getExportItems() + await clickMenuItem(csvItem) + + expect(mockJsonToCSV).toHaveBeenCalledWith([ + ['问题', '答案'], + ['Question 1', 'Answer 1'], + ]) + expect(createElementSpy).toHaveBeenCalled() + expect(anchor.download).toBe(`annotations-${LanguagesSupported[1]}.csv`) + expect(clickSpy).toHaveBeenCalled() + expect(revokeSpy).toHaveBeenCalledWith('blob://mock-url') + + expect(capturedBlob).toBeInstanceOf(Blob) + expect(capturedBlob!.type).toBe('text/csv;charset=utf-8;') + + const blobContent = await new Promise<string>((resolve) => { + const reader = new FileReader() + reader.onload = () => resolve(reader.result as string) + reader.readAsText(capturedBlob!) 
}) + expect(blobContent).toBe('csv-content') + + clickSpy.mockRestore() + createElementSpy.mockRestore() + objectURLSpy.mockRestore() + revokeSpy.mockRestore() }) it('should disable export actions when there are no annotations', async () => { @@ -277,14 +316,11 @@ describe('HeaderOptions', () => { await expandExportMenu(user) - const { csvButton, jsonButton } = await getExportButtons() + const { csvItem, jsonItem } = await getExportItems() - expect(csvButton)!.toBeDisabled() - expect(jsonButton)!.toBeDisabled() - - expect(lastCSVDownloaderProps).toMatchObject({ - data: [['Question', 'Answer']], - }) + expect(csvItem).toHaveAttribute('data-disabled') + expect(jsonItem).toHaveAttribute('data-disabled') + expect(mockJsonToCSV).not.toHaveBeenCalled() }) it('should open the add annotation modal and forward the onAdd callback', async () => { @@ -321,7 +357,7 @@ describe('HeaderOptions', () => { renderComponent({ onAdded }) await openOperationsPopover(user) - await clickOperationAction(user, 'appAnnotation.table.header.bulkImport') + await clickOperationAction('appAnnotation.table.header.bulkImport') expect(await screen.findByText('appAnnotation.batchModal.title'))!.toBeInTheDocument() await user.click( @@ -354,10 +390,8 @@ describe('HeaderOptions', () => { await expandExportMenu(user) - await waitFor(() => expect(mockCSVDownloader).toHaveBeenCalled()) - - const { jsonButton } = await getExportButtons() - await user.click(jsonButton) + const { jsonItem } = await getExportItems() + await clickMenuItem(jsonItem) expect(createElementSpy).toHaveBeenCalled() expect(anchor.download).toBe(`annotations-${LanguagesSupported[1]}.jsonl`) @@ -396,7 +430,7 @@ describe('HeaderOptions', () => { renderComponent({ onAdded }) await openOperationsPopover(user) - await clickOperationAction(user, 'appAnnotation.table.header.clearAll') + await clickOperationAction('appAnnotation.table.header.clearAll') await screen.findByText('appAnnotation.table.header.clearAllConfirm') const confirmButton = 
screen.getByRole('button', { name: 'common.operation.confirm' }) @@ -416,7 +450,7 @@ describe('HeaderOptions', () => { renderComponent({ onAdded }) await openOperationsPopover(user) - await clickOperationAction(user, 'appAnnotation.table.header.clearAll') + await clickOperationAction('appAnnotation.table.header.clearAll') await screen.findByText('appAnnotation.table.header.clearAllConfirm') const confirmButton = screen.getByRole('button', { name: 'common.operation.confirm' }) await user.click(confirmButton) diff --git a/web/app/components/app/annotation/header-opts/index.tsx b/web/app/components/app/annotation/header-opts/index.tsx index fc27524c71..6814c3692c 100644 --- a/web/app/components/app/annotation/header-opts/index.tsx +++ b/web/app/components/app/annotation/header-opts/index.tsx @@ -1,19 +1,21 @@ 'use client' import type { FC } from 'react' import type { AnnotationItemBasic } from '../type' -import { Menu, MenuButton, MenuItems, Transition } from '@headlessui/react' import { Button } from '@langgenius/dify-ui/button' -import { cn } from '@langgenius/dify-ui/cn' import { DropdownMenu, DropdownMenuContent, + DropdownMenuItem, + DropdownMenuSub, + DropdownMenuSubContent, + DropdownMenuSubTrigger, DropdownMenuTrigger, } from '@langgenius/dify-ui/dropdown-menu' import * as React from 'react' -import { Fragment, useEffect, useState } from 'react' +import { useEffect, useState } from 'react' import { useTranslation } from 'react-i18next' import { - useCSVDownloader, + jsonToCSV, } from 'react-papaparse' import { useLocale } from '@/context/i18n' @@ -54,6 +56,15 @@ const downloadAnnotationJsonl = (list: AnnotationItemBasic[], locale: string) => downloadBlob({ data: file, fileName: `annotations-${locale}.jsonl` }) } +const downloadAnnotationCsv = (list: AnnotationItemBasic[], locale: string) => { + const content = jsonToCSV([ + locale !== LanguagesSupported[1] ? 
CSV_HEADER_QA_EN : CSV_HEADER_QA_CN, + ...list.map(item => [item.question, item.answer]), + ]) + const file = new Blob([`\uFEFF${content}`], { type: 'text/csv;charset=utf-8;' }) + downloadBlob({ data: file, fileName: `annotations-${locale}.csv` }) +} + const OperationsMenu: FC<OperationsMenuProps> = ({ list, onClose, @@ -63,88 +74,62 @@ const OperationsMenu: FC<OperationsMenuProps> = ({ }) => { const { t } = useTranslation() const locale = useLocale() - const { CSVDownloader, Type } = useCSVDownloader() const annotationUnavailable = list.length === 0 return ( - <div className="w-full py-1"> - <button - type="button" - className="mx-1 flex h-9 w-[calc(100%-8px)] cursor-pointer items-center space-x-2 rounded-lg px-3 py-2 hover:bg-components-panel-on-panel-item-bg-hover disabled:opacity-50" + <> + <DropdownMenuItem + className="gap-2" onClick={() => { onClose() onBulkImport() }} > - <span aria-hidden className="i-custom-vender-line-files-file-plus-02 h-4 w-4 text-text-tertiary" /> - <span className="grow text-left system-sm-regular text-text-secondary">{t('table.header.bulkImport', { ns: 'appAnnotation' })}</span> - </button> - <Menu as="div" className="relative h-full w-full"> - <MenuButton className="mx-1 flex h-9 w-[calc(100%-8px)] cursor-pointer items-center space-x-2 rounded-lg px-3 py-2 hover:bg-components-panel-on-panel-item-bg-hover disabled:opacity-50"> - <span aria-hidden className="i-custom-vender-line-files-file-download-02 h-4 w-4 text-text-tertiary" /> - <span className="grow text-left system-sm-regular text-text-secondary">{t('table.header.bulkExport', { ns: 'appAnnotation' })}</span> - <span aria-hidden className="i-custom-vender-line-arrows-chevron-right h-[14px] w-[14px] shrink-0 text-text-tertiary" /> - </MenuButton> - <Transition - as={Fragment} - enter="transition ease-out duration-100" - enterFrom="transform opacity-0 scale-95" - enterTo="transform opacity-100 scale-100" - leave="transition ease-in duration-75" - leaveFrom="transform opacity-100 
scale-100" - leaveTo="transform opacity-0 scale-95" + <span aria-hidden className="i-custom-vender-line-files-file-plus-02 size-4 shrink-0 text-text-tertiary" /> + {t('table.header.bulkImport', { ns: 'appAnnotation' })} + </DropdownMenuItem> + <DropdownMenuSub> + <DropdownMenuSubTrigger className="gap-2"> + <span aria-hidden className="i-custom-vender-line-files-file-download-02 size-4 shrink-0 text-text-tertiary" /> + {t('table.header.bulkExport', { ns: 'appAnnotation' })} + </DropdownMenuSubTrigger> + <DropdownMenuSubContent + placement="left-start" + sideOffset={4} + popupClassName="min-w-[100px]" > - <MenuItems - className={cn( - 'absolute top-px left-1 z-10 min-w-[100px] origin-top-right -translate-x-full rounded-xl border-[0.5px] border-components-panel-on-panel-item-bg bg-components-panel-bg py-1 shadow-xs', - )} + <DropdownMenuItem + disabled={annotationUnavailable} + onClick={() => { + onClose() + downloadAnnotationCsv(list, locale) + }} > - <CSVDownloader - type={Type.Link} - filename={`annotations-${locale}`} - bom={true} - data={[ - locale !== LanguagesSupported[1] ? 
CSV_HEADER_QA_EN : CSV_HEADER_QA_CN, - ...list.map(item => [item.question, item.answer]), - ]} - > - <button - type="button" - disabled={annotationUnavailable} - className="mx-1 flex h-9 w-[calc(100%-8px)] cursor-pointer items-center space-x-2 rounded-lg px-3 py-2 hover:bg-components-panel-on-panel-item-bg-hover disabled:opacity-50" - onClick={onClose} - > - <span className="grow text-left system-sm-regular text-text-secondary">CSV</span> - </button> - </CSVDownloader> - <button - type="button" - disabled={annotationUnavailable} - className={cn('mx-1 flex h-9 w-[calc(100%-8px)] cursor-pointer items-center space-x-2 rounded-lg px-3 py-2 hover:bg-components-panel-on-panel-item-bg-hover disabled:opacity-50', 'border-0!')} - onClick={() => { - onClose() - onExportJsonl() - }} - > - <span className="grow text-left system-sm-regular text-text-secondary">JSONL</span> - </button> - </MenuItems> - </Transition> - </Menu> - <button - type="button" + CSV + </DropdownMenuItem> + <DropdownMenuItem + disabled={annotationUnavailable} + onClick={() => { + onClose() + onExportJsonl() + }} + > + JSONL + </DropdownMenuItem> + </DropdownMenuSubContent> + </DropdownMenuSub> + <DropdownMenuItem + variant="destructive" + className="gap-2" onClick={() => { onClose() onClearAll() }} - className="mx-1 flex h-9 w-[calc(100%-8px)] cursor-pointer items-center space-x-2 rounded-lg px-3 py-2 text-red-600 hover:bg-red-50 disabled:opacity-50" > - <span aria-hidden className="i-ri-delete-bin-line h-4 w-4" /> - <span className="grow text-left system-sm-regular"> - {t('table.header.clearAll', { ns: 'appAnnotation' })} - </span> - </button> - </div> + <span aria-hidden className="i-ri-delete-bin-line size-4 shrink-0" /> + {t('table.header.clearAll', { ns: 'appAnnotation' })} + </DropdownMenuItem> + </> ) } @@ -204,7 +189,7 @@ const HeaderOptions: FC<Props> = ({ <span aria-hidden className="mr-0.5 i-ri-add-line h-4 w-4" /> <div>{t('table.header.addAnnotation', { ns: 'appAnnotation' })}</div> </Button> 
- <DropdownMenu open={isOperationsMenuOpen} onOpenChange={setIsOperationsMenuOpen}> + <DropdownMenu modal={false} open={isOperationsMenuOpen} onOpenChange={setIsOperationsMenuOpen}> <DropdownMenuTrigger aria-label={t('operation.more', { ns: 'common' })} className="mr-0 box-border inline-flex h-8 w-8 shrink-0 items-center justify-center rounded-lg border-[0.5px] border-components-button-secondary-border bg-components-button-secondary-bg p-0 text-components-button-secondary-text shadow-xs backdrop-blur-[5px] hover:border-components-button-secondary-border-hover hover:bg-components-button-secondary-bg-hover data-popup-open:border-components-button-secondary-border-hover data-popup-open:bg-components-button-secondary-bg-hover" @@ -214,7 +199,7 @@ const HeaderOptions: FC<Props> = ({ <DropdownMenuContent placement="bottom-end" sideOffset={4} - popupClassName="w-[155px] overflow-visible py-0" + popupClassName="w-[155px]" > <OperationsMenu list={list} diff --git a/web/app/components/app/app-access-control/__tests__/access-control-dialog.spec.tsx b/web/app/components/app/app-access-control/__tests__/access-control-dialog.spec.tsx index 9b3dd8ee05..13331f3f9c 100644 --- a/web/app/components/app/app-access-control/__tests__/access-control-dialog.spec.tsx +++ b/web/app/components/app/app-access-control/__tests__/access-control-dialog.spec.tsx @@ -21,8 +21,7 @@ describe('AccessControlDialog', () => { </AccessControlDialog>, ) - const closeButton = document.body.querySelector('div.absolute.right-5.top-5') as HTMLElement - fireEvent.click(closeButton) + fireEvent.click(screen.getByRole('button', { name: 'Close' })) await waitFor(() => { expect(onClose).toHaveBeenCalledTimes(1) diff --git a/web/app/components/app/app-access-control/__tests__/access-control.spec.tsx b/web/app/components/app/app-access-control/__tests__/access-control.spec.tsx index 21dd8c5fc2..4aaea1670f 100644 --- a/web/app/components/app/app-access-control/__tests__/access-control.spec.tsx +++ 
b/web/app/components/app/app-access-control/__tests__/access-control.spec.tsx @@ -176,7 +176,7 @@ describe('AccessControlItem', () => { }) }) -// AccessControlDialog renders a headless UI dialog with a manual close control +// AccessControlDialog renders the shared dialog primitive with a close control. describe('AccessControlDialog', () => { it('should render dialog content when visible', () => { render( @@ -191,13 +191,13 @@ describe('AccessControlDialog', () => { it('should trigger onClose when clicking the close control', async () => { const handleClose = vi.fn() - const { container } = render( + render( <AccessControlDialog show onClose={handleClose}> <div>Dialog Content</div> </AccessControlDialog>, ) - const closeButton = container.querySelector('.absolute.right-5.top-5') as HTMLElement + const closeButton = screen.getByRole('button', { name: 'Close' }) fireEvent.click(closeButton) await waitFor(() => { diff --git a/web/app/components/app/app-access-control/access-control-dialog.tsx b/web/app/components/app/app-access-control/access-control-dialog.tsx index bbf5329c9d..611c6f1c92 100644 --- a/web/app/components/app/app-access-control/access-control-dialog.tsx +++ b/web/app/components/app/app-access-control/access-control-dialog.tsx @@ -1,8 +1,11 @@ import type { ReactNode } from 'react' -import { Dialog, Transition } from '@headlessui/react' import { cn } from '@langgenius/dify-ui/cn' -import { RiCloseLine } from '@remixicon/react' -import { Fragment, useCallback } from 'react' +import { + Dialog, + DialogCloseButton, + DialogContent, +} from '@langgenius/dify-ui/dialog' +import { useCallback } from 'react' type DialogProps = { className?: string @@ -21,40 +24,12 @@ const AccessControlDialog = ({ onClose?.() }, [onClose]) return ( - <Transition appear show={show} as={Fragment}> - <Dialog as="div" open={true} className="relative z-99" onClose={() => null}> - <Transition.Child - as={Fragment} - enter="ease-out duration-300" - enterFrom="opacity-0" - 
enterTo="opacity-100" - leave="ease-in duration-200" - leaveFrom="opacity-100" - leaveTo="opacity-0" - > - <div className="fixed inset-0 bg-background-overlay" /> - </Transition.Child> - - <div className="fixed inset-0 flex items-center justify-center"> - <Transition.Child - as={Fragment} - enter="ease-out duration-300" - enterFrom="opacity-0 scale-95" - enterTo="opacity-100 scale-100" - leave="ease-in duration-200" - leaveFrom="opacity-100 scale-100" - leaveTo="opacity-0 scale-95" - > - <Dialog.Panel className={cn('relative h-auto min-h-[323px] w-[600px] overflow-y-auto rounded-2xl bg-components-panel-bg p-0 shadow-xl transition-all', className)}> - <div onClick={() => close()} className="absolute top-5 right-5 z-10 flex h-8 w-8 cursor-pointer items-center justify-center"> - <RiCloseLine className="h-5 w-5 text-text-tertiary" /> - </div> - {children} - </Dialog.Panel> - </Transition.Child> - </div> - </Dialog> - </Transition> + <Dialog open={show} onOpenChange={open => !open && close()}> + <DialogContent className={cn('min-h-[323px] w-[600px] p-0', className)}> + <DialogCloseButton className="top-5 right-5 h-8 w-8" /> + {children} + </DialogContent> + </Dialog> ) } diff --git a/web/app/components/app/app-access-control/index.tsx b/web/app/components/app/app-access-control/index.tsx index cff670e10f..593664c918 100644 --- a/web/app/components/app/app-access-control/index.tsx +++ b/web/app/components/app/app-access-control/index.tsx @@ -1,8 +1,8 @@ 'use client' import type { Subject } from '@/models/access-control' import type { App } from '@/types/app' -import { Description as DialogDescription, DialogTitle } from '@headlessui/react' import { Button } from '@langgenius/dify-ui/button' +import { DialogDescription, DialogTitle } from '@langgenius/dify-ui/dialog' import { toast } from '@langgenius/dify-ui/toast' import { RiBuildingLine, RiGlobalLine, RiVerifiedBadgeLine } from '@remixicon/react' import { useSuspenseQuery } from '@tanstack/react-query' diff --git 
a/web/app/components/base/features/new-feature-panel/text-to-speech/__tests__/param-config-content.spec.tsx b/web/app/components/base/features/new-feature-panel/text-to-speech/__tests__/param-config-content.spec.tsx index b4d5beefa6..27a6cd96d0 100644 --- a/web/app/components/base/features/new-feature-panel/text-to-speech/__tests__/param-config-content.spec.tsx +++ b/web/app/components/base/features/new-feature-panel/text-to-speech/__tests__/param-config-content.spec.tsx @@ -64,6 +64,9 @@ const renderWithProvider = ( ) } +const getLanguageSelect = () => screen.getByRole('combobox', { name: /voice\.voiceSettings\.language/ }) +const getVoiceSelect = () => screen.getByRole('combobox', { name: /voice\.voiceSettings\.voice/ }) + describe('ParamConfigContent', () => { beforeEach(() => { vi.clearAllMocks() @@ -116,16 +119,13 @@ describe('ParamConfigContent', () => { it('should display language listbox button', () => { renderWithProvider() - const buttons = screen.getAllByRole('button') - expect(buttons.length).toBeGreaterThanOrEqual(1) + expect(getLanguageSelect()).toBeInTheDocument() }) it('should display current voice in listbox button', () => { renderWithProvider() - const buttons = screen.getAllByRole('button') - const voiceButton = buttons.find(btn => btn.textContent?.includes('Alloy')) - expect(voiceButton)!.toBeInTheDocument() + expect(getVoiceSelect()).toHaveTextContent('Alloy') }) it('should render audition button when language has example', () => { @@ -152,8 +152,7 @@ describe('ParamConfigContent', () => { text2speech: { enabled: true, language: '', voice: '', autoPlay: TtsAutoPlay.disabled }, }) - const buttons = screen.getAllByRole('button') - expect(buttons.length).toBeGreaterThan(0) + expect(getLanguageSelect()).toBeInTheDocument() }) it('should render with no voice set and use first as default', () => { @@ -161,9 +160,7 @@ describe('ParamConfigContent', () => { text2speech: { enabled: true, language: 'en-US', voice: 'nonexistent', autoPlay: 
TtsAutoPlay.disabled }, }) - const buttons = screen.getAllByRole('button') - const voiceButton = buttons.find(btn => btn.textContent?.includes('Alloy')) - expect(voiceButton)!.toBeInTheDocument() + expect(getVoiceSelect()).toHaveTextContent('Alloy') }) }) @@ -239,10 +236,7 @@ describe('ParamConfigContent', () => { it('should open language listbox and show options', async () => { renderWithProvider() - const buttons = screen.getAllByRole('button') - const languageButton = buttons.find(btn => btn.textContent?.includes('voice.language.')) - expect(languageButton).toBeDefined() - await userEvent.click(languageButton!) + await userEvent.click(getLanguageSelect()) const options = await screen.findAllByRole('option') expect(options.length).toBeGreaterThanOrEqual(2) @@ -252,10 +246,7 @@ describe('ParamConfigContent', () => { const onChange = vi.fn() renderWithProvider({ onChange }) - const buttons = screen.getAllByRole('button') - const languageButton = buttons.find(btn => btn.textContent?.includes('voice.language.')) - expect(languageButton).toBeDefined() - await userEvent.click(languageButton!) + await userEvent.click(getLanguageSelect()) const options = await screen.findAllByRole('option') expect(options.length).toBeGreaterThan(1) await userEvent.click(options[1]!) @@ -266,10 +257,7 @@ describe('ParamConfigContent', () => { const onChange = vi.fn() renderWithProvider({ onChange }) - const buttons = screen.getAllByRole('button') - const voiceButton = buttons.find(btn => btn.textContent?.includes('Alloy')) - expect(voiceButton).toBeDefined() - await userEvent.click(voiceButton!) + await userEvent.click(getVoiceSelect()) const options = await screen.findAllByRole('option') expect(options.length).toBeGreaterThan(1) await userEvent.click(options[1]!) 
@@ -279,10 +267,7 @@ describe('ParamConfigContent', () => { it('should show selected language option in listbox', async () => { renderWithProvider() - const buttons = screen.getAllByRole('button') - const languageButton = buttons.find(btn => btn.textContent?.includes('voice.language.')) - expect(languageButton).toBeDefined() - await userEvent.click(languageButton!) + await userEvent.click(getLanguageSelect()) const options = await screen.findAllByRole('option') expect(options.length).toBeGreaterThanOrEqual(1) @@ -294,10 +279,7 @@ describe('ParamConfigContent', () => { it('should show selected voice option in listbox', async () => { renderWithProvider() - const buttons = screen.getAllByRole('button') - const voiceButton = buttons.find(btn => btn.textContent?.includes('Alloy')) - expect(voiceButton).toBeDefined() - await userEvent.click(voiceButton!) + await userEvent.click(getVoiceSelect()) const options = await screen.findAllByRole('option') expect(options.length).toBeGreaterThanOrEqual(1) @@ -320,11 +302,7 @@ describe('ParamConfigContent', () => { const placeholderTexts = screen.getAllByText(/placeholder\.select/) expect(placeholderTexts.length).toBeGreaterThanOrEqual(2) - const disabledButtons = screen - .getAllByRole('button') - .filter(button => button.hasAttribute('disabled') || button.getAttribute('aria-disabled') === 'true') - - expect(disabledButtons.length).toBeGreaterThanOrEqual(1) + expect(getVoiceSelect()).toHaveAttribute('data-disabled') }) it('should call useAppVoices with empty appId when pathname has no app segment', () => { diff --git a/web/app/components/base/features/new-feature-panel/text-to-speech/param-config-content.tsx b/web/app/components/base/features/new-feature-panel/text-to-speech/param-config-content.tsx index f7c3b738a9..24670fa748 100644 --- a/web/app/components/base/features/new-feature-panel/text-to-speech/param-config-content.tsx +++ b/web/app/components/base/features/new-feature-panel/text-to-speech/param-config-content.tsx @@ 
-1,11 +1,15 @@ 'use client' import type { OnFeaturesChange } from '@/app/components/base/features/types' -import { Listbox, ListboxButton, ListboxOption, ListboxOptions, Transition } from '@headlessui/react' -import { cn } from '@langgenius/dify-ui/cn' +import { + Select, + SelectContent, + SelectItem, + SelectItemIndicator, + SelectItemText, + SelectTrigger, +} from '@langgenius/dify-ui/select' import { Switch } from '@langgenius/dify-ui/switch' import { produce } from 'immer' -import * as React from 'react' -import { Fragment } from 'react' import { useTranslation } from 'react-i18next' import { replace } from 'string-ts' import AudioBtn from '@/app/components/base/audio-btn' @@ -35,6 +39,9 @@ const VoiceParamConfig = ({ const appId = (matched?.length && matched[1]) ? matched[1] : '' const text2speech = useFeatures(state => state.features.text2speech) const featuresStore = useFeaturesStore() + const formatLanguageName = (item: SelectOption) => { + return t(`voice.language.${replace(String(item.value), '-', '')}`, item.name, { ns: 'common' as const }) + } let languageItem = languages.find(item => item.value === text2speech?.language) if (languages && !languageItem) @@ -70,21 +77,14 @@ const VoiceParamConfig = ({ <> <div className="mb-4 flex items-center justify-between"> <div className="system-xl-semibold text-text-primary">{t('voice.voiceSettings.title', { ns: 'appDebug' })}</div> - <div - className="cursor-pointer p-1" - role="button" - tabIndex={0} + <button + type="button" + className="rounded-md p-1 hover:bg-state-base-hover focus-visible:bg-state-base-hover focus-visible:outline-hidden" aria-label={t('appDebug:voice.voiceSettings.close')} onClick={onClose} - onKeyDown={(e) => { - if (e.key === 'Enter' || e.key === ' ') { - e.preventDefault() - onClose() - } - }} > - <span className="i-ri-close-line h-4 w-4 text-text-tertiary" /> - </div> + <span aria-hidden className="i-ri-close-line h-4 w-4 text-text-tertiary" /> + </button> </div> <div className="mb-3"> 
<div className="mb-1 flex items-center py-1 system-sm-semibold text-text-secondary"> @@ -100,129 +100,63 @@ const VoiceParamConfig = ({ ))} </Infotip> </div> - <Listbox - value={languageItem} - onChange={(value: SelectOption) => { + <Select + value={languageItem ? String(languageItem.value) : null} + onValueChange={(nextValue) => { + if (!nextValue) + return handleChange({ - language: String(value.value), + language: nextValue, }) }} > - <div className="relative h-8"> - <ListboxButton - className="h-full w-full cursor-pointer rounded-lg border-0 bg-components-input-bg-normal py-1.5 pr-10 pl-3 group-hover:bg-state-base-hover focus-visible:bg-state-base-hover focus-visible:outline-hidden sm:text-sm sm:leading-6" - > - <span className={cn('block truncate text-left text-text-secondary', !languageItem?.name && 'text-text-tertiary')}> - {languageItem?.name - ? t(`voice.language.${replace(languageItem?.value ?? '', '-', '')}`, languageItem?.name, { ns: 'common' as const }) - : localLanguagePlaceholder} - </span> - <span className="pointer-events-none absolute inset-y-0 right-0 flex items-center pr-2"> - <span className="i-heroicons-chevron-down-20-solid h-4 w-4 text-text-tertiary" aria-hidden="true" /> - </span> - </ListboxButton> - <Transition - as={Fragment} - leave="transition ease-in duration-100" - leaveFrom="opacity-100" - leaveTo="opacity-0" - > - - <ListboxOptions - className="absolute z-10 mt-1 max-h-60 w-full overflow-auto rounded-md border-[0.5px] border-components-panel-border bg-components-panel-bg px-1 py-1 text-base shadow-lg focus:outline-hidden sm:text-sm" - > - {languages.map(item => ( - <ListboxOption - key={item.value} - className="relative cursor-pointer rounded-lg py-2 pr-9 pl-3 text-text-secondary select-none hover:bg-state-base-hover data-active:bg-state-base-active" - value={item} - disabled={false} - > - {({ /* active, */ selected }) => ( - <> - <span - className={cn('block', selected && 'font-normal')} - > - 
{t(`voice.language.${replace((item.value), '-', '')}`, item.name, { ns: 'common' as const })} - </span> - {(selected || item.value === text2speech?.language) && ( - <span - className={cn('absolute inset-y-0 right-0 flex items-center pr-4 text-text-secondary')} - > - <span className="i-heroicons-check-20-solid h-4 w-4" aria-hidden="true" /> - </span> - )} - </> - )} - </ListboxOption> - ))} - </ListboxOptions> - </Transition> - </div> - </Listbox> + <SelectTrigger aria-label={t('voice.voiceSettings.language', { ns: 'appDebug' })} className="w-full"> + {languageItem ? formatLanguageName(languageItem) : localLanguagePlaceholder} + </SelectTrigger> + <SelectContent listClassName="max-h-60"> + {languages.map(item => ( + <SelectItem key={item.value} value={String(item.value)}> + <SelectItemText> + {formatLanguageName(item)} + </SelectItemText> + <SelectItemIndicator /> + </SelectItem> + ))} + </SelectContent> + </Select> </div> <div className="mb-3"> <div className="mb-1 py-1 system-sm-semibold text-text-secondary"> {t('voice.voiceSettings.voice', { ns: 'appDebug' })} </div> <div className="flex items-center gap-1"> - <Listbox - value={voiceItem} + <Select + value={voiceItem ? String(voiceItem.value) : null} disabled={!languageItem} - onChange={(value: SelectOption) => { + onValueChange={(nextValue) => { + if (!nextValue) + return handleChange({ - voice: String(value.value), + voice: nextValue, }) }} > - <div className="relative h-8 grow"> - <ListboxButton - className="h-full w-full cursor-pointer rounded-lg border-0 bg-components-input-bg-normal py-1.5 pr-10 pl-3 group-hover:bg-state-base-hover focus-visible:bg-state-base-hover focus-visible:outline-hidden sm:text-sm sm:leading-6" - > - <span - className={cn('block truncate text-left text-text-secondary', !voiceItem?.name && 'text-text-tertiary')} - > - {voiceItem?.name ?? 
localVoicePlaceholder} - </span> - <span className="pointer-events-none absolute inset-y-0 right-0 flex items-center pr-2"> - <span className="i-heroicons-chevron-down-20-solid h-4 w-4 text-text-tertiary" aria-hidden="true" /> - </span> - </ListboxButton> - <Transition - as={Fragment} - leave="transition ease-in duration-100" - leaveFrom="opacity-100" - leaveTo="opacity-0" - > - - <ListboxOptions - className="absolute z-10 mt-1 max-h-60 w-full overflow-auto rounded-md border-[0.5px] border-components-panel-border bg-components-panel-bg px-1 py-1 text-base shadow-lg focus:outline-hidden sm:text-sm" - > - {voiceItems?.map((item: SelectOption) => ( - <ListboxOption - key={item.value} - className="relative cursor-pointer rounded-lg py-2 pr-9 pl-3 text-text-secondary select-none hover:bg-state-base-hover data-active:bg-state-base-active" - value={item} - disabled={false} - > - {({ /* active, */ selected }) => ( - <> - <span className={cn('block', selected && 'font-normal')}>{item.name}</span> - {(selected || item.value === text2speech?.voice) && ( - <span - className={cn('absolute inset-y-0 right-0 flex items-center pr-4 text-text-secondary')} - > - <span className="i-heroicons-check-20-solid h-4 w-4" aria-hidden="true" /> - </span> - )} - </> - )} - </ListboxOption> - ))} - </ListboxOptions> - </Transition> + <div className="grow"> + <SelectTrigger aria-label={t('voice.voiceSettings.voice', { ns: 'appDebug' })} className="w-full"> + {voiceItem?.name ?? 
localVoicePlaceholder} + </SelectTrigger> + <SelectContent listClassName="max-h-60"> + {voiceItems?.map((item: SelectOption) => ( + <SelectItem key={item.value} value={String(item.value)}> + <SelectItemText> + {item.name} + </SelectItemText> + <SelectItemIndicator /> + </SelectItem> + ))} + </SelectContent> </div> - </Listbox> + </Select> {languageItem?.example && ( <div className="h-8 shrink-0 rounded-lg bg-components-button-tertiary-bg p-1" data-testid="audition-button"> <AudioBtn @@ -253,4 +187,4 @@ const VoiceParamConfig = ({ ) } -export default React.memo(VoiceParamConfig) +export default VoiceParamConfig diff --git a/web/app/components/base/notion-page-selector/credential-selector/index.tsx b/web/app/components/base/notion-page-selector/credential-selector/index.tsx index c8db7bc978..81ee1c06d8 100644 --- a/web/app/components/base/notion-page-selector/credential-selector/index.tsx +++ b/web/app/components/base/notion-page-selector/credential-selector/index.tsx @@ -1,7 +1,12 @@ 'use client' -import { Menu, MenuButton, MenuItem, MenuItems, Transition } from '@headlessui/react' -import * as React from 'react' -import { Fragment, useMemo } from 'react' +import { + Select, + SelectContent, + SelectItem, + SelectItemIndicator, + SelectItemText, + SelectTrigger, +} from '@langgenius/dify-ui/select' import { CredentialIcon } from '@/app/components/datasets/common/credential-icon' export type NotionCredential = { @@ -17,99 +22,66 @@ type CredentialSelectorProps = { onSelect: (v: string) => void } +const getDisplayName = (item?: NotionCredential) => { + return item?.workspaceName || item?.credentialName || '' +} + const CredentialSelector = ({ value, items, onSelect, }: CredentialSelectorProps) => { - const currentCredential = items.find(item => item.credentialId === value)! 
- - const getDisplayName = (item: NotionCredential) => { - return item.workspaceName || item.credentialName - } - - const currentDisplayName = useMemo(() => { - return getDisplayName(currentCredential) - }, [currentCredential]) + const currentCredential = items.find(item => item.credentialId === value) ?? items[0] + const currentDisplayName = getDisplayName(currentCredential) return ( - <Menu as="div" className="relative inline-block text-left"> - { - ({ open }) => ( - <> - <MenuButton - className={`flex h-7 items-center justify-center rounded-md p-1 pr-2 hover:bg-state-base-hover ${open && 'bg-state-base-hover'} cursor-pointer`} - data-testid="notion-credential-selector-btn" + <Select value={currentCredential?.credentialId ?? null} onValueChange={nextValue => nextValue && onSelect(nextValue)}> + <SelectTrigger + className="w-[168px]" + data-testid="notion-credential-selector-btn" + > + <span className="flex min-w-0 items-center"> + <CredentialIcon + className="mr-2 shrink-0" + avatarUrl={currentCredential?.workspaceIcon} + name={currentDisplayName} + size={20} + /> + <span + className="truncate" + title={currentDisplayName} + data-testid="notion-credential-selector-name" + > + {currentDisplayName} + </span> + </span> + </SelectTrigger> + <SelectContent popupClassName="w-80" listClassName="max-h-50"> + {items.map((item) => { + const displayName = getDisplayName(item) + return ( + <SelectItem + key={item.credentialId} + value={item.credentialId} + className="h-9 px-3" + data-testid={`notion-credential-item-${item.credentialId}`} > <CredentialIcon - className="mr-2" - avatarUrl={currentCredential?.workspaceIcon} - name={currentDisplayName} + className="mr-2 shrink-0" + avatarUrl={item.workspaceIcon} + name={displayName} size={20} /> - <div - className="mr-1 w-[90px] truncate text-left text-sm font-medium text-text-secondary" - title={currentDisplayName} - data-testid="notion-credential-selector-name" - > - {currentDisplayName} - </div> - <div 
className="i-ri-arrow-down-s-line h-4 w-4 text-text-secondary" /> - </MenuButton> - <Transition - as={Fragment} - enter="transition ease-out duration-100" - enterFrom="transform opacity-0 scale-95" - enterTo="transform opacity-100 scale-100" - leave="transition ease-in duration-75" - leaveFrom="transform opacity-100 scale-100" - leaveTo="transform opacity-0 scale-95" - > - <MenuItems - className="absolute top-8 left-0 z-10 w-80 - origin-top-right rounded-lg border-[0.5px] - border-components-panel-border bg-components-panel-bg-blur shadow-lg shadow-shadow-shadow-5" - > - <div className="max-h-50 overflow-auto p-1"> - { - items.map((item) => { - const displayName = getDisplayName(item) - return ( - <MenuItem key={item.credentialId}> - <div - className="flex h-9 cursor-pointer items-center rounded-lg px-3 hover:bg-state-base-hover" - onClick={() => onSelect(item.credentialId)} - data-testid={`notion-credential-item-${item.credentialId}`} - > - <CredentialIcon - className="mr-2 shrink-0" - avatarUrl={item.workspaceIcon} - name={displayName} - size={20} - /> - <div - className="mr-2 grow truncate system-sm-medium text-text-secondary" - title={displayName} - > - {displayName} - </div> - {/* // ?Cannot get page length with new auth system */} - {/* <div className='system-xs-medium shrink-0 text-text-accent'> - {item.pages.length} {t('common.dataSource.notion.selector.pageSelected')} - </div> */} - </div> - </MenuItem> - ) - }) - } - </div> - </MenuItems> - </Transition> - </> - ) - } - </Menu> + <SelectItemText title={displayName}> + {displayName} + </SelectItemText> + <SelectItemIndicator /> + </SelectItem> + ) + })} + </SelectContent> + </Select> ) } -export default React.memo(CredentialSelector) +export default CredentialSelector diff --git a/web/app/components/header/account-setting/members-page/operation/transfer-ownership.tsx b/web/app/components/header/account-setting/members-page/operation/transfer-ownership.tsx index 97a4a5b2f2..e46989643e 100644 --- 
a/web/app/components/header/account-setting/members-page/operation/transfer-ownership.tsx +++ b/web/app/components/header/account-setting/members-page/operation/transfer-ownership.tsx @@ -1,11 +1,15 @@ 'use client' -import { Menu, MenuButton, MenuItem, MenuItems, Transition } from '@headlessui/react' import { cn } from '@langgenius/dify-ui/cn' +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuTrigger, +} from '@langgenius/dify-ui/dropdown-menu' import { RiArrowDownSLine, } from '@remixicon/react' import { useSuspenseQuery } from '@tanstack/react-query' -import { Fragment } from 'react' import { useTranslation } from 'react-i18next' import Loading from '@/app/components/base/loading' import { useAppContext } from '@/context/app-context' @@ -31,39 +35,29 @@ const TransferOwnership = ({ onOperate }: Props) => { } return ( - <Menu as="div" className="relative h-full w-full"> - { - ({ open }) => ( - <> - <MenuButton className={cn('group flex h-full w-full cursor-pointer items-center justify-between px-3 system-sm-regular text-text-secondary hover:bg-state-base-hover', open && 'bg-state-base-hover')}> - {t('members.owner', { ns: 'common' })} - <RiArrowDownSLine className={cn('h-4 w-4 group-hover:block', open ? 
'block' : 'hidden')} /> - </MenuButton> - <Transition - as={Fragment} - enter="transition ease-out duration-100" - enterFrom="transform opacity-0 scale-95" - enterTo="transform opacity-100 scale-100" - leave="transition ease-in duration-75" - leaveFrom="transform opacity-100 scale-100" - leaveTo="transform opacity-0 scale-95" - > - <MenuItems - className={cn('absolute top-[52px] right-0 z-10 origin-top-right rounded-xl border-[0.5px] border-components-panel-border bg-components-panel-bg-blur shadow-lg backdrop-blur-xs')} - > - <div className="p-1"> - <MenuItem> - <div className="flex cursor-pointer rounded-lg px-3 py-2 hover:bg-state-base-hover" onClick={onOperate}> - <div className="system-md-regular whitespace-nowrap text-text-secondary">{t('members.transferOwnership', { ns: 'common' })}</div> - </div> - </MenuItem> - </div> - </MenuItems> - </Transition> - </> - ) - } - </Menu> + <DropdownMenu modal={false}> + <DropdownMenuTrigger + className={cn( + 'group flex h-full w-full cursor-pointer items-center justify-between px-3 system-sm-regular text-text-secondary outline-hidden', + 'hover:bg-state-base-hover focus-visible:bg-state-base-hover focus-visible:ring-1 focus-visible:ring-components-input-border-hover data-popup-open:bg-state-base-hover', + )} + > + {t('members.owner', { ns: 'common' })} + <RiArrowDownSLine className="hidden h-4 w-4 group-hover:block group-data-popup-open:block" /> + </DropdownMenuTrigger> + <DropdownMenuContent + placement="bottom-end" + sideOffset={4} + popupClassName="bg-components-panel-bg-blur p-1 backdrop-blur-xs" + > + <DropdownMenuItem + className="h-auto px-3 py-2" + onClick={onOperate} + > + <span className="system-md-regular whitespace-nowrap text-text-secondary">{t('members.transferOwnership', { ns: 'common' })}</span> + </DropdownMenuItem> + </DropdownMenuContent> + </DropdownMenu> ) } diff --git a/web/app/components/header/account-setting/model-provider-page/provider-added-card/__tests__/priority-selector.spec.tsx 
b/web/app/components/header/account-setting/model-provider-page/provider-added-card/__tests__/priority-selector.spec.tsx deleted file mode 100644 index d122bf921b..0000000000 --- a/web/app/components/header/account-setting/model-provider-page/provider-added-card/__tests__/priority-selector.spec.tsx +++ /dev/null @@ -1,29 +0,0 @@ -import { fireEvent, render, screen } from '@testing-library/react' -import PrioritySelector from '../priority-selector' - -describe('PrioritySelector', () => { - const mockOnSelect = vi.fn() - - beforeEach(() => { - vi.clearAllMocks() - }) - - it('should render selector button', () => { - render(<PrioritySelector value="system" onSelect={mockOnSelect} />) - expect(screen.getByRole('button')).toBeInTheDocument() - }) - - it('should call onSelect when option clicked', () => { - render(<PrioritySelector value="system" onSelect={mockOnSelect} />) - fireEvent.click(screen.getByRole('button')) - const option = screen.getByText('common.modelProvider.apiKey') - fireEvent.click(option) - expect(mockOnSelect).toHaveBeenCalled() - }) - - it('should display priority use header in popover', () => { - render(<PrioritySelector value="custom" onSelect={mockOnSelect} />) - fireEvent.click(screen.getByRole('button')) - expect(screen.getByText('common.modelProvider.card.priorityUse')).toBeInTheDocument() - }) -}) diff --git a/web/app/components/header/account-setting/model-provider-page/provider-added-card/priority-selector.tsx b/web/app/components/header/account-setting/model-provider-page/provider-added-card/priority-selector.tsx deleted file mode 100644 index a74c400035..0000000000 --- a/web/app/components/header/account-setting/model-provider-page/provider-added-card/priority-selector.tsx +++ /dev/null @@ -1,77 +0,0 @@ -import type { FC } from 'react' -import { Popover, PopoverButton, PopoverPanel, Transition } from '@headlessui/react' -import { Button } from '@langgenius/dify-ui/button' -import { cn } from '@langgenius/dify-ui/cn' -import { - 
RiCheckLine, - RiMoreFill, -} from '@remixicon/react' -import { Fragment } from 'react' -import { useTranslation } from 'react-i18next' -import { PreferredProviderTypeEnum } from '../declarations' - -type SelectorProps = { - value?: string - onSelect: (key: PreferredProviderTypeEnum) => void -} -const Selector: FC<SelectorProps> = ({ - value, - onSelect, -}) => { - const { t } = useTranslation() - const options = [ - { - key: PreferredProviderTypeEnum.custom, - text: t('modelProvider.apiKey', { ns: 'common' }), - }, - { - key: PreferredProviderTypeEnum.system, - text: t('modelProvider.quota', { ns: 'common' }), - }, - ] - - return ( - <Popover className="relative"> - <PopoverButton as="div"> - { - ({ open }) => ( - <Button className={cn( - 'h-6 w-6 rounded-md px-0', - open && 'bg-components-button-secondary-bg-hover', - )} - > - <RiMoreFill className="h-3 w-3" /> - </Button> - ) - } - </PopoverButton> - <Transition - as={Fragment} - leave="transition ease-in duration-100" - leaveFrom="opacity-100" - leaveTo="opacity-0" - > - <PopoverPanel className="absolute top-7 right-0 z-10 w-[144px] rounded-lg border-[0.5px] border-components-panel-border bg-components-panel-bg shadow-lg"> - <div className="p-1"> - <div className="px-3 pt-2 pb-1 text-sm font-medium text-text-secondary">{t('modelProvider.card.priorityUse', { ns: 'common' })}</div> - { - options.map(option => ( - <PopoverButton as={Fragment} key={option.key}> - <div - className="flex h-9 cursor-pointer items-center justify-between rounded-lg px-3 text-sm text-text-secondary hover:bg-components-panel-on-panel-item-bg-hover" - onClick={() => onSelect(option.key)} - > - <div className="grow">{option.text}</div> - {value === option.key && <RiCheckLine className="h-4 w-4 text-text-accent" />} - </div> - </PopoverButton> - )) - } - </div> - </PopoverPanel> - </Transition> - </Popover> - ) -} - -export default Selector diff --git a/web/app/components/header/app-selector/__tests__/index.spec.tsx 
b/web/app/components/header/app-selector/__tests__/index.spec.tsx deleted file mode 100644 index 2d255c006e..0000000000 --- a/web/app/components/header/app-selector/__tests__/index.spec.tsx +++ /dev/null @@ -1,172 +0,0 @@ -import type { AppDetailResponse } from '@/models/app' -import { act, fireEvent, render, screen } from '@testing-library/react' -import { vi } from 'vitest' -import { useAppContext } from '@/context/app-context' -import { useRouter } from '@/next/navigation' -import AppSelector from '../index' - -// Mock next/navigation -vi.mock('@/next/navigation', () => ({ - useRouter: vi.fn(), -})) - -// Mock app context -vi.mock('@/context/app-context', () => ({ - useAppContext: vi.fn(), -})) - -// Mock CreateAppDialog to avoid complex dependencies -vi.mock('@/app/components/app/create-app-dialog', () => ({ - default: ({ show, onClose }: { show: boolean, onClose: () => void }) => show - ? ( - <div data-testid="create-app-dialog"> - <button onClick={onClose}>Close</button> - </div> - ) - : null, -})) - -describe('AppSelector Component', () => { - const mockPush = vi.fn() - const mockAppItems = [ - { id: '1', name: 'App 1' }, - { id: '2', name: 'App 2' }, - ] as unknown as AppDetailResponse[] - const mockCurApp = mockAppItems[0]! 
- - beforeEach(() => { - vi.clearAllMocks() - vi.mocked(useRouter).mockReturnValue({ - push: mockPush, - } as unknown as ReturnType<typeof useRouter>) - vi.mocked(useAppContext).mockReturnValue({ - isCurrentWorkspaceEditor: true, - } as unknown as ReturnType<typeof useAppContext>) - }) - - describe('Rendering', () => { - it('should render current app name', () => { - render(<AppSelector appItems={mockAppItems} curApp={mockCurApp} />) - expect(screen.getByText('App 1'))!.toBeInTheDocument() - }) - }) - - describe('Interactions', () => { - it('should open menu and show app items', async () => { - render(<AppSelector appItems={mockAppItems} curApp={mockCurApp} />) - - const button = screen.getByRole('button', { name: /App 1/i }) - await act(async () => { - fireEvent.click(button) - }) - - expect(screen.getByText('App 2'))!.toBeInTheDocument() - }) - - it('should navigate to configuration when an app is clicked and user is editor', async () => { - render(<AppSelector appItems={mockAppItems} curApp={mockCurApp} />) - - const button = screen.getByRole('button', { name: /App 1/i }) - await act(async () => { - fireEvent.click(button) - }) - - const app2Item = screen.getByText('App 2') - await act(async () => { - fireEvent.click(app2Item) - }) - - expect(mockPush).toHaveBeenCalledWith('/app/2/configuration') - }) - - it('should navigate to overview when an app is clicked and user is not editor', async () => { - vi.mocked(useAppContext).mockReturnValue({ - isCurrentWorkspaceEditor: false, - } as unknown as ReturnType<typeof useAppContext>) - - render(<AppSelector appItems={mockAppItems} curApp={mockCurApp} />) - - const button = screen.getByRole('button', { name: /App 1/i }) - await act(async () => { - fireEvent.click(button) - }) - - const app2Item = screen.getByText('App 2') - await act(async () => { - fireEvent.click(app2Item) - }) - - expect(mockPush).toHaveBeenCalledWith('/app/2/overview') - }) - }) - - describe('New App Dialog', () => { - it('should show "New App" 
button for editor and open dialog', async () => { - render(<AppSelector appItems={mockAppItems} curApp={mockCurApp} />) - - const button = screen.getByRole('button', { name: /App 1/i }) - await act(async () => { - fireEvent.click(button) - }) - - const newAppBtn = screen.getByText('common.menus.newApp') - await act(async () => { - fireEvent.click(newAppBtn) - }) - - expect(screen.getByTestId('create-app-dialog'))!.toBeInTheDocument() - }) - - it('should not show "New App" button for non-editor', async () => { - vi.mocked(useAppContext).mockReturnValue({ - isCurrentWorkspaceEditor: false, - } as unknown as ReturnType<typeof useAppContext>) - - render(<AppSelector appItems={mockAppItems} curApp={mockCurApp} />) - - const button = screen.getByRole('button', { name: /App 1/i }) - await act(async () => { - fireEvent.click(button) - }) - - expect(screen.queryByText('common.menus.newApp')).not.toBeInTheDocument() - }) - - it('should close dialog when onClose is called', async () => { - render(<AppSelector appItems={mockAppItems} curApp={mockCurApp} />) - - const button = screen.getByRole('button', { name: /App 1/i }) - await act(async () => { - fireEvent.click(button) - }) - - const newAppBtn = screen.getByText('common.menus.newApp') - await act(async () => { - fireEvent.click(newAppBtn) - }) - - const closeBtn = screen.getByText('Close') - await act(async () => { - fireEvent.click(closeBtn) - }) - - expect(screen.queryByTestId('create-app-dialog')).not.toBeInTheDocument() - }) - }) - - describe('Edge Cases', () => { - it('should render nothing in menu if appItems is empty', async () => { - render(<AppSelector appItems={[]} curApp={mockCurApp} />) - - const button = screen.getByRole('button', { name: /App 1/i }) - await act(async () => { - fireEvent.click(button) - }) - - expect(screen.queryByText('App 2')).not.toBeInTheDocument() - // "New App" should still be there if editor - // "New App" should still be there if editor - 
expect(screen.getByText('common.menus.newApp'))!.toBeInTheDocument() - }) - }) -}) diff --git a/web/app/components/header/app-selector/index.tsx b/web/app/components/header/app-selector/index.tsx deleted file mode 100644 index 52e60de2b4..0000000000 --- a/web/app/components/header/app-selector/index.tsx +++ /dev/null @@ -1,117 +0,0 @@ -'use client' -import type { AppDetailResponse } from '@/models/app' -import { Menu, MenuButton, MenuItem, MenuItems, Transition } from '@headlessui/react' -import { ChevronDownIcon, PlusIcon } from '@heroicons/react/24/solid' -import { noop } from 'es-toolkit/function' -import { Fragment, useState } from 'react' -import { useTranslation } from 'react-i18next' -import CreateAppDialog from '@/app/components/app/create-app-dialog' -import AppIcon from '@/app/components/base/app-icon' -import { useAppContext } from '@/context/app-context' -import { useRouter } from '@/next/navigation' -import Indicator from '../indicator' - -type IAppSelectorProps = { - appItems: AppDetailResponse[] - curApp: AppDetailResponse -} - -export default function AppSelector({ appItems, curApp }: IAppSelectorProps) { - const router = useRouter() - const { isCurrentWorkspaceEditor } = useAppContext() - const [showNewAppDialog, setShowNewAppDialog] = useState(false) - const { t } = useTranslation() - - const itemClassName = ` - flex items-center w-full h-10 px-3 text-gray-700 text-[14px] - rounded-lg font-normal hover:bg-gray-100 cursor-pointer - ` - - return ( - <div className=""> - <Menu as="div" className="relative inline-block text-left"> - <div> - <MenuButton - className=" - inline-flex h-7 w-full items-center justify-center - rounded-[10px] pr-2.5 pl-2 text-[14px] font-semibold - text-[#1C64F2] hover:bg-[#EBF5FF] - " - > - {curApp?.name} - <ChevronDownIcon - className="ml-1 h-3 w-3" - aria-hidden="true" - /> - </MenuButton> - </div> - <Transition - as={Fragment} - enter="transition ease-out duration-100" - enterFrom="transform opacity-0 scale-95" - 
enterTo="transform opacity-100 scale-100" - leave="transition ease-in duration-75" - leaveFrom="transform opacity-100 scale-100" - leaveTo="transform opacity-0 scale-95" - > - <MenuItems - className=" - absolute right-0 -left-11 mt-1.5 w-60 max-w-80 - origin-top-right divide-y divide-gray-100 rounded-lg bg-white - shadow-lg - " - > - {!!appItems.length && ( - <div className="overflow-auto px-1 py-1" style={{ maxHeight: '50vh' }}> - { - appItems.map((app: AppDetailResponse) => ( - <MenuItem key={app.id}> - <div - className={itemClassName} - onClick={() => - router.push(`/app/${app.id}/${isCurrentWorkspaceEditor ? 'configuration' : 'overview'}`)} - > - <div className="relative mr-2 h-6 w-6 rounded-md bg-[#D5F5F6]"> - <AppIcon size="tiny" /> - <div className="absolute -right-0.5 -bottom-0.5 flex h-2.5 w-2.5 items-center justify-center rounded-sm bg-white"> - <Indicator /> - </div> - </div> - {app.name} - </div> - </MenuItem> - )) - } - </div> - )} - {isCurrentWorkspaceEditor && ( - <MenuItem> - <div className="p-1" onClick={() => setShowNewAppDialog(true)}> - <div - className="flex h-12 cursor-pointer items-center rounded-lg hover:bg-gray-100" - > - <div - className=" - mr-2 ml-4 flex - h-6 w-6 items-center justify-center rounded-md border-[0.5px] - border-dashed border-gray-200 bg-gray-100 - " - > - <PlusIcon className="h-4 w-4 text-gray-500" /> - </div> - <div className="text-[14px] font-normal text-gray-700">{t('menus.newApp', { ns: 'common' })}</div> - </div> - </div> - </MenuItem> - )} - </MenuItems> - </Transition> - </Menu> - <CreateAppDialog - show={showNewAppDialog} - onClose={() => setShowNewAppDialog(false)} - onSuccess={noop} - /> - </div> - ) -} diff --git a/web/app/components/header/nav/__tests__/index.spec.tsx b/web/app/components/header/nav/__tests__/index.spec.tsx index f4a1399638..6f9b448981 100644 --- a/web/app/components/header/nav/__tests__/index.spec.tsx +++ b/web/app/components/header/nav/__tests__/index.spec.tsx @@ -7,6 +7,7 @@ import { screen, 
waitFor, } from '@testing-library/react' +import userEvent from '@testing-library/user-event' import * as React from 'react' import { use } from 'react' import { vi } from 'vitest' @@ -291,45 +292,24 @@ describe('Nav Component', () => { }) it('should show sub-menu and call onCreate with types when isApp is true', async () => { - render(<Nav {...defaultProps} curNav={curNav} isApp />) - const selectorButton = screen.getByRole('button', { name: /Item 1/i }) - - await act(async () => { - fireEvent.click(selectorButton) - }) - - const openCreateMenu = async () => { - const createButton = await screen.findByText('Create New') - await act(async () => { - fireEvent.click(createButton) - }) - return screen.findByText(/app\.newApp\.startFromBlank/i) + const user = userEvent.setup() + const clickCreateBranch = async (optionName: RegExp) => { + const { unmount } = render(<Nav {...defaultProps} curNav={curNav} isApp />) + await user.click(screen.getByRole('button', { name: /Item 1/i })) + const createButton = await screen.findByRole('menuitem', { name: /Create New/i }) + await user.hover(createButton) + fireEvent.click(await screen.findByRole('menuitem', { name: optionName })) + unmount() } - await openCreateMenu() - const blankOption = await screen.findByText( - /app\.newApp\.startFromBlank/i, - ) - await act(async () => { - fireEvent.click(blankOption) - }) - expect(mockOnCreate).toHaveBeenCalledWith('blank') + await clickCreateBranch(/app\.newApp\.startFromBlank/i) + await clickCreateBranch(/app\.newApp\.startFromTemplate/i) + await clickCreateBranch(/app\.importDSL/i) - await openCreateMenu() - const templateOption = await screen.findByText( - /app\.newApp\.startFromTemplate/i, - ) - await act(async () => { - fireEvent.click(templateOption) - }) - expect(mockOnCreate).toHaveBeenCalledWith('template') - - await openCreateMenu() - const dslOption = await screen.findByText(/app\.importDSL/i) - await act(async () => { - fireEvent.click(dslOption) - }) - 
expect(mockOnCreate).toHaveBeenCalledWith('dsl') + expect(mockOnCreate).toHaveBeenNthCalledWith(1, 'blank') + expect(mockOnCreate).toHaveBeenNthCalledWith(2, 'template') + expect(mockOnCreate).toHaveBeenNthCalledWith(3, 'dsl') + expect(mockOnCreate).toHaveBeenCalledTimes(3) }) it('should not show create button if NOT an editor', async () => { diff --git a/web/app/components/header/nav/nav-selector/__tests__/index.spec.tsx b/web/app/components/header/nav/nav-selector/__tests__/index.spec.tsx index b1de3ab5e3..32de0691dd 100644 --- a/web/app/components/header/nav/nav-selector/__tests__/index.spec.tsx +++ b/web/app/components/header/nav/nav-selector/__tests__/index.spec.tsx @@ -1,6 +1,7 @@ import type { INavSelectorProps, NavItem } from '../index' import type { AppContextValue } from '@/context/app-context' import { act, fireEvent, render, screen } from '@testing-library/react' +import userEvent from '@testing-library/user-event' import * as React from 'react' import { vi } from 'vitest' import { useStore as useAppStore } from '@/app/components/app/store' @@ -198,6 +199,7 @@ describe('NavSelector Component', () => { }) it('should show extended create menu in app mode', async () => { + const user = userEvent.setup() render(<NavSelector {...defaultProps} isApp />) const button = screen.getByRole('button') await act(async () => { @@ -205,10 +207,10 @@ describe('NavSelector Component', () => { }) const openCreateMenu = async () => { - const createBtn = screen.getByText('Create New') - await act(async () => { - fireEvent.click(createBtn) - }) + if (!screen.queryByRole('menuitem', { name: /Create New/i })) + await user.click(screen.getByRole('button', { name: /Item 1/i })) + const createBtn = await screen.findByRole('menuitem', { name: /Create New/i }) + await user.hover(createBtn) return screen.findByText(/app\.newApp\.startFromBlank/i) } @@ -235,16 +237,15 @@ describe('NavSelector Component', () => { }) it('should open extended create menu on hover in app mode', async () 
=> { + const user = userEvent.setup() render(<NavSelector {...defaultProps} isApp />) const button = screen.getByRole('button') await act(async () => { fireEvent.click(button) }) - const createBtn = screen.getByText('Create New') - await act(async () => { - fireEvent.mouseEnter(createBtn) - }) + const createBtn = await screen.findByRole('menuitem', { name: /Create New/i }) + await user.hover(createBtn) expect(await screen.findByText(/app\.newApp\.startFromBlank/i))!.toBeInTheDocument() }) diff --git a/web/app/components/header/nav/nav-selector/index.tsx b/web/app/components/header/nav/nav-selector/index.tsx index 9b42d30308..5a8c1ae9e3 100644 --- a/web/app/components/header/nav/nav-selector/index.tsx +++ b/web/app/components/header/nav/nav-selector/index.tsx @@ -1,14 +1,21 @@ 'use client' import type { AppIconType, AppModeEnum } from '@/types/app' -import { Menu, MenuButton, MenuItem, MenuItems } from '@headlessui/react' import { cn } from '@langgenius/dify-ui/cn' +import { + DropdownMenu, + DropdownMenuContent, + DropdownMenuItem, + DropdownMenuSub, + DropdownMenuSubContent, + DropdownMenuSubTrigger, + DropdownMenuTrigger, +} from '@langgenius/dify-ui/dropdown-menu' import { RiAddLine, RiArrowDownSLine, - RiArrowRightSLine, } from '@remixicon/react' import { debounce } from 'es-toolkit/compat' -import { useCallback, useState } from 'react' +import { useCallback } from 'react' import { useTranslation } from 'react-i18next' import { useStore as useAppStore } from '@/app/components/app/store' import { AppTypeIcon } from '@/app/components/app/type-selector' @@ -53,57 +60,54 @@ const AppCreateMenu = ({ importDSLText, onCreate, }: AppCreateMenuProps) => { - const [open, setOpen] = useState(false) - const handleCreate = (state: string) => { - setOpen(false) onCreate(state) } return ( - <div className="relative h-full w-full" onMouseLeave={() => setOpen(false)}> - <button - type="button" - className="w-full p-1 text-left" - onClick={() => setOpen(value => !value)} - 
onMouseEnter={() => setOpen(true)} - > - <div className={cn( - 'flex cursor-pointer items-center gap-2 rounded-lg px-3 py-[6px] hover:bg-state-base-hover', - open && 'bg-state-base-hover!', - )} + <DropdownMenuSub> + <div className="p-1"> + <DropdownMenuSubTrigger + className="h-9 gap-2 px-3 py-[6px]" > <div className="flex h-6 w-6 shrink-0 items-center justify-center rounded-md border-[0.5px] border-divider-regular bg-background-default"> - <RiAddLine className="h-4 w-4 text-text-primary" /> + <span className="i-ri-add-line h-4 w-4 text-text-primary" /> </div> - <div className="grow text-left text-[14px] font-normal text-text-secondary">{createText}</div> - <RiArrowRightSLine className="h-3.5 w-3.5 shrink-0 text-text-primary" /> + <span className="grow text-left text-[14px] font-normal text-text-secondary">{createText}</span> + </DropdownMenuSubTrigger> + </div> + <DropdownMenuSubContent + placement="right-start" + sideOffset={4} + popupClassName="min-w-[200px] bg-components-panel-bg-blur p-0" + > + <div className="p-1"> + <DropdownMenuItem + className="h-9 px-3 py-[6px] font-normal text-text-secondary" + onClick={() => handleCreate('blank')} + > + <FilePlus01 className="mr-2 h-4 w-4 shrink-0 text-text-secondary" /> + {startFromBlankText} + </DropdownMenuItem> + <DropdownMenuItem + className="h-9 px-3 py-[6px] font-normal text-text-secondary" + onClick={() => handleCreate('template')} + > + <FilePlus02 className="mr-2 h-4 w-4 shrink-0 text-text-secondary" /> + {startFromTemplateText} + </DropdownMenuItem> </div> - </button> - {open && ( - <div - className="absolute top-[3px] right-[-198px] z-10 min-w-[200px] rounded-lg border-[0.5px] border-components-panel-border bg-components-panel-bg-blur shadow-lg" - onMouseEnter={() => setOpen(true)} - > - <div className="p-1"> - <button type="button" className={cn('flex w-full cursor-pointer items-center rounded-lg px-3 py-[6px] text-left font-normal text-text-secondary hover:bg-state-base-hover')} onClick={() => 
handleCreate('blank')}> - <FilePlus01 className="mr-2 h-4 w-4 shrink-0 text-text-secondary" /> - {startFromBlankText} - </button> - <button type="button" className={cn('flex w-full cursor-pointer items-center rounded-lg px-3 py-[6px] text-left font-normal text-text-secondary hover:bg-state-base-hover')} onClick={() => handleCreate('template')}> - <FilePlus02 className="mr-2 h-4 w-4 shrink-0 text-text-secondary" /> - {startFromTemplateText} - </button> - </div> - <div className="border-t border-divider-regular p-1"> - <button type="button" className={cn('flex w-full cursor-pointer items-center rounded-lg px-3 py-[6px] text-left font-normal text-text-secondary hover:bg-state-base-hover')} onClick={() => handleCreate('dsl')}> - <FileArrow01 className="mr-2 h-4 w-4 shrink-0 text-text-secondary" /> - {importDSLText} - </button> - </div> + <div className="border-t border-divider-regular p-1"> + <DropdownMenuItem + className="h-9 px-3 py-[6px] font-normal text-text-secondary" + onClick={() => handleCreate('dsl')} + > + <FileArrow01 className="mr-2 h-4 w-4 shrink-0 text-text-secondary" /> + {importDSLText} + </DropdownMenuItem> </div> - )} - </div> + </DropdownMenuSubContent> + </DropdownMenuSub> ) } @@ -123,94 +127,86 @@ const NavSelector = ({ curNav, navigationItems, createText, isApp, onCreate, onL }, 50), []) return ( - <Menu as="div" className="relative"> - {({ open }) => ( - <> - <MenuButton className={cn( - 'hover:hover:bg-components-main-nav-nav-button-bg-active-hover group inline-flex h-7 w-full items-center justify-center rounded-[10px] pr-2.5 pl-2 text-[14px] font-semibold text-components-main-nav-nav-button-text-active', - open && 'bg-components-main-nav-nav-button-bg-active', - )} - > - <div className="max-w-[157px] truncate" title={curNav?.name}>{curNav?.name}</div> - <RiArrowDownSLine - className={cn('ml-1 h-3 w-3 shrink-0 opacity-50 group-hover:opacity-100', open && 'opacity-100!')} - aria-hidden="true" - /> - </MenuButton> - <MenuItems - className=" - 
absolute right-0 -left-11 mt-1.5 w-60 max-w-80 - origin-top-right divide-y divide-divider-regular rounded-lg bg-components-panel-bg-blur - shadow-lg outline-hidden - " - > - <div className="overflow-auto px-1 py-1" style={{ maxHeight: '50vh' }} onScroll={handleScroll}> - { - navigationItems.map(nav => ( - <MenuItem key={nav.id}> - <div - className="flex w-full cursor-pointer items-center truncate rounded-lg px-3 py-[6px] text-[14px] font-normal text-text-secondary hover:bg-state-base-hover" - onClick={() => { - if (curNav?.id === nav.id) - return - setAppDetail() - router.push(nav.link) - }} - title={nav.name} - > - <div className="relative mr-2 h-6 w-6 rounded-md"> - <AppIcon - size="tiny" - iconType={nav.icon_type} - icon={nav.icon} - background={nav.icon_background} - imageUrl={nav.icon_url} - /> - {!!nav.mode && ( - <AppTypeIcon type={nav.mode} wrapperClassName="absolute -bottom-0.5 -right-0.5 h-3.5 w-3.5 shadow-sm" className="h-2.5 w-2.5" /> - )} - </div> - <div className="truncate"> - {nav.name} - </div> - </div> - </MenuItem> - )) - } - {isLoadingMore && ( - <div className="flex justify-center py-2"> - <Loading /> - </div> - )} - </div> - {!isApp && isCurrentWorkspaceEditor && ( - <MenuItem as="div" className="w-full p-1"> - <div - onClick={() => onCreate('')} - className={cn( - 'flex cursor-pointer items-center gap-2 rounded-lg px-3 py-[6px] hover:bg-state-base-hover', + <DropdownMenu modal={false}> + <DropdownMenuTrigger + className={cn( + 'hover:hover:bg-components-main-nav-nav-button-bg-active-hover group inline-flex h-7 items-center justify-center rounded-[10px] pr-2.5 pl-2 text-[14px] font-semibold text-components-main-nav-nav-button-text-active outline-hidden', + 'focus-visible:bg-components-main-nav-nav-button-bg-active focus-visible:ring-1 focus-visible:ring-components-input-border-hover data-popup-open:bg-components-main-nav-nav-button-bg-active', + )} + > + <div className="max-w-[157px] truncate" title={curNav?.name}>{curNav?.name}</div> + 
<RiArrowDownSLine + className="ml-1 h-3 w-3 shrink-0 opacity-50 group-hover:opacity-100 group-data-popup-open:opacity-100" + aria-hidden="true" + /> + </DropdownMenuTrigger> + <DropdownMenuContent + placement="bottom-end" + sideOffset={6} + popupClassName="w-60 max-w-80 divide-y divide-divider-regular bg-components-panel-bg-blur p-0" + > + <div className="max-h-[50vh] overflow-auto px-1 py-1" onScroll={handleScroll}> + { + navigationItems.map(nav => ( + <DropdownMenuItem + key={nav.id} + className="h-auto truncate px-3 py-[6px] text-[14px] font-normal text-text-secondary" + onClick={() => { + if (curNav?.id === nav.id) + return + setAppDetail() + router.push(nav.link) + }} + title={nav.name} + > + <div className="relative mr-2 h-6 w-6 shrink-0 rounded-md"> + <AppIcon + size="tiny" + iconType={nav.icon_type} + icon={nav.icon} + background={nav.icon_background} + imageUrl={nav.icon_url} + /> + {!!nav.mode && ( + <AppTypeIcon type={nav.mode} wrapperClassName="absolute -bottom-0.5 -right-0.5 h-3.5 w-3.5 shadow-sm" className="h-2.5 w-2.5" /> )} - > - <div className="flex h-6 w-6 shrink-0 items-center justify-center rounded-md border-[0.5px] border-divider-regular bg-background-default"> - <RiAddLine className="h-4 w-4 text-text-primary" /> - </div> - <div className="grow text-left text-[14px] font-normal text-text-secondary">{createText}</div> </div> - </MenuItem> - )} - {isApp && isCurrentWorkspaceEditor && ( - <AppCreateMenu - createText={createText} - startFromBlankText={t('newApp.startFromBlank', { ns: 'app' })} - startFromTemplateText={t('newApp.startFromTemplate', { ns: 'app' })} - importDSLText={t('importDSL', { ns: 'app' })} - onCreate={onCreate} - /> - )} - </MenuItems> - </> - )} - </Menu> + <div className="min-w-0 truncate"> + {nav.name} + </div> + </DropdownMenuItem> + )) + } + {isLoadingMore && ( + <div className="flex justify-center py-2"> + <Loading /> + </div> + )} + </div> + {!isApp && isCurrentWorkspaceEditor && ( + <div className="p-1"> + 
<DropdownMenuItem + className="h-9 gap-2 px-3 py-[6px]" + onClick={() => onCreate('')} + > + <div className="flex h-6 w-6 shrink-0 items-center justify-center rounded-md border-[0.5px] border-divider-regular bg-background-default"> + <RiAddLine className="h-4 w-4 text-text-primary" /> + </div> + <div className="grow text-left text-[14px] font-normal text-text-secondary">{createText}</div> + </DropdownMenuItem> + </div> + )} + {isApp && isCurrentWorkspaceEditor && ( + <AppCreateMenu + createText={createText} + startFromBlankText={t('newApp.startFromBlank', { ns: 'app' })} + startFromTemplateText={t('newApp.startFromTemplate', { ns: 'app' })} + importDSLText={t('importDSL', { ns: 'app' })} + onCreate={onCreate} + /> + )} + </DropdownMenuContent> + </DropdownMenu> ) } diff --git a/web/app/components/workflow/nodes/_base/components/__tests__/form-input-item.branches.spec.tsx b/web/app/components/workflow/nodes/_base/components/__tests__/form-input-item.branches.spec.tsx index 38fa62a728..9ca932e54c 100644 --- a/web/app/components/workflow/nodes/_base/components/__tests__/form-input-item.branches.spec.tsx +++ b/web/app/components/workflow/nodes/_base/components/__tests__/form-input-item.branches.spec.tsx @@ -2,6 +2,7 @@ import type { ComponentProps } from 'react' import type { CredentialFormSchema, FormOption } from '@/app/components/header/account-setting/model-provider-page/declarations' import type { AppSelectorValue } from '@/app/components/plugins/plugin-detail-panel/app-selector' import { fireEvent, screen, waitFor } from '@testing-library/react' +import userEvent from '@testing-library/user-event' import { FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' import { PluginCategoryEnum } from '@/app/components/plugins/types' import { renderWorkflowFlowComponent } from '@/app/components/workflow/__tests__/workflow-test-env' @@ -207,7 +208,8 @@ describe('FormInputItem branches', () => { }) }) - it('should render static 
multi-select values and update selected labels', () => { + it('should render static multi-select values and update selected labels', async () => { + const user = userEvent.setup() const { onChange } = renderFormInputItem({ schema: createSchema({ multiple: true, @@ -226,8 +228,8 @@ describe('FormInputItem branches', () => { }) expect(screen.getByText('alpha')).toBeInTheDocument() - fireEvent.click(screen.getByText('alpha').closest('button') as HTMLButtonElement) - fireEvent.click(screen.getByText('beta')) + await user.click(screen.getByRole('combobox', { name: 'alpha' })) + await user.click(await screen.findByRole('option', { name: 'beta' })) expect(onChange).toHaveBeenCalledWith({ field: { diff --git a/web/app/components/workflow/nodes/_base/components/__tests__/form-input-item.sections.spec.tsx b/web/app/components/workflow/nodes/_base/components/__tests__/form-input-item.sections.spec.tsx index 9a98e60483..34382f2be0 100644 --- a/web/app/components/workflow/nodes/_base/components/__tests__/form-input-item.sections.spec.tsx +++ b/web/app/components/workflow/nodes/_base/components/__tests__/form-input-item.sections.spec.tsx @@ -1,4 +1,5 @@ -import { fireEvent, screen } from '@testing-library/react' +import { screen } from '@testing-library/react' +import userEvent from '@testing-library/user-event' import { renderWorkflowComponent } from '@/app/components/workflow/__tests__/workflow-test-env' import { JsonEditorField, @@ -18,7 +19,7 @@ describe('form-input-item sections', () => { />, ) - expect(screen.getByText('Loading...')).toBeInTheDocument() + expect(screen.getByRole('combobox', { name: 'Options' })).toHaveTextContent('Loading') }) it('should render the shared json editor section', () => { @@ -33,7 +34,8 @@ describe('form-input-item sections', () => { expect(screen.getByText('JSON')).toBeInTheDocument() }) - it('should render placeholder, icons, and select multi-select options', () => { + it('should render placeholder, icons, and select multi-select options', 
async () => { + const user = userEvent.setup() const onChange = vi.fn() renderWorkflowComponent( @@ -51,8 +53,8 @@ describe('form-input-item sections', () => { ) expect(screen.getByText('Choose options')).toBeInTheDocument() - fireEvent.click(screen.getByRole('button')) - fireEvent.click(screen.getByText('Alpha')) + await user.click(screen.getByRole('combobox', { name: 'Choose options' })) + await user.click(await screen.findByRole('option', { name: 'Alpha' })) expect(document.querySelector('img[src="/alpha.svg"]')).toBeInTheDocument() expect(onChange).toHaveBeenCalled() diff --git a/web/app/components/workflow/nodes/_base/components/form-input-item.sections.tsx b/web/app/components/workflow/nodes/_base/components/form-input-item.sections.tsx index 84cfb4629d..896250508c 100644 --- a/web/app/components/workflow/nodes/_base/components/form-input-item.sections.tsx +++ b/web/app/components/workflow/nodes/_base/components/form-input-item.sections.tsx @@ -2,10 +2,16 @@ import type { FC, ReactElement } from 'react' import type { SelectItem } from './form-input-item.helpers' -import { Listbox, ListboxButton, ListboxOption, ListboxOptions } from '@headlessui/react' -import { ChevronDownIcon } from '@heroicons/react/20/solid' import { cn } from '@langgenius/dify-ui/cn' -import { RiCheckLine, RiLoader4Line } from '@remixicon/react' +import { + SelectItem as DifySelectItem, + Select, + SelectContent, + SelectItemIndicator, + SelectItemText, + SelectTrigger, +} from '@langgenius/dify-ui/select' +import { RiLoader4Line } from '@remixicon/react' import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor' import { CodeLanguage } from '@/app/components/workflow/nodes/code/types' @@ -20,20 +26,7 @@ type MultiSelectFieldProps = { } const LoadingIndicator = () => ( - <RiLoader4Line className="h-3.5 w-3.5 animate-spin text-text-secondary" /> -) - -const ToggleIndicator = () => ( - <ChevronDownIcon - className="h-4 w-4 text-text-quaternary 
group-hover/simple-select:text-text-secondary" - aria-hidden="true" - /> -) - -const SelectedMark = () => ( - <span className="absolute inset-y-0 right-0 flex items-center pr-2 text-text-accent"> - <RiCheckLine className="h-4 w-4" aria-hidden="true" /> - </span> + <RiLoader4Line className="mr-1 h-3.5 w-3.5 shrink-0 animate-spin text-text-secondary motion-reduce:animate-none" aria-hidden="true" /> ) export const MultiSelectField: FC<MultiSelectFieldProps> = ({ @@ -56,48 +49,44 @@ export const MultiSelectField: FC<MultiSelectFieldProps> = ({ const renderLabel = () => { if (isLoading) - return 'Loading...' + return 'Loading…' return selectedLabel || placeholder || 'Select options' } return ( - <Listbox multiple value={value} onChange={onChange} disabled={disabled}> - <div className="group/simple-select relative h-8 grow"> - <ListboxButton className="flex h-full w-full cursor-pointer items-center rounded-lg border-0 bg-components-input-bg-normal pr-10 pl-3 group-hover/simple-select:bg-state-base-hover-alt focus-visible:bg-state-base-hover-alt focus-visible:outline-hidden sm:text-sm sm:leading-6"> - <span className={textClassName}> + <Select multiple value={value} onValueChange={onChange} disabled={disabled || isLoading}> + <div className="grow"> + <SelectTrigger aria-label={placeholder || selectedLabel || 'Options'}> + <span className={cn('flex min-w-0 items-center', textClassName)}> + {isLoading && <LoadingIndicator />} {renderLabel()} </span> - <span className="absolute inset-y-0 right-0 flex items-center pr-2"> - {isLoading ? 
<LoadingIndicator /> : <ToggleIndicator />} - </span> - </ListboxButton> - <ListboxOptions className="absolute z-10 mt-1 max-h-60 w-full overflow-auto rounded-xl border-[0.5px] border-components-panel-border bg-components-panel-bg-blur px-1 py-1 text-base shadow-lg backdrop-blur-xs focus:outline-hidden sm:text-sm"> + </SelectTrigger> + <SelectContent + popupClassName="w-(--anchor-width) bg-components-panel-bg-blur backdrop-blur-xs" + listClassName="max-h-60" + > {items.map(item => ( - <ListboxOption + <DifySelectItem key={item.value} value={item.value} - className={({ focus }) => - cn('relative cursor-pointer rounded-lg py-2 pr-9 pl-3 text-text-secondary select-none hover:bg-state-base-hover', focus && 'bg-state-base-hover')} + className="h-auto py-2 pr-9 pl-3" > - {({ selected }) => ( - <> - <div className="flex items-center"> - {item.icon && ( - <img src={item.icon} alt="" className="mr-2 h-4 w-4" /> - )} - <span className={cn('block truncate', selected && 'font-normal')}> - {item.name} - </span> - </div> - {selected && <SelectedMark />} - </> - )} - </ListboxOption> + <div className="flex min-w-0 items-center"> + {item.icon && ( + <img src={item.icon} alt="" width={16} height={16} className="mr-2 h-4 w-4 shrink-0" /> + )} + <SelectItemText> + {item.name} + </SelectItemText> + </div> + <SelectItemIndicator /> + </DifySelectItem> ))} - </ListboxOptions> + </SelectContent> </div> - </Listbox> + </Select> ) } From f3eb3ab4dd770461ff639bd1f598d27ac50bd4bc Mon Sep 17 00:00:00 2001 From: Joel <iamjoel007@gmail.com> Date: Sat, 9 May 2026 15:01:35 +0800 Subject: [PATCH 11/13] fix: mismatched button label in prefilled WebApp launch description (#35964) --- web/i18n/ja-JP/app-overview.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/i18n/ja-JP/app-overview.json b/web/i18n/ja-JP/app-overview.json index 745f02273c..e7c59e926a 100644 --- a/web/i18n/ja-JP/app-overview.json +++ b/web/i18n/ja-JP/app-overview.json @@ -106,7 +106,7 @@ 
"overview.appInfo.settings.workflow.subTitle": "ワークフローの詳細", "overview.appInfo.settings.workflow.title": "ワークフローステップ", "overview.appInfo.title": "Web App", - "overview.appInfo.workflowLaunchHiddenInputs.description": "非表示フィールドに値を入力後、<bold>起動</bold>をクリックすると、事前入力された値が適用された WebApp が開きます。", + "overview.appInfo.workflowLaunchHiddenInputs.description": "非表示フィールドに値を入力後、<bold>公開</bold>をクリックすると、事前入力された値が適用された WebApp が開きます。", "overview.appInfo.workflowLaunchHiddenInputs.title": "非表示フィールドを事前入力", "overview.disableTooltip.triggerMode": "トリガーノードモードでは{{feature}}機能を使用できません。", "overview.status.disable": "無効", From 19476109da30ddea47c835d4f1bd2bb7c0a8796e Mon Sep 17 00:00:00 2001 From: -LAN- <laipz8200@outlook.com> Date: Sat, 9 May 2026 15:30:03 +0800 Subject: [PATCH 12/13] chore(api): upgrade graphon to v0.3.0 (#35469) Signed-off-by: -LAN- <laipz8200@outlook.com> Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: WH-2099 <wh2099@pm.me> --- api/core/app/llm/__init__.py | 14 +- api/core/app/llm/quota.py | 181 +++-- api/core/app/workflow/layers/llm_quota.py | 146 +++-- api/core/entities/provider_configuration.py | 28 +- api/core/helper/moderation.py | 8 +- api/core/plugin/impl/model_runtime.py | 184 +++++- api/core/plugin/impl/model_runtime_factory.py | 48 +- api/core/provider_manager.py | 6 +- api/core/workflow/node_factory.py | 8 +- api/core/workflow/nodes/agent/agent_node.py | 4 +- .../nodes/datasource/datasource_node.py | 4 +- .../knowledge_index/knowledge_index_node.py | 4 +- .../knowledge_retrieval_node.py | 4 +- api/core/workflow/system_variables.py | 24 +- api/core/workflow/workflow_entry.py | 11 +- .../update_provider_when_message_created.py | 28 +- api/pyproject.toml | 2 +- api/services/credit_pool_service.py | 86 ++- .../workflow_draft_variable_service.py | 4 +- api/services/workflow_service.py | 2 +- .../test_datasource_node_integration.py | 2 +- .../workflow/nodes/__mock/model.py | 7 +- .../workflow/nodes/test_code.py | 4 
+- .../workflow/nodes/test_http.py | 10 +- .../workflow/nodes/test_llm.py | 4 +- .../nodes/test_parameter_extractor.py | 4 +- .../workflow/nodes/test_template_transform.py | 4 +- .../workflow/nodes/test_tool.py | 4 +- .../layers/test_pause_state_persist_layer.py | 4 +- .../test_human_input_resume_node_execution.py | 8 +- .../services/test_credit_pool_service.py | 24 +- .../test_generate_task_pipeline_core.py | 16 +- .../core/app/apps/test_pause_resume.py | 6 +- .../app/apps/test_workflow_app_runner_core.py | 12 +- .../test_workflow_app_runner_single_node.py | 2 +- .../test_generate_task_pipeline_core.py | 20 +- .../app/layers/test_trigger_post_layer.py | 12 +- .../unit_tests/core/app/test_llm_quota.py | 617 ++++++++++++++++++ .../core/app/workflow/test_node_factory.py | 4 +- .../app/workflow/test_persistence_layer.py | 5 +- .../test_entities_provider_configuration.py | 80 ++- .../unit_tests/core/helper/test_moderation.py | 14 +- .../test_model_provider_factory.py | 83 +-- .../plugin/impl/test_model_runtime_factory.py | 2 +- .../core/plugin/test_model_runtime_adapter.py | 191 +++++- .../unit_tests/core/test_provider_manager.py | 6 +- .../graph_engine/layers/test_llm_quota.py | 332 +++++++--- .../graph_engine/test_mock_factory.py | 8 +- .../workflow/graph_engine/test_mock_nodes.py | 4 +- .../test_parallel_human_input_join_resume.py | 10 +- .../core/workflow/nodes/answer/test_answer.py | 137 +--- .../nodes/datasource/test_datasource_node.py | 2 +- .../test_http_request_executor.py | 30 +- .../http_request/test_http_request_node.py | 7 +- .../nodes/human_input/test_entities.py | 18 +- .../test_human_input_form_filled_event.py | 14 +- .../test_iteration_child_engine_errors.py | 4 +- .../test_knowledge_index_node.py | 4 +- .../test_knowledge_retrieval_node.py | 28 +- .../workflow/nodes/list_operator/node_spec.py | 38 +- .../core/workflow/nodes/llm/test_node.py | 13 +- .../template_transform_node_spec.py | 2 +- .../test_template_transform_node.py | 2 +- 
.../core/workflow/nodes/test_base_node.py | 16 +- .../nodes/test_document_extractor_node.py | 72 +- .../core/workflow/nodes/test_if_else.py | 15 +- .../core/workflow/nodes/test_list_operator.py | 2 +- .../nodes/test_start_node_json_object.py | 4 +- .../workflow/nodes/tool/test_tool_node.py | 4 +- .../trigger_plugin/test_trigger_event_node.py | 2 +- .../webhook/test_webhook_file_conversion.py | 2 +- .../nodes/webhook/test_webhook_node.py | 2 +- .../core/workflow/test_node_factory.py | 85 ++- .../core/workflow/test_variable_pool.py | 4 +- .../core/workflow/test_workflow_entry.py | 22 +- .../workflow/test_workflow_entry_helpers.py | 77 ++- ...st_update_provider_when_message_created.py | 130 ++++ .../services/test_credit_pool_service.py | 158 +++++ .../services/test_workflow_service.py | 2 +- api/uv.lock | 8 +- 80 files changed, 2526 insertions(+), 673 deletions(-) create mode 100644 api/tests/unit_tests/core/app/test_llm_quota.py create mode 100644 api/tests/unit_tests/events/test_update_provider_when_message_created.py create mode 100644 api/tests/unit_tests/services/test_credit_pool_service.py diff --git a/api/core/app/llm/__init__.py b/api/core/app/llm/__init__.py index f069bede74..d20a5b2344 100644 --- a/api/core/app/llm/__init__.py +++ b/api/core/app/llm/__init__.py @@ -1,5 +1,15 @@ """LLM-related application services.""" -from .quota import deduct_llm_quota, ensure_llm_quota_available +from .quota import ( + deduct_llm_quota, + deduct_llm_quota_for_model, + ensure_llm_quota_available, + ensure_llm_quota_available_for_model, +) -__all__ = ["deduct_llm_quota", "ensure_llm_quota_available"] +__all__ = [ + "deduct_llm_quota", + "deduct_llm_quota_for_model", + "ensure_llm_quota_available", + "ensure_llm_quota_available_for_model", +] diff --git a/api/core/app/llm/quota.py b/api/core/app/llm/quota.py index b6039e1e4e..5bf3334a7b 100644 --- a/api/core/app/llm/quota.py +++ b/api/core/app/llm/quota.py @@ -1,4 +1,14 @@ -from sqlalchemy import update +"""Tenant-scoped 
helpers for checking and deducting LLM provider quota. + +System-hosted quota accounting is currently defined only for LLM models. Keep +the public helpers LLM-specific so callers do not carry unused model-type +plumbing, and fail loudly if the deprecated ``ModelInstance`` wrappers are used +with a non-LLM model. +""" + +import warnings + +from sqlalchemy import select from sqlalchemy.orm import sessionmaker from configs import dify_config @@ -6,44 +16,47 @@ from core.entities.model_entities import ModelStatus from core.entities.provider_entities import ProviderQuotaType, QuotaUnit from core.errors.error import QuotaExceededError from core.model_manager import ModelInstance +from core.plugin.impl.model_runtime_factory import create_plugin_provider_manager from extensions.ext_database import db from graphon.model_runtime.entities.llm_entities import LLMUsage +from graphon.model_runtime.entities.model_entities import ModelType from libs.datetime_utils import naive_utc_now from models.provider import Provider, ProviderType from models.provider_ids import ModelProviderID -def ensure_llm_quota_available(*, model_instance: ModelInstance) -> None: - provider_model_bundle = model_instance.provider_model_bundle - provider_configuration = provider_model_bundle.configuration +def _get_provider_configuration(*, tenant_id: str, provider: str): + """Resolve the tenant-bound provider configuration for quota decisions.""" + provider_manager = create_plugin_provider_manager(tenant_id=tenant_id) + provider_configuration = provider_manager.get_configurations(tenant_id).get(provider) + if provider_configuration is None: + raise ValueError(f"Provider {provider} does not exist.") + return provider_configuration + +def ensure_llm_quota_available_for_model(*, tenant_id: str, provider: str, model: str) -> None: + """Raise when a tenant-bound LLM model is already out of quota.""" + provider_configuration = _get_provider_configuration(tenant_id=tenant_id, provider=provider) if 
provider_configuration.using_provider_type != ProviderType.SYSTEM: return provider_model = provider_configuration.get_provider_model( - model_type=model_instance.model_type_instance.model_type, - model=model_instance.model_name, + model_type=ModelType.LLM, + model=model, ) if provider_model and provider_model.status == ModelStatus.QUOTA_EXCEEDED: - raise QuotaExceededError(f"Model provider {model_instance.provider} quota exceeded.") + raise QuotaExceededError(f"Model provider {provider} quota exceeded.") -def deduct_llm_quota(*, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None: - provider_model_bundle = model_instance.provider_model_bundle - provider_configuration = provider_model_bundle.configuration - - if provider_configuration.using_provider_type != ProviderType.SYSTEM: - return - - system_configuration = provider_configuration.system_configuration - +def _resolve_llm_used_quota(*, system_configuration, model: str, usage: LLMUsage) -> int | None: + """Compute the quota impact for an LLM invocation under the current quota mode.""" quota_unit = None for quota_configuration in system_configuration.quota_configurations: if quota_configuration.quota_type == system_configuration.current_quota_type: quota_unit = quota_configuration.quota_unit if quota_configuration.quota_limit == -1: - return + return None break @@ -52,42 +65,136 @@ def deduct_llm_quota(*, tenant_id: str, model_instance: ModelInstance, usage: LL if quota_unit == QuotaUnit.TOKENS: used_quota = usage.total_tokens elif quota_unit == QuotaUnit.CREDITS: - used_quota = dify_config.get_model_credits(model_instance.model_name) + used_quota = dify_config.get_model_credits(model) else: used_quota = 1 + return used_quota + + +def _deduct_free_llm_quota( + *, + tenant_id: str, + provider: str, + quota_type: ProviderQuotaType, + used_quota: int, +) -> None: + """Deduct FREE provider quota, capping at the limit before reporting exhaustion.""" + quota_exceeded = False + with 
sessionmaker(bind=db.engine).begin() as session: + provider_record = session.scalar( + select(Provider) + .where( + Provider.tenant_id == tenant_id, + # TODO: Use provider name with prefix after the data migration. + Provider.provider_name == ModelProviderID(provider).provider_name, + Provider.provider_type == ProviderType.SYSTEM.value, + Provider.quota_type == quota_type, + ) + .with_for_update() + ) + if ( + provider_record is None + or provider_record.quota_limit is None + or provider_record.quota_used is None + or provider_record.quota_limit <= provider_record.quota_used + ): + quota_exceeded = True + else: + available_quota = provider_record.quota_limit - provider_record.quota_used + deducted_quota = min(used_quota, available_quota) + provider_record.quota_used += deducted_quota + provider_record.last_used = naive_utc_now() + quota_exceeded = deducted_quota < used_quota + + if quota_exceeded: + raise QuotaExceededError(f"Model provider {provider} quota exceeded.") + + +def _deduct_used_llm_quota(*, tenant_id: str, provider: str, provider_configuration, used_quota: int | None) -> None: + """Apply a resolved LLM quota charge against the current provider quota bucket.""" + if provider_configuration.using_provider_type != ProviderType.SYSTEM: + return + + system_configuration = provider_configuration.system_configuration if used_quota is not None and system_configuration.current_quota_type is not None: match system_configuration.current_quota_type: case ProviderQuotaType.TRIAL: from services.credit_pool_service import CreditPoolService - CreditPoolService.check_and_deduct_credits( + CreditPoolService.deduct_credits_capped( tenant_id=tenant_id, credits_required=used_quota, ) case ProviderQuotaType.PAID: from services.credit_pool_service import CreditPoolService - CreditPoolService.check_and_deduct_credits( + CreditPoolService.deduct_credits_capped( tenant_id=tenant_id, credits_required=used_quota, pool_type="paid", ) case ProviderQuotaType.FREE: - with 
sessionmaker(bind=db.engine).begin() as session: - stmt = ( - update(Provider) - .where( - Provider.tenant_id == tenant_id, - # TODO: Use provider name with prefix after the data migration. - Provider.provider_name == ModelProviderID(model_instance.provider).provider_name, - Provider.provider_type == ProviderType.SYSTEM.value, - Provider.quota_type == system_configuration.current_quota_type, - Provider.quota_limit > Provider.quota_used, - ) - .values( - quota_used=Provider.quota_used + used_quota, - last_used=naive_utc_now(), - ) - ) - session.execute(stmt) + _deduct_free_llm_quota( + tenant_id=tenant_id, + provider=provider, + quota_type=system_configuration.current_quota_type, + used_quota=used_quota, + ) + case _: + return + + +def deduct_llm_quota_for_model(*, tenant_id: str, provider: str, model: str, usage: LLMUsage) -> None: + """Deduct tenant-bound quota for the resolved LLM model identity.""" + provider_configuration = _get_provider_configuration(tenant_id=tenant_id, provider=provider) + used_quota = _resolve_llm_used_quota( + system_configuration=provider_configuration.system_configuration, + model=model, + usage=usage, + ) + _deduct_used_llm_quota( + tenant_id=tenant_id, + provider=provider, + provider_configuration=provider_configuration, + used_quota=used_quota, + ) + + +def _require_llm_model_instance(model_instance: ModelInstance) -> None: + """Reject deprecated wrapper calls that pass a non-LLM model instance.""" + if model_instance.model_type_instance.model_type != ModelType.LLM: + raise ValueError("LLM quota helpers only support LLM model instances.") + + +def ensure_llm_quota_available(*, model_instance: ModelInstance) -> None: + """Deprecated compatibility wrapper for callers that still pass ModelInstance.""" + warnings.warn( + "ensure_llm_quota_available(model_instance=...) is deprecated; " + "use ensure_llm_quota_available_for_model(...) 
instead.", + DeprecationWarning, + stacklevel=2, + ) + _require_llm_model_instance(model_instance) + ensure_llm_quota_available_for_model( + tenant_id=model_instance.provider_model_bundle.configuration.tenant_id, + provider=model_instance.provider, + model=model_instance.model_name, + ) + + +def deduct_llm_quota(*, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None: + """Deprecated compatibility wrapper for callers that still pass ModelInstance.""" + warnings.warn( + "deduct_llm_quota(tenant_id=..., model_instance=..., usage=...) is deprecated; " + "use deduct_llm_quota_for_model(...) instead.", + DeprecationWarning, + stacklevel=2, + ) + _require_llm_model_instance(model_instance) + deduct_llm_quota_for_model( + tenant_id=tenant_id, + provider=model_instance.provider, + model=model_instance.model_name, + usage=usage, + ) diff --git a/api/core/app/workflow/layers/llm_quota.py b/api/core/app/workflow/layers/llm_quota.py index 4a7918032e..2422eed5a7 100644 --- a/api/core/app/workflow/layers/llm_quota.py +++ b/api/core/app/workflow/layers/llm_quota.py @@ -1,36 +1,48 @@ """ LLM quota deduction layer for GraphEngine. -This layer centralizes model-quota deduction outside node implementations. +This layer centralizes model-quota handling outside node implementations. + +Graphon LLM-backed nodes expose provider/model identity through public node +configuration and, after execution, through ``node_run_result.inputs``. Resolve +quota billing from that public identity instead of depending on +``ModelInstance`` reconstruction inside the workflow layer. Missing identity on +quota-tracked nodes is treated as a workflow bug and aborts execution so quota +handling is never silently skipped. 
""" import logging -from typing import TYPE_CHECKING, cast, final, override +from typing import final, override -from core.app.entities.app_invoke_entities import DIFY_RUN_CONTEXT_KEY, DifyRunContext -from core.app.llm import deduct_llm_quota, ensure_llm_quota_available +from core.app.llm import deduct_llm_quota_for_model, ensure_llm_quota_available_for_model from core.errors.error import QuotaExceededError -from core.model_manager import ModelInstance -from graphon.enums import BuiltinNodeTypes +from graphon.enums import BuiltinNodeTypes, WorkflowNodeExecutionStatus from graphon.graph_engine.entities.commands import AbortCommand, CommandType from graphon.graph_engine.layers import GraphEngineLayer from graphon.graph_events import GraphEngineEvent, GraphNodeEventBase, NodeRunSucceededEvent +from graphon.node_events import NodeRunResult from graphon.nodes.base.node import Node -if TYPE_CHECKING: - from graphon.nodes.llm.node import LLMNode - from graphon.nodes.parameter_extractor.parameter_extractor_node import ParameterExtractorNode - from graphon.nodes.question_classifier.question_classifier_node import QuestionClassifierNode - logger = logging.getLogger(__name__) +_QUOTA_NODE_TYPES = frozenset( + [ + BuiltinNodeTypes.LLM, + BuiltinNodeTypes.PARAMETER_EXTRACTOR, + BuiltinNodeTypes.QUESTION_CLASSIFIER, + ] +) @final class LLMQuotaLayer(GraphEngineLayer): - """Graph layer that applies LLM quota deduction after node execution.""" + """Graph layer that applies tenant-scoped quota checks to LLM-backed nodes.""" - def __init__(self) -> None: + tenant_id: str + _abort_sent: bool + + def __init__(self, tenant_id: str) -> None: super().__init__() + self.tenant_id = tenant_id self._abort_sent = False @override @@ -50,33 +62,49 @@ class LLMQuotaLayer(GraphEngineLayer): if self._abort_sent: return - model_instance = self._extract_model_instance(node) - if model_instance is None: + if not self._supports_quota(node): return + model_identity = 
self._extract_model_identity_from_node(node) + if model_identity is None: + reason = "LLM quota check requires public node model identity before execution." + self._abort_before_node_run(node=node, reason=reason, error_type="LLMQuotaIdentityError") + logger.error("LLM quota handling aborted, node_id=%s, reason=%s", node.id, reason) + return + + provider, model_name = model_identity try: - ensure_llm_quota_available(model_instance=model_instance) + ensure_llm_quota_available_for_model( + tenant_id=self.tenant_id, + provider=provider, + model=model_name, + ) except QuotaExceededError as exc: - self._set_stop_event(node) - self._send_abort_command(reason=str(exc)) + self._abort_before_node_run(node=node, reason=str(exc), error_type=QuotaExceededError.__name__) logger.warning("LLM quota check failed, node_id=%s, error=%s", node.id, exc) @override def on_node_run_end( self, node: Node, error: Exception | None, result_event: GraphNodeEventBase | None = None ) -> None: - if error is not None or not isinstance(result_event, NodeRunSucceededEvent): + if error is not None or not isinstance(result_event, NodeRunSucceededEvent) or not self._supports_quota(node): return - model_instance = self._extract_model_instance(node) - if model_instance is None: + model_identity = self._extract_model_identity_from_result_event(result_event) + if model_identity is None: + self._abort_for_missing_model_identity( + node=node, + reason="LLM quota deduction requires model identity in the node result event.", + ) return + provider, model_name = model_identity + try: - dify_ctx = DifyRunContext.model_validate(node.require_run_context_value(DIFY_RUN_CONTEXT_KEY)) - deduct_llm_quota( - tenant_id=dify_ctx.tenant_id, - model_instance=model_instance, + deduct_llm_quota_for_model( + tenant_id=self.tenant_id, + provider=provider, + model=model_name, usage=result_event.node_run_result.llm_usage, ) except QuotaExceededError as exc: @@ -92,6 +120,27 @@ class LLMQuotaLayer(GraphEngineLayer): if stop_event 
is not None: stop_event.set() + def _abort_before_node_run(self, *, node: Node, reason: str, error_type: str) -> None: + self._set_stop_event(node) + node.node_data.error_strategy = None + node.node_data.retry_config.retry_enabled = False + + def quota_aborted_run() -> NodeRunResult: + return NodeRunResult( + status=WorkflowNodeExecutionStatus.FAILED, + error=reason, + error_type=error_type, + ) + + # TODO: Push Graphon to expose a public pre-run failure/skip hook, then replace this private _run override. + node._run = quota_aborted_run # type: ignore[method-assign] + self._send_abort_command(reason=reason) + + def _abort_for_missing_model_identity(self, *, node: Node, reason: str) -> None: + self._set_stop_event(node) + self._send_abort_command(reason=reason) + logger.error("LLM quota handling aborted, node_id=%s, reason=%s", node.id, reason) + def _send_abort_command(self, *, reason: str) -> None: if not self.command_channel or self._abort_sent: return @@ -108,29 +157,38 @@ class LLMQuotaLayer(GraphEngineLayer): logger.exception("Failed to send quota abort command") @staticmethod - def _extract_model_instance(node: Node) -> ModelInstance | None: - try: - match node.node_type: - case BuiltinNodeTypes.LLM: - model_instance = cast("LLMNode", node).model_instance - case BuiltinNodeTypes.PARAMETER_EXTRACTOR: - model_instance = cast("ParameterExtractorNode", node).model_instance - case BuiltinNodeTypes.QUESTION_CLASSIFIER: - model_instance = cast("QuestionClassifierNode", node).model_instance - case _: - return None - except AttributeError: + def _supports_quota(node: Node) -> bool: + return node.node_type in _QUOTA_NODE_TYPES + + @staticmethod + def _extract_model_identity_from_result_event(result_event: NodeRunSucceededEvent) -> tuple[str, str] | None: + provider = result_event.node_run_result.inputs.get("model_provider") + model_name = result_event.node_run_result.inputs.get("model_name") + if isinstance(provider, str) and provider and isinstance(model_name, str) 
and model_name: + return provider, model_name + return None + + @staticmethod + def _extract_model_identity_from_node(node: Node) -> tuple[str, str] | None: + node_data = getattr(node, "node_data", None) + if node_data is None: + node_data = getattr(node, "data", None) + + model_config = getattr(node_data, "model", None) + if model_config is None: logger.warning( - "LLMQuotaLayer skipped quota deduction because node does not expose a model instance, node_id=%s", + "LLMQuotaLayer skipped quota handling because node model config is missing, node_id=%s", node.id, ) return None - if isinstance(model_instance, ModelInstance): - return model_instance - - raw_model_instance = getattr(model_instance, "_model_instance", None) - if isinstance(raw_model_instance, ModelInstance): - return raw_model_instance + provider = getattr(model_config, "provider", None) + model_name = getattr(model_config, "name", None) + if isinstance(provider, str) and provider and isinstance(model_name, str) and model_name: + return provider, model_name + logger.warning( + "LLMQuotaLayer skipped quota handling because node model identity is invalid, node_id=%s", + node.id, + ) return None diff --git a/api/core/entities/provider_configuration.py b/api/core/entities/provider_configuration.py index 38b87e2cd1..495fd1d898 100644 --- a/api/core/entities/provider_configuration.py +++ b/api/core/entities/provider_configuration.py @@ -23,7 +23,7 @@ from core.entities.provider_entities import ( ) from core.helper import encrypter from core.helper.model_provider_cache import ProviderCredentialsCache, ProviderCredentialsCacheType -from core.plugin.impl.model_runtime_factory import create_plugin_model_provider_factory +from core.plugin.impl.model_runtime_factory import create_model_type_instance, create_plugin_model_assembly from graphon.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType from graphon.model_runtime.entities.provider_entities import ( ConfigurateMethod, @@ -33,7 +33,7 
@@ from graphon.model_runtime.entities.provider_entities import ( ) from graphon.model_runtime.model_providers.base.ai_model import AIModel from graphon.model_runtime.model_providers.model_provider_factory import ModelProviderFactory -from graphon.model_runtime.runtime import ModelRuntime +from graphon.model_runtime.protocols.runtime import ModelRuntime from libs.datetime_utils import naive_utc_now from models.engine import db from models.enums import CredentialSourceType @@ -106,11 +106,18 @@ class ProviderConfiguration(BaseModel): """Attach the already-composed runtime for request-bound call chains.""" self._bound_model_runtime = model_runtime + def _get_runtime_and_provider_factory(self) -> tuple[ModelRuntime, ModelProviderFactory]: + """Resolve a provider factory that stays aligned with the runtime used by the caller.""" + if self._bound_model_runtime is not None: + return self._bound_model_runtime, ModelProviderFactory(runtime=self._bound_model_runtime) + + model_assembly = create_plugin_model_assembly(tenant_id=self.tenant_id) + return model_assembly.model_runtime, model_assembly.model_provider_factory + def get_model_provider_factory(self) -> ModelProviderFactory: """Return a provider factory that preserves any request-bound runtime.""" - if self._bound_model_runtime is not None: - return ModelProviderFactory(model_runtime=self._bound_model_runtime) - return create_plugin_model_provider_factory(tenant_id=self.tenant_id) + _, model_provider_factory = self._get_runtime_and_provider_factory() + return model_provider_factory def get_current_credentials(self, model_type: ModelType, model: str) -> dict[str, Any] | None: """ @@ -1392,10 +1399,13 @@ class ProviderConfiguration(BaseModel): :param model_type: model type :return: """ - model_provider_factory = self.get_model_provider_factory() - - # Get model instance of LLM - return model_provider_factory.get_model_type_instance(provider=self.provider.provider, model_type=model_type) + model_runtime, 
model_provider_factory = self._get_runtime_and_provider_factory() + provider_schema = model_provider_factory.get_provider_schema(provider=self.provider.provider) + return create_model_type_instance( + runtime=model_runtime, + provider_schema=provider_schema, + model_type=model_type, + ) def get_model_schema( self, model_type: ModelType, model: str, credentials: dict[str, Any] | None diff --git a/api/core/helper/moderation.py b/api/core/helper/moderation.py index f169f247cf..18b9b72e9d 100644 --- a/api/core/helper/moderation.py +++ b/api/core/helper/moderation.py @@ -4,7 +4,7 @@ from typing import cast from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity from core.entities import DEFAULT_PLUGIN_ID -from core.plugin.impl.model_runtime_factory import create_plugin_model_provider_factory +from core.plugin.impl.model_runtime_factory import create_plugin_model_assembly from extensions.ext_hosting_provider import hosting_configuration from graphon.model_runtime.entities.model_entities import ModelType from graphon.model_runtime.errors.invoke import InvokeBadRequestError @@ -41,10 +41,8 @@ def check_moderation(tenant_id: str, model_config: ModelConfigWithCredentialsEnt text_chunk = secrets.choice(text_chunks) try: - model_provider_factory = create_plugin_model_provider_factory(tenant_id=tenant_id) - - # Get model instance of LLM - model_type_instance = model_provider_factory.get_model_type_instance( + model_assembly = create_plugin_model_assembly(tenant_id=tenant_id) + model_type_instance = model_assembly.create_model_type_instance( provider=openai_provider_name, model_type=ModelType.MODERATION ) model_type_instance = cast(ModerationModel, model_type_instance) diff --git a/api/core/plugin/impl/model_runtime.py b/api/core/plugin/impl/model_runtime.py index 4e66d58b5e..62573ba2f5 100644 --- a/api/core/plugin/impl/model_runtime.py +++ b/api/core/plugin/impl/model_runtime.py @@ -4,23 +4,32 @@ import hashlib import logging from collections.abc 
import Generator, Iterable, Sequence from threading import Lock -from typing import IO, Any, Union +from typing import IO, Any, Literal, cast, overload from pydantic import ValidationError from redis import RedisError from configs import dify_config +from core.llm_generator.output_parser.structured_output import ( + invoke_llm_with_structured_output as invoke_llm_with_structured_output_helper, +) from core.plugin.entities.plugin_daemon import PluginModelProviderEntity from core.plugin.impl.asset import PluginAssetManager from core.plugin.impl.model import PluginModelClient from extensions.ext_redis import redis_client -from graphon.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk +from graphon.model_runtime.entities.llm_entities import ( + LLMResult, + LLMResultChunk, + LLMResultChunkWithStructuredOutput, + LLMResultWithStructuredOutput, +) from graphon.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool from graphon.model_runtime.entities.model_entities import AIModelEntity, ModelType from graphon.model_runtime.entities.provider_entities import ProviderEntity from graphon.model_runtime.entities.rerank_entities import MultimodalRerankInput, RerankResult from graphon.model_runtime.entities.text_embedding_entities import EmbeddingInputType, EmbeddingResult -from graphon.model_runtime.runtime import ModelRuntime +from graphon.model_runtime.model_providers.base.large_language_model import normalize_non_stream_runtime_result +from graphon.model_runtime.protocols.runtime import ModelRuntime from models.provider_ids import ModelProviderID logger = logging.getLogger(__name__) @@ -29,6 +38,68 @@ logger = logging.getLogger(__name__) TENANT_SCOPE_SCHEMA_CACHE_USER_ID = "__DIFY_TS__" +# TODO(-LAN-): Move native structured-output invocation into Graphon's LLM node. +# TODO(-LAN-): Remove this Dify-side adapter once Graphon owns structured output end-to-end. 
+class _PluginStructuredOutputModelInstance: + """Bind plugin model identity to the shared structured-output helper. + + The structured-output parser is shared with legacy ``ModelInstance`` flows + and only needs an object exposing ``invoke_llm(...)``. ``PluginModelRuntime`` + intentionally exposes a lower-level API where provider, model, and + credentials are passed per call. This adapter supplies the small bound + ``invoke_llm`` surface the helper needs without constructing a full + ``ModelInstance`` or reintroducing model-manager dependencies into the + plugin runtime path. + """ + + def __init__( + self, + *, + runtime: PluginModelRuntime, + provider: str, + model: str, + credentials: dict[str, Any], + ) -> None: + self._runtime = runtime + self._provider = provider + self._model = model + self._credentials = credentials + + def invoke_llm( + self, + *, + prompt_messages: Sequence[PromptMessage], + model_parameters: dict[str, Any] | None = None, + tools: Sequence[PromptMessageTool] | None = None, + stop: Sequence[str] | None = None, + stream: bool = True, + callbacks: object | None = None, + ) -> LLMResult | Generator[LLMResultChunk, None, None]: + del callbacks + if stream: + return self._runtime.invoke_llm( + provider=self._provider, + model=self._model, + credentials=self._credentials, + model_parameters=model_parameters or {}, + prompt_messages=prompt_messages, + tools=list(tools) if tools else None, + stop=stop, + stream=True, + ) + + return self._runtime.invoke_llm( + provider=self._provider, + model=self._model, + credentials=self._credentials, + model_parameters=model_parameters or {}, + prompt_messages=prompt_messages, + tools=list(tools) if tools else None, + stop=stop, + stream=False, + ) + + class PluginModelRuntime(ModelRuntime): """Plugin-backed runtime adapter bound to tenant context and optional caller scope.""" @@ -195,6 +266,34 @@ class PluginModelRuntime(ModelRuntime): return schema + @overload + def invoke_llm( + self, + *, + provider: str, 
+ model: str, + credentials: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + tools: list[PromptMessageTool] | None, + stop: Sequence[str] | None, + stream: Literal[False], + ) -> LLMResult: ... + + @overload + def invoke_llm( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + tools: list[PromptMessageTool] | None, + stop: Sequence[str] | None, + stream: Literal[True], + ) -> Generator[LLMResultChunk, None, None]: ... + def invoke_llm( self, *, @@ -206,9 +305,9 @@ class PluginModelRuntime(ModelRuntime): tools: list[PromptMessageTool] | None, stop: Sequence[str] | None, stream: bool, - ) -> Union[LLMResult, Generator[LLMResultChunk, None, None]]: + ) -> LLMResult | Generator[LLMResultChunk, None, None]: plugin_id, provider_name = self._split_provider(provider) - return self.client.invoke_llm( + result = self.client.invoke_llm( tenant_id=self.tenant_id, user_id=self.user_id, plugin_id=plugin_id, @@ -221,6 +320,81 @@ class PluginModelRuntime(ModelRuntime): stop=list(stop) if stop else None, stream=stream, ) + if stream: + return result + + return normalize_non_stream_runtime_result( + model=model, + prompt_messages=prompt_messages, + result=result, + ) + + @overload + def invoke_llm_with_structured_output( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + json_schema: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + stop: Sequence[str] | None, + stream: Literal[False], + ) -> LLMResultWithStructuredOutput: ... 
+ + @overload + def invoke_llm_with_structured_output( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + json_schema: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + stop: Sequence[str] | None, + stream: Literal[True], + ) -> Generator[LLMResultChunkWithStructuredOutput, None, None]: ... + + def invoke_llm_with_structured_output( + self, + *, + provider: str, + model: str, + credentials: dict[str, Any], + json_schema: dict[str, Any], + model_parameters: dict[str, Any], + prompt_messages: Sequence[PromptMessage], + stop: Sequence[str] | None, + stream: bool, + ) -> LLMResultWithStructuredOutput | Generator[LLMResultChunkWithStructuredOutput, None, None]: + model_schema = self.get_model_schema( + provider=provider, + model_type=ModelType.LLM, + model=model, + credentials=credentials, + ) + if model_schema is None: + raise ValueError(f"Model schema not found for {model}") + + adapter = _PluginStructuredOutputModelInstance( + runtime=self, + provider=provider, + model=model, + credentials=credentials, + ) + return invoke_llm_with_structured_output_helper( + provider=provider, + model_schema=model_schema, + model_instance=cast(Any, adapter), + prompt_messages=prompt_messages, + json_schema=json_schema, + model_parameters=model_parameters, + tools=None, + stop=list(stop) if stop else None, + stream=stream, + ) def get_llm_num_tokens( self, diff --git a/api/core/plugin/impl/model_runtime_factory.py b/api/core/plugin/impl/model_runtime_factory.py index 35abd2ae8c..fbe307ea60 100644 --- a/api/core/plugin/impl/model_runtime_factory.py +++ b/api/core/plugin/impl/model_runtime_factory.py @@ -3,13 +3,46 @@ from __future__ import annotations from typing import TYPE_CHECKING from core.plugin.impl.model import PluginModelClient +from graphon.model_runtime.entities.model_entities import ModelType +from graphon.model_runtime.entities.provider_entities import ProviderEntity +from 
graphon.model_runtime.model_providers.base.ai_model import AIModel +from graphon.model_runtime.model_providers.base.large_language_model import LargeLanguageModel +from graphon.model_runtime.model_providers.base.moderation_model import ModerationModel +from graphon.model_runtime.model_providers.base.rerank_model import RerankModel +from graphon.model_runtime.model_providers.base.speech2text_model import Speech2TextModel +from graphon.model_runtime.model_providers.base.text_embedding_model import TextEmbeddingModel +from graphon.model_runtime.model_providers.base.tts_model import TTSModel from graphon.model_runtime.model_providers.model_provider_factory import ModelProviderFactory +from graphon.model_runtime.protocols.runtime import ModelRuntime if TYPE_CHECKING: from core.model_manager import ModelManager from core.plugin.impl.model_runtime import PluginModelRuntime from core.provider_manager import ProviderManager +_MODEL_CLASS_BY_TYPE: dict[ModelType, type[AIModel]] = { + ModelType.LLM: LargeLanguageModel, + ModelType.TEXT_EMBEDDING: TextEmbeddingModel, + ModelType.RERANK: RerankModel, + ModelType.SPEECH2TEXT: Speech2TextModel, + ModelType.MODERATION: ModerationModel, + ModelType.TTS: TTSModel, +} + + +def create_model_type_instance( + *, + runtime: ModelRuntime, + provider_schema: ProviderEntity, + model_type: ModelType, +) -> AIModel: + """Build the graphon model wrapper explicitly against the request runtime.""" + model_class = _MODEL_CLASS_BY_TYPE.get(model_type) + if model_class is None: + raise ValueError(f"Unsupported model type: {model_type}") + + return model_class(provider_schema=provider_schema, model_runtime=runtime) + class PluginModelAssembly: """Compose request-scoped model views on top of a single plugin runtime.""" @@ -38,9 +71,22 @@ class PluginModelAssembly: @property def model_provider_factory(self) -> ModelProviderFactory: if self._model_provider_factory is None: - self._model_provider_factory = 
ModelProviderFactory(model_runtime=self.model_runtime) + self._model_provider_factory = ModelProviderFactory(runtime=self.model_runtime) return self._model_provider_factory + def create_model_type_instance( + self, + *, + provider: str, + model_type: ModelType, + ) -> AIModel: + provider_schema = self.model_provider_factory.get_provider_schema(provider=provider) + return create_model_type_instance( + runtime=self.model_runtime, + provider_schema=provider_schema, + model_type=model_type, + ) + @property def provider_manager(self) -> ProviderManager: if self._provider_manager is None: diff --git a/api/core/provider_manager.py b/api/core/provider_manager.py index b290ae456e..9faa70a0b8 100644 --- a/api/core/provider_manager.py +++ b/api/core/provider_manager.py @@ -56,7 +56,7 @@ from models.provider_ids import ModelProviderID from services.feature_service import FeatureService if TYPE_CHECKING: - from graphon.model_runtime.runtime import ModelRuntime + from graphon.model_runtime.protocols.runtime import ModelRuntime _credentials_adapter: TypeAdapter[dict[str, Any]] = TypeAdapter(dict[str, Any]) @@ -165,7 +165,7 @@ class ProviderManager: ) # Get all provider entities - model_provider_factory = ModelProviderFactory(model_runtime=self._model_runtime) + model_provider_factory = ModelProviderFactory(runtime=self._model_runtime) provider_entities = model_provider_factory.get_providers() # Get All preferred provider types of the workspace @@ -362,7 +362,7 @@ class ProviderManager: if not default_model: return None - model_provider_factory = ModelProviderFactory(model_runtime=self._model_runtime) + model_provider_factory = ModelProviderFactory(runtime=self._model_runtime) provider_schema = model_provider_factory.get_provider_schema(provider=default_model.provider_name) return DefaultModelEntity( diff --git a/api/core/workflow/node_factory.py b/api/core/workflow/node_factory.py index 895953a3c1..a306b1c9ac 100644 --- a/api/core/workflow/node_factory.py +++ 
b/api/core/workflow/node_factory.py @@ -374,11 +374,6 @@ class DifyNodeFactory(NodeFactory): # Re-validate using the resolved node class so workflow-local node schemas # stay explicit and constructors receive the concrete typed payload. resolved_node_data = self._validate_resolved_node_data(node_class, node_data) - config_for_node_init: BaseNodeData | dict[str, Any] - if isinstance(resolved_node_data, BaseNodeData): - config_for_node_init = resolved_node_data.model_dump(mode="python", by_alias=True) - else: - config_for_node_init = resolved_node_data node_type = node_data.type node_init_kwargs_factories: Mapping[NodeType, Callable[[], dict[str, object]]] = { BuiltinNodeTypes.CODE: lambda: { @@ -446,9 +441,10 @@ class DifyNodeFactory(NodeFactory): }, } node_init_kwargs = node_init_kwargs_factories.get(node_type, lambda: {})() + constructor_node_data = resolved_node_data.model_dump(mode="python", by_alias=True) return node_class( node_id=node_id, - config=config_for_node_init, + data=constructor_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, **node_init_kwargs, diff --git a/api/core/workflow/nodes/agent/agent_node.py b/api/core/workflow/nodes/agent/agent_node.py index 68a24e86b1..17d71668cb 100644 --- a/api/core/workflow/nodes/agent/agent_node.py +++ b/api/core/workflow/nodes/agent/agent_node.py @@ -35,7 +35,7 @@ class AgentNode(Node[AgentNodeData]): def __init__( self, node_id: str, - config: AgentNodeData, + data: AgentNodeData, *, graph_init_params: GraphInitParams, graph_runtime_state: GraphRuntimeState, @@ -46,7 +46,7 @@ class AgentNode(Node[AgentNodeData]): ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) diff --git a/api/core/workflow/nodes/datasource/datasource_node.py b/api/core/workflow/nodes/datasource/datasource_node.py index f3006c4242..a4ef3d1ea7 100644 --- 
a/api/core/workflow/nodes/datasource/datasource_node.py +++ b/api/core/workflow/nodes/datasource/datasource_node.py @@ -36,14 +36,14 @@ class DatasourceNode(Node[DatasourceNodeData]): def __init__( self, node_id: str, - config: DatasourceNodeData, + data: DatasourceNodeData, *, graph_init_params: "GraphInitParams", graph_runtime_state: "GraphRuntimeState", ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) diff --git a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py index 9c1b7ab2c4..1d60f530a1 100644 --- a/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py +++ b/api/core/workflow/nodes/knowledge_index/knowledge_index_node.py @@ -32,14 +32,14 @@ class KnowledgeIndexNode(Node[KnowledgeIndexNodeData]): def __init__( self, node_id: str, - config: KnowledgeIndexNodeData, + data: KnowledgeIndexNodeData, *, graph_init_params: "GraphInitParams", graph_runtime_state: "GraphRuntimeState", ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index 25f73e446d..1aba2737b0 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -71,14 +71,14 @@ class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node[KnowledgeRetrievalNodeD def __init__( self, node_id: str, - config: KnowledgeRetrievalNodeData, + data: KnowledgeRetrievalNodeData, *, graph_init_params: "GraphInitParams", graph_runtime_state: "GraphRuntimeState", ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, 
graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) diff --git a/api/core/workflow/system_variables.py b/api/core/workflow/system_variables.py index 9d15a3fcea..77ef3826e9 100644 --- a/api/core/workflow/system_variables.py +++ b/api/core/workflow/system_variables.py @@ -3,7 +3,7 @@ from __future__ import annotations from collections import defaultdict from collections.abc import Mapping, Sequence from enum import StrEnum -from typing import Any, Protocol, cast +from typing import Any, Protocol from uuid import uuid4 from graphon.enums import BuiltinNodeTypes @@ -82,13 +82,10 @@ def build_system_variables(values: Mapping[str, Any] | None = None, /, **kwargs: normalized = _normalize_system_variable_values(values, **kwargs) return [ - cast( - Variable, - segment_to_variable( - segment=build_segment(value), - selector=system_variable_selector(key), - name=key, - ), + segment_to_variable( + segment=build_segment(value), + selector=system_variable_selector(key), + name=key, ) for key, value in normalized.items() ] @@ -130,13 +127,10 @@ def build_bootstrap_variables( for node_id, value in rag_pipeline_variables_map.items(): variables.append( - cast( - Variable, - segment_to_variable( - segment=build_segment(value), - selector=(RAG_PIPELINE_VARIABLE_NODE_ID, node_id), - name=node_id, - ), + segment_to_variable( + segment=build_segment(value), + selector=(RAG_PIPELINE_VARIABLE_NODE_ID, node_id), + name=node_id, ) ) diff --git a/api/core/workflow/workflow_entry.py b/api/core/workflow/workflow_entry.py index 4e2f603e5b..3019704dac 100644 --- a/api/core/workflow/workflow_entry.py +++ b/api/core/workflow/workflow_entry.py @@ -46,6 +46,11 @@ _file_access_controller = DatabaseFileAccessController() class _WorkflowChildEngineBuilder: + tenant_id: str + + def __init__(self, *, tenant_id: str) -> None: + self.tenant_id = tenant_id + @staticmethod def _has_node_id(graph_config: Mapping[str, Any], node_id: str) -> bool | None: """ @@ -107,7 +112,7 @@ class 
_WorkflowChildEngineBuilder: config=config, child_engine_builder=self, ) - child_engine.layer(LLMQuotaLayer()) + child_engine.layer(LLMQuotaLayer(tenant_id=self.tenant_id)) return child_engine @@ -176,7 +181,7 @@ class WorkflowEntry: self.command_channel = command_channel execution_context = capture_current_context() graph_runtime_state.execution_context = execution_context - self._child_engine_builder = _WorkflowChildEngineBuilder() + self._child_engine_builder = _WorkflowChildEngineBuilder(tenant_id=tenant_id) self.graph_engine = GraphEngine( workflow_id=workflow_id, graph=graph, @@ -208,7 +213,7 @@ class WorkflowEntry: max_steps=dify_config.WORKFLOW_MAX_EXECUTION_STEPS, max_time=dify_config.WORKFLOW_MAX_EXECUTION_TIME ) self.graph_engine.layer(limits_layer) - self.graph_engine.layer(LLMQuotaLayer()) + self.graph_engine.layer(LLMQuotaLayer(tenant_id=tenant_id)) # Add observability layer when OTel is enabled if dify_config.ENABLE_OTEL or is_instrument_flag_enabled(): diff --git a/api/events/event_handlers/update_provider_when_message_created.py b/api/events/event_handlers/update_provider_when_message_created.py index 1d615f0f87..8dec5876a9 100644 --- a/api/events/event_handlers/update_provider_when_message_created.py +++ b/api/events/event_handlers/update_provider_when_message_created.py @@ -137,17 +137,13 @@ def handle(sender: Message, **kwargs): if used_quota is not None: match provider_configuration.system_configuration.current_quota_type: case ProviderQuotaType.TRIAL: - from services.credit_pool_service import CreditPoolService - - CreditPoolService.check_and_deduct_credits( + _deduct_credit_pool_quota_capped( tenant_id=tenant_id, credits_required=used_quota, pool_type="trial", ) case ProviderQuotaType.PAID: - from services.credit_pool_service import CreditPoolService - - CreditPoolService.check_and_deduct_credits( + _deduct_credit_pool_quota_capped( tenant_id=tenant_id, credits_required=used_quota, pool_type="paid", @@ -200,6 +196,26 @@ def handle(sender: 
Message, **kwargs): raise +def _deduct_credit_pool_quota_capped(*, tenant_id: str, credits_required: int, pool_type: str) -> None: + """Apply post-generation credit accounting without failing message persistence on quota exhaustion.""" + from services.credit_pool_service import CreditPoolService + + deducted_credits = CreditPoolService.deduct_credits_capped( + tenant_id=tenant_id, + credits_required=credits_required, + pool_type=pool_type, + ) + if deducted_credits < credits_required: + logger.warning( + "Credit pool exhausted during message-created accounting, " + "tenant_id=%s, pool_type=%s, credits_required=%s, credits_deducted=%s", + tenant_id, + pool_type, + credits_required, + deducted_credits, + ) + + def _calculate_quota_usage( *, message: Message, system_configuration: SystemConfiguration, model_name: str ) -> int | None: diff --git a/api/pyproject.toml b/api/pyproject.toml index 0c488c34d9..6c30779f9d 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -45,7 +45,7 @@ dependencies = [ # Emerging: newer and fast-moving, use compatible pins "fastopenapi[flask]~=0.7.0", - "graphon~=0.2.2", + "graphon~=0.3.0", "httpx-sse~=0.4.0", "json-repair~=0.59.4", ] diff --git a/api/services/credit_pool_service.py b/api/services/credit_pool_service.py index 2d210db121..1f419d7a5b 100644 --- a/api/services/credit_pool_service.py +++ b/api/services/credit_pool_service.py @@ -1,7 +1,7 @@ import logging -from sqlalchemy import select, update -from sqlalchemy.orm import sessionmaker +from sqlalchemy import select +from sqlalchemy.orm import Session, sessionmaker from configs import dify_config from core.errors.error import QuotaExceededError @@ -13,6 +13,18 @@ logger = logging.getLogger(__name__) class CreditPoolService: + @staticmethod + def _get_locked_pool(session: Session, tenant_id: str, pool_type: str) -> TenantCreditPool | None: + return session.scalar( + select(TenantCreditPool) + .where( + TenantCreditPool.tenant_id == tenant_id, + TenantCreditPool.pool_type 
== pool_type, + ) + .limit(1) + .with_for_update() + ) + @classmethod def create_default_pool(cls, tenant_id: str) -> TenantCreditPool: """create default credit pool for new tenant""" @@ -59,31 +71,57 @@ class CreditPoolService: credits_required: int, pool_type: str = "trial", ) -> int: - """check and deduct credits, returns actual credits deducted""" - - pool = cls.get_pool(tenant_id, pool_type) - if not pool: - raise QuotaExceededError("Credit pool not found") - - if pool.remaining_credits <= 0: - raise QuotaExceededError("No credits remaining") - - # deduct all remaining credits if less than required - actual_credits = min(credits_required, pool.remaining_credits) + """Deduct exactly the requested credits or raise without mutating the pool.""" + if credits_required <= 0: + return 0 try: - with sessionmaker(db.engine).begin() as session: - stmt = ( - update(TenantCreditPool) - .where( - TenantCreditPool.tenant_id == tenant_id, - TenantCreditPool.pool_type == pool_type, - ) - .values(quota_used=TenantCreditPool.quota_used + actual_credits) - ) - session.execute(stmt) + with sessionmaker(db.engine, expire_on_commit=False).begin() as session: + pool = cls._get_locked_pool(session=session, tenant_id=tenant_id, pool_type=pool_type) + if not pool: + raise QuotaExceededError("Credit pool not found") + + remaining_credits = pool.remaining_credits + if remaining_credits <= 0: + raise QuotaExceededError("No credits remaining") + if remaining_credits < credits_required: + raise QuotaExceededError("Insufficient credits remaining") + + pool.quota_used += credits_required + except QuotaExceededError: + raise except Exception: logger.exception("Failed to deduct credits for tenant %s", tenant_id) raise QuotaExceededError("Failed to deduct credits") - return actual_credits + return credits_required + + @classmethod + def deduct_credits_capped( + cls, + tenant_id: str, + credits_required: int, + pool_type: str = "trial", + ) -> int: + """Deduct up to the available balance and 
return the actual deducted credits.""" + if credits_required <= 0: + return 0 + + try: + with sessionmaker(db.engine, expire_on_commit=False).begin() as session: + pool = cls._get_locked_pool(session=session, tenant_id=tenant_id, pool_type=pool_type) + if not pool: + logger.warning("Credit pool not found, tenant_id=%s, pool_type=%s", tenant_id, pool_type) + return 0 + + deducted_credits = min(credits_required, pool.remaining_credits) + if deducted_credits <= 0: + return 0 + + pool.quota_used += deducted_credits + return deducted_credits + except QuotaExceededError: + raise + except Exception: + logger.exception("Failed to deduct capped credits for tenant %s", tenant_id) + raise QuotaExceededError("Failed to deduct credits") diff --git a/api/services/workflow_draft_variable_service.py b/api/services/workflow_draft_variable_service.py index a55448e352..59db147576 100644 --- a/api/services/workflow_draft_variable_service.py +++ b/api/services/workflow_draft_variable_service.py @@ -157,8 +157,8 @@ class DraftVarLoader(VariableLoader): # This approach reduces loading time by querying external systems concurrently. 
with ThreadPoolExecutor(max_workers=10) as executor: offloaded_variables = executor.map(self._load_offloaded_variable, offloaded_draft_vars) - for selector, variable in offloaded_variables: - variable_by_selector[selector] = variable + for selector, offloaded_variable in offloaded_variables: + variable_by_selector[selector] = offloaded_variable return list(variable_by_selector.values()) diff --git a/api/services/workflow_service.py b/api/services/workflow_service.py index f97b85dc2b..b8c2ed5e6f 100644 --- a/api/services/workflow_service.py +++ b/api/services/workflow_service.py @@ -1251,7 +1251,7 @@ class WorkflowService: node_data = HumanInputNode.validate_node_data(adapt_human_input_node_data_for_graph(node_config["data"])) node = HumanInputNode( node_id=node_config["id"], - config=node_data, + data=node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, runtime=DifyHumanInputNodeRuntime(run_context), diff --git a/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py b/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py index 2c1e667c58..b9f09ccadd 100644 --- a/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py +++ b/api/tests/integration_tests/core/workflow/nodes/datasource/test_datasource_node_integration.py @@ -73,7 +73,7 @@ def test_node_integration_minimal_stream(mocker: MockerFixture): node = DatasourceNode( node_id="n", - config=DatasourceNodeData( + data=DatasourceNodeData( type="datasource", version="1", title="Datasource", diff --git a/api/tests/integration_tests/workflow/nodes/__mock/model.py b/api/tests/integration_tests/workflow/nodes/__mock/model.py index a9a2617bae..a77fe5970a 100644 --- a/api/tests/integration_tests/workflow/nodes/__mock/model.py +++ b/api/tests/integration_tests/workflow/nodes/__mock/model.py @@ -4,7 +4,7 @@ from core.app.entities.app_invoke_entities import 
ModelConfigWithCredentialsEnti from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration from core.model_manager import ModelInstance -from core.plugin.impl.model_runtime_factory import create_plugin_model_provider_factory +from core.plugin.impl.model_runtime_factory import create_plugin_model_assembly from graphon.model_runtime.entities.model_entities import ModelType from models.provider import ProviderType @@ -15,8 +15,9 @@ def get_mocked_fetch_model_config( mode: str, credentials: dict, ): - model_provider_factory = create_plugin_model_provider_factory(tenant_id="9d2074fc-6f86-45a9-b09d-6ecc63b9056b") - model_type_instance = model_provider_factory.get_model_type_instance(provider, ModelType.LLM) + model_assembly = create_plugin_model_assembly(tenant_id="9d2074fc-6f86-45a9-b09d-6ecc63b9056b") + model_provider_factory = model_assembly.model_provider_factory + model_type_instance = model_assembly.create_model_type_instance(provider=provider, model_type=ModelType.LLM) provider_model_bundle = ProviderModelBundle( configuration=ProviderConfiguration( tenant_id="1", diff --git a/api/tests/integration_tests/workflow/nodes/test_code.py b/api/tests/integration_tests/workflow/nodes/test_code.py index aaa6092993..9345113aa3 100644 --- a/api/tests/integration_tests/workflow/nodes/test_code.py +++ b/api/tests/integration_tests/workflow/nodes/test_code.py @@ -45,7 +45,7 @@ def init_code_node(code_config: dict): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -66,7 +66,7 @@ def init_code_node(code_config: dict): node = CodeNode( node_id=str(uuid.uuid4()), - config=CodeNodeData.model_validate(code_config["data"]), + 
data=CodeNodeData.model_validate(code_config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, code_executor=node_factory._code_executor, diff --git a/api/tests/integration_tests/workflow/nodes/test_http.py b/api/tests/integration_tests/workflow/nodes/test_http.py index b9f7b9575b..7cd7f50b77 100644 --- a/api/tests/integration_tests/workflow/nodes/test_http.py +++ b/api/tests/integration_tests/workflow/nodes/test_http.py @@ -55,7 +55,7 @@ def init_http_node(config: dict): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -76,7 +76,7 @@ def init_http_node(config: dict): node = HttpRequestNode( node_id=str(uuid.uuid4()), - config=HttpRequestNodeData.model_validate(config["data"]), + data=HttpRequestNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, http_request_config=HTTP_REQUEST_CONFIG, @@ -204,7 +204,7 @@ def test_custom_auth_with_empty_api_key_raises_error(setup_http_mock): from graphon.runtime import VariablePool # Create variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="test", files=[]), user_inputs={}, environment_variables=[], @@ -702,7 +702,7 @@ def test_nested_object_variable_selector(setup_http_mock): ) # Create independent variable pool for this test only - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -724,7 +724,7 @@ def test_nested_object_variable_selector(setup_http_mock): node = HttpRequestNode( node_id=str(uuid.uuid4()), - config=HttpRequestNodeData.model_validate(graph_config["nodes"][1]["data"]), + 
data=HttpRequestNodeData.model_validate(graph_config["nodes"][1]["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, http_request_config=HTTP_REQUEST_CONFIG, diff --git a/api/tests/integration_tests/workflow/nodes/test_llm.py b/api/tests/integration_tests/workflow/nodes/test_llm.py index 3eead70163..92f3a1926c 100644 --- a/api/tests/integration_tests/workflow/nodes/test_llm.py +++ b/api/tests/integration_tests/workflow/nodes/test_llm.py @@ -53,7 +53,7 @@ def init_llm_node(config: dict) -> LLMNode: ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="aaa", app_id=app_id, @@ -77,7 +77,7 @@ def init_llm_node(config: dict) -> LLMNode: node = LLMNode( node_id=str(uuid.uuid4()), - config=LLMNodeData.model_validate(config["data"]), + data=LLMNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, credentials_provider=MagicMock(spec=CredentialsProvider), diff --git a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py index f2eabb86c3..f11188323a 100644 --- a/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py +++ b/api/tests/integration_tests/workflow/nodes/test_parameter_extractor.py @@ -56,7 +56,7 @@ def init_parameter_extractor_node(config: dict, memory=None): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="aaa", files=[], query="what's the weather in SF", conversation_id="abababa" ), @@ -71,7 +71,7 @@ def init_parameter_extractor_node(config: dict, memory=None): node = ParameterExtractorNode( node_id=str(uuid.uuid4()), - config=ParameterExtractorNodeData.model_validate(config["data"]), + data=ParameterExtractorNodeData.model_validate(config["data"]), 
graph_init_params=init_params, graph_runtime_state=graph_runtime_state, credentials_provider=MagicMock(spec=CredentialsProvider), diff --git a/api/tests/integration_tests/workflow/nodes/test_template_transform.py b/api/tests/integration_tests/workflow/nodes/test_template_transform.py index e2e0723fb8..80489e6809 100644 --- a/api/tests/integration_tests/workflow/nodes/test_template_transform.py +++ b/api/tests/integration_tests/workflow/nodes/test_template_transform.py @@ -66,7 +66,7 @@ def test_execute_template_transform(): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -88,7 +88,7 @@ def test_execute_template_transform(): node = TemplateTransformNode( node_id=str(uuid.uuid4()), - config=TemplateTransformNodeData.model_validate(config["data"]), + data=TemplateTransformNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, jinja2_template_renderer=_SimpleJinja2Renderer(), diff --git a/api/tests/integration_tests/workflow/nodes/test_tool.py b/api/tests/integration_tests/workflow/nodes/test_tool.py index 493330e02b..78c12e7ea5 100644 --- a/api/tests/integration_tests/workflow/nodes/test_tool.py +++ b/api/tests/integration_tests/workflow/nodes/test_tool.py @@ -43,7 +43,7 @@ def init_tool_node(config: dict): ) # construct variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -64,7 +64,7 @@ def init_tool_node(config: dict): node = ToolNode( node_id=str(uuid.uuid4()), - config=ToolNodeData.model_validate(config["data"]), + data=ToolNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, tool_file_manager_factory=tool_file_manager_factory, diff 
--git a/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py b/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py index bd13527e14..66b3392a4b 100644 --- a/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py +++ b/api/tests/test_containers_integration_tests/core/app/layers/test_pause_state_persist_layer.py @@ -210,7 +210,9 @@ class TestPauseStatePersistenceLayerTestContainers: execution_id = workflow_run_id or getattr(self, "test_workflow_run_id", None) or str(uuid.uuid4()) # Create variable pool - variable_pool = VariablePool(system_variables=build_system_variables(workflow_execution_id=execution_id)) + variable_pool = VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id=execution_id) + ) if variables: for (node_id, var_key), value in variables.items(): variable_pool.add([node_id, var_key], value) diff --git a/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py b/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py index 5aed230cd4..ad82b8fe2a 100644 --- a/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py +++ b/api/tests/test_containers_integration_tests/core/workflow/test_human_input_resume_node_execution.py @@ -66,7 +66,7 @@ def _mock_form_repository_with_submission(action_id: str) -> HumanInputFormRepos def _build_runtime_state(workflow_execution_id: str, app_id: str, workflow_id: str, user_id: str) -> GraphRuntimeState: - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( workflow_execution_id=workflow_execution_id, app_id=app_id, @@ -102,7 +102,7 @@ def _build_graph( start_data = StartNodeData(title="start", variables=[]) start_node = StartNode( node_id="start", - config=start_data, + 
data=start_data, graph_init_params=params, graph_runtime_state=runtime_state, ) @@ -117,7 +117,7 @@ def _build_graph( ) human_node = HumanInputNode( node_id="human", - config=human_data, + data=human_data, graph_init_params=params, graph_runtime_state=runtime_state, form_repository=form_repository, @@ -131,7 +131,7 @@ def _build_graph( ) end_node = EndNode( node_id="end", - config=end_data, + data=end_data, graph_init_params=params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py b/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py index 09ba041244..07dc3a4e9e 100644 --- a/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py +++ b/api/tests/test_containers_integration_tests/services/test_credit_pool_service.py @@ -90,16 +90,34 @@ class TestCreditPoolService: pool = CreditPoolService.get_pool(tenant_id=tenant_id) assert pool.quota_used == credits_required - def test_check_and_deduct_credits_caps_at_remaining(self, db_session_with_containers: Session): + def test_check_and_deduct_credits_raises_without_deducting_when_insufficient( + self, db_session_with_containers: Session + ): tenant_id = self._create_tenant_id() pool = CreditPoolService.create_default_pool(tenant_id) remaining = 5 pool.quota_used = pool.quota_limit - remaining + quota_used = pool.quota_used db_session_with_containers.commit() - result = CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=200) + with pytest.raises(QuotaExceededError, match="Insufficient credits remaining"): + CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=200) + + db_session_with_containers.expire_all() + updated_pool = CreditPoolService.get_pool(tenant_id=tenant_id) + assert updated_pool.quota_used == quota_used + + def test_deduct_credits_capped_depletes_available_balance(self, db_session_with_containers: Session): + tenant_id = 
self._create_tenant_id() + pool = CreditPoolService.create_default_pool(tenant_id) + remaining = 5 + pool.quota_used = pool.quota_limit - remaining + quota_limit = pool.quota_limit + db_session_with_containers.commit() + + result = CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=200) assert result == remaining db_session_with_containers.expire_all() updated_pool = CreditPoolService.get_pool(tenant_id=tenant_id) - assert updated_pool.quota_used == pool.quota_limit + assert updated_pool.quota_used == quota_limit diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py index 1d72d7807d..d8f794b483 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py @@ -132,7 +132,9 @@ class TestAdvancedChatGenerateTaskPipeline: pipeline._task_state.answer = "partial answer" pipeline._workflow_run_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=build_test_variable_pool( + variables=build_system_variables(workflow_execution_id="run-id"), + ), start_at=0.0, total_tokens=7, node_run_steps=3, @@ -372,7 +374,9 @@ class TestAdvancedChatGenerateTaskPipeline: pipeline = _make_pipeline() pipeline._workflow_run_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._workflow_response_converter.workflow_finish_to_stream_response = lambda **kwargs: "finish" @@ -583,7 +587,9 @@ class 
TestAdvancedChatGenerateTaskPipeline: self.items = items graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) @@ -617,7 +623,9 @@ class TestAdvancedChatGenerateTaskPipeline: def test_handle_message_end_event_applies_output_moderation(self, monkeypatch: pytest.MonkeyPatch): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._base_task_pipeline.handle_output_moderation_when_task_finished = lambda answer: "safe" diff --git a/api/tests/unit_tests/core/app/apps/test_pause_resume.py b/api/tests/unit_tests/core/app/apps/test_pause_resume.py index aa71f4d9c4..1acebfee17 100644 --- a/api/tests/unit_tests/core/app/apps/test_pause_resume.py +++ b/api/tests/unit_tests/core/app/apps/test_pause_resume.py @@ -60,7 +60,7 @@ class _StubToolNode(Node[_StubToolNodeData]): def __init__( self, node_id: str, - config: _StubToolNodeData, + data: _StubToolNodeData, *, graph_init_params, graph_runtime_state, @@ -68,7 +68,7 @@ class _StubToolNode(Node[_StubToolNodeData]): ) -> None: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) @@ -169,7 +169,7 @@ def _build_graph(runtime_state: GraphRuntimeState, *, pause_on: str | None) -> G def _build_runtime_state(run_id: str) -> GraphRuntimeState: - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="user", app_id="app", workflow_id="workflow"), user_inputs={}, 
conversation_variables=[], diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py index 4a0d4f490e..3949c41eae 100644 --- a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_core.py @@ -54,7 +54,7 @@ class TestWorkflowBasedAppRunner: runner = WorkflowBasedAppRunner(queue_manager=SimpleNamespace(), app_id="app") runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) @@ -93,7 +93,7 @@ class TestWorkflowBasedAppRunner: def test_get_graph_and_variable_pool_for_single_node_run(self, monkeypatch: pytest.MonkeyPatch): runner = WorkflowBasedAppRunner(queue_manager=SimpleNamespace(), app_id="app") graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) @@ -164,7 +164,7 @@ class TestWorkflowBasedAppRunner: app_id="app", ) graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) @@ -243,7 +243,7 @@ class TestWorkflowBasedAppRunner: runner = WorkflowBasedAppRunner(queue_manager=_QueueManager(), app_id="app") graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) graph_runtime_state.register_paused_node("node-1") @@ -286,7 +286,7 @@ class TestWorkflowBasedAppRunner: runner = WorkflowBasedAppRunner(queue_manager=_QueueManager(), app_id="app") 
graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) workflow_entry = SimpleNamespace(graph_engine=SimpleNamespace(graph_runtime_state=graph_runtime_state)) @@ -425,7 +425,7 @@ class TestWorkflowBasedAppRunner: runner = WorkflowBasedAppRunner(queue_manager=_QueueManager(), app_id="app") graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), start_at=0.0, ) workflow_entry = SimpleNamespace(graph_engine=SimpleNamespace(graph_runtime_state=graph_runtime_state)) diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_single_node.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_single_node.py index 620a153204..248fed5388 100644 --- a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_single_node.py +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_single_node.py @@ -16,7 +16,7 @@ from models.workflow import Workflow def _make_graph_state(): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, environment_variables=[], diff --git a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py index 1311d5e9cb..ea21a1cc1a 100644 --- a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py @@ -95,7 +95,9 @@ class TestWorkflowGenerateTaskPipeline: def test_to_blocking_response_falls_back_to_human_input_required_when_pause_event_missing(self): pipeline = _make_pipeline() pipeline._graph_runtime_state = 
GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=build_test_variable_pool( + variables=build_system_variables(workflow_execution_id="run-id"), + ), start_at=0.0, total_tokens=5, node_run_steps=2, @@ -283,7 +285,9 @@ class TestWorkflowGenerateTaskPipeline: pipeline = _make_pipeline() pipeline._workflow_execution_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._workflow_response_converter.workflow_finish_to_stream_response = lambda **kwargs: "finish" @@ -725,7 +729,9 @@ class TestWorkflowGenerateTaskPipeline: pipeline = _make_pipeline() pipeline._workflow_execution_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) @@ -753,7 +759,9 @@ class TestWorkflowGenerateTaskPipeline: pipeline = _make_pipeline() pipeline._workflow_execution_id = "run-id" pipeline._graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._handle_ping_event = lambda event, **kwargs: iter(["ping"]) @@ -769,7 +777,9 @@ class TestWorkflowGenerateTaskPipeline: def test_process_stream_response_main_match_paths_and_cleanup(self): pipeline = _make_pipeline() pipeline._graph_runtime_state = GraphRuntimeState( - 
variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-id") + ), start_at=0.0, ) pipeline._base_task_pipeline.queue_manager.listen = lambda: iter( diff --git a/api/tests/unit_tests/core/app/layers/test_trigger_post_layer.py b/api/tests/unit_tests/core/app/layers/test_trigger_post_layer.py index d3bd15b6f3..320a3bc42c 100644 --- a/api/tests/unit_tests/core/app/layers/test_trigger_post_layer.py +++ b/api/tests/unit_tests/core/app/layers/test_trigger_post_layer.py @@ -21,7 +21,9 @@ class TestTriggerPostLayer: ) runtime_state = SimpleNamespace( outputs={"answer": "ok"}, - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-1")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-1") + ), total_tokens=12, ) @@ -60,7 +62,9 @@ class TestTriggerPostLayer: def test_on_event_handles_missing_trigger_log(self): runtime_state = SimpleNamespace( outputs={}, - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-1")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-1") + ), total_tokens=0, ) @@ -91,7 +95,9 @@ class TestTriggerPostLayer: def test_on_event_ignores_non_status_events(self): runtime_state = SimpleNamespace( outputs={}, - variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-1")), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(workflow_execution_id="run-1") + ), total_tokens=0, ) diff --git a/api/tests/unit_tests/core/app/test_llm_quota.py b/api/tests/unit_tests/core/app/test_llm_quota.py new file mode 100644 index 0000000000..d9390a4a8f --- /dev/null +++ b/api/tests/unit_tests/core/app/test_llm_quota.py @@ -0,0 +1,617 @@ 
+from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +import pytest +from sqlalchemy import create_engine, select + +from configs import dify_config +from core.app.llm.quota import ( + deduct_llm_quota, + deduct_llm_quota_for_model, + ensure_llm_quota_available, + ensure_llm_quota_available_for_model, +) +from core.entities.model_entities import ModelStatus +from core.entities.provider_entities import ProviderQuotaType, QuotaUnit +from core.errors.error import QuotaExceededError +from graphon.model_runtime.entities.llm_entities import LLMUsage +from graphon.model_runtime.entities.model_entities import ModelType +from models import TenantCreditPool +from models.enums import ProviderQuotaType as ModelProviderQuotaType +from models.provider import Provider, ProviderType + + +def test_ensure_llm_quota_available_for_model_raises_when_system_model_is_exhausted() -> None: + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + get_provider_model=MagicMock(return_value=SimpleNamespace(status=ModelStatus.QUOTA_EXCEEDED)), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + pytest.raises(QuotaExceededError, match="Model provider openai quota exceeded."), + ): + ensure_llm_quota_available_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) + + provider_configuration.get_provider_model.assert_called_once_with( + model_type=ModelType.LLM, + model="gpt-4o", + ) + + +def test_ensure_llm_quota_available_for_model_raises_when_provider_is_missing() -> None: + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = None + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + pytest.raises(ValueError, match="Provider openai 
does not exist."), + ): + ensure_llm_quota_available_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) + + +def test_ensure_llm_quota_available_for_model_ignores_custom_provider_configuration() -> None: + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.CUSTOM, + get_provider_model=MagicMock(), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager): + ensure_llm_quota_available_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) + + provider_configuration.get_provider_model.assert_not_called() + + +def test_deduct_llm_quota_for_model_uses_identity_based_trial_billing() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 42 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_called_once_with( + tenant_id="tenant-id", + credits_required=42, + ) + + +def test_deduct_llm_quota_for_model_caps_trial_pool_when_usage_exceeds_remaining() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 3 + provider_configuration = SimpleNamespace( + 
using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + engine = create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + with engine.begin() as connection: + connection.execute( + TenantCreditPool.__table__.insert(), + { + "id": "trial-pool", + "tenant_id": "tenant-id", + "pool_type": ModelProviderQuotaType.TRIAL, + "quota_limit": 10, + "quota_used": 9, + }, + ) + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + with engine.connect() as connection: + quota_used = connection.scalar(select(TenantCreditPool.quota_used).where(TenantCreditPool.id == "trial-pool")) + + assert quota_used == 10 + + +def test_deduct_llm_quota_for_model_returns_for_unbounded_quota() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 42 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TOKENS, + quota_limit=-1, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as 
mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_not_called() + + +def test_deduct_llm_quota_for_model_uses_credit_configuration() -> None: + usage = LLMUsage.empty_usage() + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.CREDITS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch.object(type(dify_config), "get_model_credits", return_value=9) as mock_get_model_credits, + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_get_model_credits.assert_called_once_with("gpt-4o") + mock_deduct_credits.assert_called_once_with( + tenant_id="tenant-id", + credits_required=9, + ) + + +def test_deduct_llm_quota_for_model_uses_single_charge_for_times_quota() -> None: + usage = LLMUsage.empty_usage() + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TIMES, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), 
+ patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_called_once_with( + tenant_id="tenant-id", + credits_required=1, + ) + + +def test_deduct_llm_quota_for_model_uses_paid_billing_pool() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 5 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.PAID, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.PAID, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_called_once_with( + tenant_id="tenant-id", + credits_required=5, + pool_type="paid", + ) + + +def test_deduct_llm_quota_for_model_updates_free_quota_usage() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 3 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.FREE, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.FREE, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + engine = create_engine("sqlite:///:memory:") + 
Provider.__table__.create(engine) + with engine.begin() as connection: + connection.execute( + Provider.__table__.insert(), + [ + { + "id": "matching-provider", + "tenant_id": "tenant-id", + "provider_name": "openai", + "provider_type": ProviderType.SYSTEM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 100, + "quota_used": 10, + "is_valid": True, + }, + { + "id": "other-tenant", + "tenant_id": "other-tenant-id", + "provider_name": "openai", + "provider_type": ProviderType.SYSTEM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 100, + "quota_used": 20, + "is_valid": True, + }, + { + "id": "other-provider", + "tenant_id": "tenant-id", + "provider_name": "anthropic", + "provider_type": ProviderType.SYSTEM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 100, + "quota_used": 30, + "is_valid": True, + }, + { + "id": "custom-provider", + "tenant_id": "tenant-id", + "provider_name": "openai", + "provider_type": ProviderType.CUSTOM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 100, + "quota_used": 40, + "is_valid": True, + }, + ], + ) + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("core.app.llm.quota.db", SimpleNamespace(engine=engine)), + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + with engine.connect() as connection: + quota_used_by_id = dict(connection.execute(select(Provider.id, Provider.quota_used)).all()) + + assert quota_used_by_id == { + "matching-provider": 13, + "other-tenant": 20, + "other-provider": 30, + "custom-provider": 40, + } + + with engine.begin() as connection: + connection.execute( + Provider.__table__.update().where(Provider.id == "matching-provider").values(quota_limit=13, quota_used=13) + ) + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("core.app.llm.quota.db", SimpleNamespace(engine=engine)), + 
pytest.raises(QuotaExceededError, match="Model provider openai quota exceeded."), + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + with engine.connect() as connection: + exhausted_quota_used = connection.scalar(select(Provider.quota_used).where(Provider.id == "matching-provider")) + + assert exhausted_quota_used == 13 + + +def test_deduct_llm_quota_for_model_caps_free_quota_and_raises_when_usage_exceeds_remaining() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 3 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.FREE, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.FREE, + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + engine = create_engine("sqlite:///:memory:") + Provider.__table__.create(engine) + with engine.begin() as connection: + connection.execute( + Provider.__table__.insert(), + { + "id": "matching-provider", + "tenant_id": "tenant-id", + "provider_name": "openai", + "provider_type": ProviderType.SYSTEM, + "quota_type": ProviderQuotaType.FREE, + "quota_limit": 15, + "quota_used": 13, + "is_valid": True, + }, + ) + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("core.app.llm.quota.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="Model provider openai quota exceeded."), + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + with engine.connect() as connection: + quota_used = connection.scalar(select(Provider.quota_used).where(Provider.id == "matching-provider")) + + assert quota_used == 15 + + +def 
test_deduct_llm_quota_for_model_ignores_unknown_quota_type() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 2 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=SimpleNamespace( + current_quota_type="unexpected", + quota_configurations=[ + SimpleNamespace( + quota_type="unexpected", + quota_unit=QuotaUnit.TOKENS, + quota_limit=100, + ) + ], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + patch("core.app.llm.quota.sessionmaker") as mock_sessionmaker, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_not_called() + mock_sessionmaker.assert_not_called() + + +def test_deduct_llm_quota_for_model_ignores_custom_provider_configuration() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 2 + provider_configuration = SimpleNamespace( + using_provider_type=ProviderType.CUSTOM, + system_configuration=SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[], + ), + ) + provider_manager = MagicMock() + provider_manager.get_configurations.return_value.get.return_value = provider_configuration + + with ( + patch("core.app.llm.quota.create_plugin_provider_manager", return_value=provider_manager), + patch("services.credit_pool_service.CreditPoolService.deduct_credits_capped") as mock_deduct_credits, + patch("core.app.llm.quota.sessionmaker") as mock_sessionmaker, + ): + deduct_llm_quota_for_model( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) + + mock_deduct_credits.assert_not_called() + mock_sessionmaker.assert_not_called() + + +def 
test_ensure_llm_quota_available_wrapper_warns_and_delegates() -> None: + model_instance = SimpleNamespace( + provider="openai", + model_name="gpt-4o", + provider_model_bundle=SimpleNamespace(configuration=SimpleNamespace(tenant_id="tenant-id")), + model_type_instance=SimpleNamespace(model_type=ModelType.LLM), + ) + + with ( + pytest.deprecated_call(match="ensure_llm_quota_available\\(model_instance=.*deprecated"), + patch("core.app.llm.quota.ensure_llm_quota_available_for_model") as mock_ensure, + ): + ensure_llm_quota_available(model_instance=model_instance) + + mock_ensure.assert_called_once_with( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) + + +def test_ensure_llm_quota_available_wrapper_rejects_non_llm_model_instances() -> None: + model_instance = SimpleNamespace( + provider="openai", + model_name="gpt-4o", + provider_model_bundle=SimpleNamespace(configuration=SimpleNamespace(tenant_id="tenant-id")), + model_type_instance=SimpleNamespace(model_type=ModelType.TEXT_EMBEDDING), + ) + + with ( + pytest.deprecated_call(match="ensure_llm_quota_available\\(model_instance=.*deprecated"), + pytest.raises(ValueError, match="only support LLM model instances"), + ): + ensure_llm_quota_available(model_instance=model_instance) + + +def test_deduct_llm_quota_wrapper_warns_and_delegates() -> None: + usage = LLMUsage.empty_usage() + usage.total_tokens = 7 + model_instance = SimpleNamespace( + provider="openai", + model_name="gpt-4o", + model_type_instance=SimpleNamespace(model_type=ModelType.LLM), + provider_model_bundle=SimpleNamespace(configuration=SimpleNamespace()), + ) + + with ( + pytest.deprecated_call(match="deduct_llm_quota\\(tenant_id=.*deprecated"), + patch("core.app.llm.quota.deduct_llm_quota_for_model") as mock_deduct, + ): + deduct_llm_quota( + tenant_id="tenant-id", + model_instance=model_instance, + usage=usage, + ) + + mock_deduct.assert_called_once_with( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=usage, + ) 
+ + +def test_deduct_llm_quota_wrapper_rejects_non_llm_model_instances() -> None: + usage = LLMUsage.empty_usage() + model_instance = SimpleNamespace( + provider="openai", + model_name="gpt-4o", + model_type_instance=SimpleNamespace(model_type=ModelType.TEXT_EMBEDDING), + provider_model_bundle=SimpleNamespace(configuration=SimpleNamespace()), + ) + + with ( + pytest.deprecated_call(match="deduct_llm_quota\\(tenant_id=.*deprecated"), + pytest.raises(ValueError, match="only support LLM model instances"), + ): + deduct_llm_quota( + tenant_id="tenant-id", + model_instance=model_instance, + usage=usage, + ) diff --git a/api/tests/unit_tests/core/app/workflow/test_node_factory.py b/api/tests/unit_tests/core/app/workflow/test_node_factory.py index 7c9f174129..addce649d5 100644 --- a/api/tests/unit_tests/core/app/workflow/test_node_factory.py +++ b/api/tests/unit_tests/core/app/workflow/test_node_factory.py @@ -8,9 +8,9 @@ from graphon.enums import BuiltinNodeTypes class DummyNode: - def __init__(self, *, node_id, config, graph_init_params, graph_runtime_state, **kwargs): + def __init__(self, *, node_id, data, graph_init_params, graph_runtime_state, **kwargs): self.id = node_id - self.config = config + self.data = data self.graph_init_params = graph_init_params self.graph_runtime_state = graph_runtime_state self.kwargs = kwargs diff --git a/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py b/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py index 23fe682017..7e87c088ce 100644 --- a/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py +++ b/api/tests/unit_tests/core/app/workflow/test_persistence_layer.py @@ -60,7 +60,10 @@ def _make_layer( workflow_execution_id="run-id", conversation_id="conv-id", ) - runtime_state = GraphRuntimeState(variable_pool=VariablePool(system_variables=system_variables), start_at=0.0) + runtime_state = GraphRuntimeState( + variable_pool=VariablePool.from_bootstrap(system_variables=system_variables), + 
start_at=0.0, + ) read_only_state = ReadOnlyGraphRuntimeStateWrapper(runtime_state) application_generate_entity = WorkflowAppGenerateEntity.model_construct( diff --git a/api/tests/unit_tests/core/entities/test_entities_provider_configuration.py b/api/tests/unit_tests/core/entities/test_entities_provider_configuration.py index a28143026f..1b714d6830 100644 --- a/api/tests/unit_tests/core/entities/test_entities_provider_configuration.py +++ b/api/tests/unit_tests/core/entities/test_entities_provider_configuration.py @@ -354,7 +354,8 @@ def test_validate_provider_credentials_handles_hidden_secret_value() -> None: with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.decrypt_token", return_value="restored-key"): with patch( @@ -379,7 +380,10 @@ def test_validate_provider_credentials_without_credential_id() -> None: mock_factory = Mock() mock_factory.provider_credentials_validate.return_value = {"region": "us"} - with patch("core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory): + with patch( + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), + ): validated = configuration.validate_provider_credentials(credentials={"region": "us"}) assert validated == {"region": "us"} @@ -426,23 +430,37 @@ def test_switch_preferred_provider_type_creates_record_when_missing() -> None: def test_get_model_type_instance_and_schema_delegate_to_factory() -> None: configuration = _build_provider_configuration() - mock_factory = Mock() mock_model_type_instance = Mock() mock_schema = _build_ai_model("gpt-4o") - 
mock_factory.get_model_type_instance.return_value = mock_model_type_instance + mock_factory = Mock() + mock_factory.get_provider_schema.return_value = configuration.provider mock_factory.get_model_schema.return_value = mock_schema + mock_assembly = Mock() + mock_assembly.model_runtime = Mock() + mock_assembly.model_provider_factory = mock_factory - with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", - return_value=mock_factory, - ) as mock_factory_builder: + with ( + patch( + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=mock_assembly, + ) as mock_assembly_builder, + patch( + "core.entities.provider_configuration.create_model_type_instance", + return_value=mock_model_type_instance, + ) as mock_model_builder, + ): model_type_instance = configuration.get_model_type_instance(ModelType.LLM) model_schema = configuration.get_model_schema(ModelType.LLM, "gpt-4o", {"api_key": "x"}) assert model_type_instance is mock_model_type_instance assert model_schema is mock_schema - assert mock_factory_builder.call_count == 2 - mock_factory.get_model_type_instance.assert_called_once_with(provider="openai", model_type=ModelType.LLM) + assert mock_assembly_builder.call_count == 2 + mock_factory.get_provider_schema.assert_called_once_with(provider="openai") + mock_model_builder.assert_called_once_with( + runtime=mock_assembly.model_runtime, + provider_schema=configuration.provider, + model_type=ModelType.LLM, + ) mock_factory.get_model_schema.assert_called_once_with( provider="openai", model_type=ModelType.LLM, @@ -456,17 +474,21 @@ def test_get_model_type_instance_and_schema_reuse_bound_runtime_factory() -> Non bound_runtime = Mock() configuration.bind_model_runtime(bound_runtime) - mock_factory = Mock() mock_model_type_instance = Mock() mock_schema = _build_ai_model("gpt-4o") - mock_factory.get_model_type_instance.return_value = mock_model_type_instance + mock_factory = Mock() + 
mock_factory.get_provider_schema.return_value = configuration.provider mock_factory.get_model_schema.return_value = mock_schema with ( patch( "core.entities.provider_configuration.ModelProviderFactory", return_value=mock_factory ) as mock_factory_cls, - patch("core.entities.provider_configuration.create_plugin_model_provider_factory") as mock_factory_builder, + patch("core.entities.provider_configuration.create_plugin_model_assembly") as mock_assembly_builder, + patch( + "core.entities.provider_configuration.create_model_type_instance", + return_value=mock_model_type_instance, + ) as mock_model_builder, ): model_type_instance = configuration.get_model_type_instance(ModelType.LLM) model_schema = configuration.get_model_schema(ModelType.LLM, "gpt-4o", {"api_key": "x"}) @@ -474,8 +496,14 @@ def test_get_model_type_instance_and_schema_reuse_bound_runtime_factory() -> Non assert model_type_instance is mock_model_type_instance assert model_schema is mock_schema assert mock_factory_cls.call_count == 2 - mock_factory_cls.assert_called_with(model_runtime=bound_runtime) - mock_factory_builder.assert_not_called() + mock_factory_cls.assert_called_with(runtime=bound_runtime) + mock_assembly_builder.assert_not_called() + mock_factory.get_provider_schema.assert_called_once_with(provider="openai") + mock_model_builder.assert_called_once_with( + runtime=bound_runtime, + provider_schema=configuration.provider, + model_type=ModelType.LLM, + ) def test_get_provider_model_returns_none_when_model_not_found() -> None: @@ -504,7 +532,10 @@ def test_get_provider_models_system_deduplicates_sorts_and_filters_active() -> N mock_factory = Mock() mock_factory.get_provider_schema.return_value = provider_schema - with patch("core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory): + with patch( + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), 
model_provider_factory=mock_factory), + ): all_models = configuration.get_provider_models(model_type=ModelType.LLM, only_active=False) active_models = configuration.get_provider_models(model_type=ModelType.LLM, only_active=True) @@ -722,7 +753,8 @@ def test_validate_provider_credentials_handles_invalid_original_json() -> None: with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.encrypt_token", return_value="enc-key"): validated = configuration.validate_provider_credentials( @@ -1069,7 +1101,8 @@ def test_validate_custom_model_credentials_supports_hidden_reuse_and_sessionless with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.decrypt_token", return_value="raw"): with patch("core.entities.provider_configuration.encrypter.encrypt_token", return_value="enc-new"): @@ -1083,7 +1116,10 @@ def test_validate_custom_model_credentials_supports_hidden_reuse_and_sessionless mock_factory2 = Mock() mock_factory2.model_credentials_validate.return_value = {"region": "us"} - with patch("core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory2): + with patch( + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory2), + ): validated = configuration.validate_custom_model_credentials( 
model_type=ModelType.LLM, model="gpt-4o", @@ -1575,7 +1611,8 @@ def test_validate_provider_credentials_uses_empty_original_when_record_missing() with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.encrypt_token", return_value="enc-new"): validated = configuration.validate_provider_credentials( @@ -1701,7 +1738,8 @@ def test_validate_custom_model_credentials_handles_invalid_original_json() -> No with _patched_session(mock_session): with patch( - "core.entities.provider_configuration.create_plugin_model_provider_factory", return_value=mock_factory + "core.entities.provider_configuration.create_plugin_model_assembly", + return_value=SimpleNamespace(model_runtime=Mock(), model_provider_factory=mock_factory), ): with patch("core.entities.provider_configuration.encrypter.encrypt_token", return_value="enc-new"): validated = configuration.validate_custom_model_credentials( diff --git a/api/tests/unit_tests/core/helper/test_moderation.py b/api/tests/unit_tests/core/helper/test_moderation.py index a0dfa86d20..c33002329b 100644 --- a/api/tests/unit_tests/core/helper/test_moderation.py +++ b/api/tests/unit_tests/core/helper/test_moderation.py @@ -68,8 +68,8 @@ def test_check_moderation_returns_true_when_model_accepts_text(mocker: MockerFix mocker.patch("core.helper.moderation.secrets.choice", return_value="chunk") moderation_model = SimpleNamespace(invoke=lambda **invoke_kwargs: invoke_kwargs["text"] == "chunk") - factory = SimpleNamespace(get_model_type_instance=lambda **_factory_kwargs: moderation_model) - mocker.patch("core.helper.moderation.create_plugin_model_provider_factory", return_value=factory) + assembly = 
SimpleNamespace(create_model_type_instance=lambda **_factory_kwargs: moderation_model) + mocker.patch("core.helper.moderation.create_plugin_model_assembly", return_value=assembly) assert ( check_moderation( @@ -91,7 +91,7 @@ def test_check_moderation_returns_true_when_text_is_empty(mocker: MockerFixture) provider_map={openai_provider: hosting_openai}, ), ) - factory_mock = mocker.patch("core.helper.moderation.create_plugin_model_provider_factory") + factory_mock = mocker.patch("core.helper.moderation.create_plugin_model_assembly") choice_mock = mocker.patch("core.helper.moderation.secrets.choice") assert ( @@ -119,8 +119,8 @@ def test_check_moderation_returns_false_when_model_rejects_text(mocker: MockerFi mocker.patch("core.helper.moderation.secrets.choice", return_value="chunk") moderation_model = SimpleNamespace(invoke=lambda **_invoke_kwargs: False) - factory = SimpleNamespace(get_model_type_instance=lambda **_factory_kwargs: moderation_model) - mocker.patch("core.helper.moderation.create_plugin_model_provider_factory", return_value=factory) + assembly = SimpleNamespace(create_model_type_instance=lambda **_factory_kwargs: moderation_model) + mocker.patch("core.helper.moderation.create_plugin_model_assembly", return_value=assembly) assert ( check_moderation( @@ -147,8 +147,8 @@ def test_check_moderation_raises_bad_request_when_provider_call_fails(mocker: Mo failing_model = SimpleNamespace( invoke=lambda **_invoke_kwargs: (_ for _ in ()).throw(RuntimeError("boom")), ) - factory = SimpleNamespace(get_model_type_instance=lambda **_factory_kwargs: failing_model) - mocker.patch("core.helper.moderation.create_plugin_model_provider_factory", return_value=factory) + assembly = SimpleNamespace(create_model_type_instance=lambda **_factory_kwargs: failing_model) + mocker.patch("core.helper.moderation.create_plugin_model_assembly", return_value=assembly) with pytest.raises(InvokeBadRequestError, match="Rate limit exceeded, please try again later."): check_moderation( diff 
--git a/api/tests/unit_tests/core/model_runtime/test_model_provider_factory.py b/api/tests/unit_tests/core/model_runtime/test_model_provider_factory.py index c4fd970562..2b51dc8182 100644 --- a/api/tests/unit_tests/core/model_runtime/test_model_provider_factory.py +++ b/api/tests/unit_tests/core/model_runtime/test_model_provider_factory.py @@ -2,6 +2,7 @@ from unittest.mock import Mock import pytest +from core.plugin.impl.model_runtime_factory import create_model_type_instance from graphon.model_runtime.entities.common_entities import I18nObject from graphon.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType from graphon.model_runtime.entities.provider_entities import ( @@ -73,7 +74,7 @@ def test_model_provider_factory_resolves_runtime_provider_name() -> None: supported_model_types=[ModelType.LLM], configurate_methods=[ConfigurateMethod.PREDEFINED_MODEL], ) - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime([provider])) + factory = ModelProviderFactory(runtime=_FakeModelRuntime([provider])) provider_schema = factory.get_model_provider("openai") @@ -98,7 +99,7 @@ def test_model_provider_factory_resolves_canonical_short_name_independent_of_pro configurate_methods=[ConfigurateMethod.PREDEFINED_MODEL], ), ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) provider_schema = factory.get_model_provider("openai") @@ -107,8 +108,8 @@ def test_model_provider_factory_resolves_canonical_short_name_independent_of_pro def test_model_provider_factory_requires_runtime() -> None: - with pytest.raises(ValueError, match="model_runtime is required"): - ModelProviderFactory(model_runtime=None) # type: ignore[arg-type] + with pytest.raises(ValueError, match="runtime is required"): + ModelProviderFactory(runtime=None) # type: ignore[arg-type] def test_model_provider_factory_get_providers_returns_runtime_providers() -> None: @@ -119,7 +120,7 @@ def 
test_model_provider_factory_get_providers_returns_runtime_providers() -> Non supported_model_types=[ModelType.LLM], ) ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) result = factory.get_providers() @@ -133,7 +134,7 @@ def test_model_provider_factory_get_provider_schema_delegates_to_provider_lookup provider_name="openai", supported_model_types=[ModelType.LLM], ) - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime([provider])) + factory = ModelProviderFactory(runtime=_FakeModelRuntime([provider])) result = factory.get_provider_schema("openai") @@ -142,7 +143,7 @@ def test_model_provider_factory_get_provider_schema_delegates_to_provider_lookup def test_model_provider_factory_raises_for_unknown_provider() -> None: factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( + runtime=_FakeModelRuntime( [ _build_provider( provider="langgenius/openai/openai", @@ -172,7 +173,7 @@ def test_model_provider_factory_get_models_filters_provider_and_model_type() -> models=[_build_model("rerank-v3", ModelType.RERANK)], ), ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) results = factory.get_models(provider="openai", model_type=ModelType.LLM) @@ -196,7 +197,7 @@ def test_model_provider_factory_get_models_skips_providers_without_requested_mod models=[_build_model("eleven_multilingual_v2", ModelType.TTS)], ), ] - factory = ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) results = factory.get_models(model_type=ModelType.TTS) @@ -214,7 +215,7 @@ def test_model_provider_factory_get_models_without_model_type_keeps_all_provider models=[_build_model("gpt-4o-mini", ModelType.LLM), _build_model("tts-1", ModelType.TTS)], ) ] - factory = 
ModelProviderFactory(model_runtime=_FakeModelRuntime(providers)) + factory = ModelProviderFactory(runtime=_FakeModelRuntime(providers)) results = factory.get_models(provider="openai") @@ -242,7 +243,7 @@ def test_model_provider_factory_validates_provider_credentials() -> None: ) ] ) - factory = ModelProviderFactory(model_runtime=runtime) + factory = ModelProviderFactory(runtime=runtime) filtered = factory.provider_credentials_validate( provider="openai", @@ -258,7 +259,7 @@ def test_model_provider_factory_validates_provider_credentials() -> None: def test_model_provider_factory_provider_credentials_validate_requires_schema() -> None: factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( + runtime=_FakeModelRuntime( [ _build_provider( provider="langgenius/openai/openai", @@ -294,7 +295,7 @@ def test_model_provider_factory_validates_model_credentials() -> None: ) ] ) - factory = ModelProviderFactory(model_runtime=runtime) + factory = ModelProviderFactory(runtime=runtime) filtered = factory.model_credentials_validate( provider="openai", @@ -314,7 +315,7 @@ def test_model_provider_factory_validates_model_credentials() -> None: def test_model_provider_factory_model_credentials_validate_requires_schema() -> None: factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( + runtime=_FakeModelRuntime( [ _build_provider( provider="langgenius/openai/openai", @@ -346,7 +347,7 @@ def test_model_provider_factory_get_model_schema_and_icon_use_canonical_provider ) runtime.get_model_schema.return_value = "schema" runtime.get_provider_icon.return_value = (b"icon", "image/png") - factory = ModelProviderFactory(model_runtime=runtime) + factory = ModelProviderFactory(runtime=runtime) assert ( factory.get_model_schema( @@ -382,39 +383,43 @@ def test_model_provider_factory_get_model_schema_and_icon_use_canonical_provider (ModelType.TTS, TTSModel), ], ) -def test_model_provider_factory_builds_model_type_instances( +def 
test_create_model_type_instance_builds_model_wrappers( model_type: ModelType, expected_type: type[object], ) -> None: - factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( - [ - _build_provider( - provider="langgenius/openai/openai", - provider_name="openai", - supported_model_types=[model_type], - ) - ] - ) + runtime = _FakeModelRuntime( + [ + _build_provider( + provider="langgenius/openai/openai", + provider_name="openai", + supported_model_types=[model_type], + ) + ] ) - instance = factory.get_model_type_instance("openai", model_type) + instance = create_model_type_instance( + runtime=runtime, + provider_schema=runtime.fetch_model_providers()[0], + model_type=model_type, + ) assert isinstance(instance, expected_type) -def test_model_provider_factory_rejects_unsupported_model_type() -> None: - factory = ModelProviderFactory( - model_runtime=_FakeModelRuntime( - [ - _build_provider( - provider="langgenius/openai/openai", - provider_name="openai", - supported_model_types=[ModelType.LLM], - ) - ] - ) +def test_create_model_type_instance_rejects_unsupported_model_type() -> None: + runtime = _FakeModelRuntime( + [ + _build_provider( + provider="langgenius/openai/openai", + provider_name="openai", + supported_model_types=[ModelType.LLM], + ) + ] ) with pytest.raises(ValueError, match="Unsupported model type: unsupported"): - factory.get_model_type_instance("openai", "unsupported") # type: ignore[arg-type] + create_model_type_instance( + runtime=runtime, + provider_schema=runtime.fetch_model_providers()[0], + model_type="unsupported", # type: ignore[arg-type] + ) diff --git a/api/tests/unit_tests/core/plugin/impl/test_model_runtime_factory.py b/api/tests/unit_tests/core/plugin/impl/test_model_runtime_factory.py index 7491e79f30..52da674f06 100644 --- a/api/tests/unit_tests/core/plugin/impl/test_model_runtime_factory.py +++ b/api/tests/unit_tests/core/plugin/impl/test_model_runtime_factory.py @@ -31,6 +31,6 @@ def 
test_plugin_model_assembly_reuses_single_runtime_across_views(): assert assembly.model_manager is model_manager mock_runtime_factory.assert_called_once_with(tenant_id="tenant-1", user_id="user-1") - mock_provider_factory_cls.assert_called_once_with(model_runtime=runtime) + mock_provider_factory_cls.assert_called_once_with(runtime=runtime) mock_provider_manager_cls.assert_called_once_with(model_runtime=runtime) mock_model_manager_cls.assert_called_once_with(provider_manager=provider_manager) diff --git a/api/tests/unit_tests/core/plugin/test_model_runtime_adapter.py b/api/tests/unit_tests/core/plugin/test_model_runtime_adapter.py index 88bf555594..b1ecaa4ead 100644 --- a/api/tests/unit_tests/core/plugin/test_model_runtime_adapter.py +++ b/api/tests/unit_tests/core/plugin/test_model_runtime_adapter.py @@ -3,7 +3,7 @@ import datetime import uuid from types import SimpleNamespace -from unittest.mock import Mock, sentinel +from unittest.mock import Mock, patch, sentinel import pytest @@ -13,6 +13,8 @@ from core.plugin.impl.model import PluginModelClient from core.plugin.impl.model_runtime import TENANT_SCOPE_SCHEMA_CACHE_USER_ID, PluginModelRuntime from core.plugin.impl.model_runtime_factory import create_plugin_model_runtime from graphon.model_runtime.entities.common_entities import I18nObject +from graphon.model_runtime.entities.llm_entities import LLMResultChunk, LLMResultChunkDelta, LLMUsage +from graphon.model_runtime.entities.message_entities import AssistantPromptMessage from graphon.model_runtime.entities.model_entities import AIModelEntity, FetchFrom, ModelType from graphon.model_runtime.entities.provider_entities import ConfigurateMethod, ProviderEntity @@ -146,7 +148,31 @@ class TestPluginModelRuntime: def test_invoke_llm_resolves_plugin_fields(self) -> None: client = Mock(spec=PluginModelClient) - client.invoke_llm.return_value = sentinel.result + usage = LLMUsage.empty_usage() + client.invoke_llm.return_value = iter( + [ + LLMResultChunk( + 
model="gpt-4o-mini", + prompt_messages=[], + system_fingerprint="fp-plugin", + delta=LLMResultChunkDelta( + index=0, + message=AssistantPromptMessage(content="plugin "), + ), + ), + LLMResultChunk( + model="gpt-4o-mini", + prompt_messages=[], + system_fingerprint="fp-plugin", + delta=LLMResultChunkDelta( + index=1, + message=AssistantPromptMessage(content="response"), + usage=usage, + finish_reason="stop", + ), + ), + ] + ) runtime = PluginModelRuntime(tenant_id="tenant", user_id="user", client=client) result = runtime.invoke_llm( @@ -160,7 +186,11 @@ class TestPluginModelRuntime: stream=False, ) - assert result is sentinel.result + assert result.model == "gpt-4o-mini" + assert result.prompt_messages == [] + assert result.message.content == "plugin response" + assert result.usage == usage + assert result.system_fingerprint == "fp-plugin" client.invoke_llm.assert_called_once_with( tenant_id="tenant", user_id="user", @@ -175,6 +205,38 @@ class TestPluginModelRuntime: stream=False, ) + def test_invoke_llm_returns_plugin_stream_directly(self) -> None: + client = Mock(spec=PluginModelClient) + stream_result = iter([]) + client.invoke_llm.return_value = stream_result + runtime = PluginModelRuntime(tenant_id="tenant", user_id="user", client=client) + + result = runtime.invoke_llm( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + model_parameters={"temperature": 0.3}, + prompt_messages=[], + tools=None, + stop=("END",), + stream=True, + ) + + assert result is stream_result + client.invoke_llm.assert_called_once_with( + tenant_id="tenant", + user_id="user", + plugin_id="langgenius/openai", + provider="openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + model_parameters={"temperature": 0.3}, + prompt_messages=[], + tools=None, + stop=["END"], + stream=True, + ) + def test_invoke_llm_rejects_per_call_user_override(self) -> None: client = Mock(spec=PluginModelClient) client.invoke_llm.return_value = 
sentinel.result @@ -267,6 +329,129 @@ def test_get_model_schema_uses_cached_schema_without_hitting_client(monkeypatch: client.get_model_schema.assert_not_called() +def test_structured_output_adapter_invokes_bound_runtime_streaming() -> None: + runtime = Mock() + runtime.invoke_llm.return_value = sentinel.stream_result + adapter = model_runtime_module._PluginStructuredOutputModelInstance( + runtime=runtime, + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + ) + tool = Mock() + + result = adapter.invoke_llm( + prompt_messages=[], + model_parameters=None, + tools=[tool], + stop=["END"], + stream=True, + callbacks=sentinel.callbacks, + ) + + assert result is sentinel.stream_result + runtime.invoke_llm.assert_called_once_with( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + model_parameters={}, + prompt_messages=[], + tools=[tool], + stop=["END"], + stream=True, + ) + + +def test_structured_output_adapter_invokes_bound_runtime_non_streaming() -> None: + runtime = Mock() + runtime.invoke_llm.return_value = sentinel.result + adapter = model_runtime_module._PluginStructuredOutputModelInstance( + runtime=runtime, + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + ) + + result = adapter.invoke_llm( + prompt_messages=[], + model_parameters={"temperature": 0}, + tools=None, + stop=None, + stream=False, + ) + + assert result is sentinel.result + runtime.invoke_llm.assert_called_once_with( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + model_parameters={"temperature": 0}, + prompt_messages=[], + tools=None, + stop=None, + stream=False, + ) + + +def test_invoke_llm_with_structured_output_delegates_with_bound_adapter() -> None: + client = Mock(spec=PluginModelClient) + runtime = PluginModelRuntime(tenant_id="tenant", user_id="user", client=client) + schema = 
_build_model_schema() + runtime.get_model_schema = Mock(return_value=schema) # type: ignore[method-assign] + + with patch.object( + model_runtime_module, + "invoke_llm_with_structured_output_helper", + return_value=sentinel.structured_result, + ) as mock_helper: + result = runtime.invoke_llm_with_structured_output( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + json_schema={"type": "object"}, + model_parameters={"temperature": 0}, + prompt_messages=[], + stop=("END",), + stream=False, + ) + + assert result is sentinel.structured_result + runtime.get_model_schema.assert_called_once_with( + provider="langgenius/openai/openai", + model_type=ModelType.LLM, + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + ) + helper_kwargs = mock_helper.call_args.kwargs + assert helper_kwargs["provider"] == "langgenius/openai/openai" + assert helper_kwargs["model_schema"] == schema + assert helper_kwargs["json_schema"] == {"type": "object"} + assert helper_kwargs["model_parameters"] == {"temperature": 0} + assert helper_kwargs["prompt_messages"] == [] + assert helper_kwargs["tools"] is None + assert helper_kwargs["stop"] == ["END"] + assert helper_kwargs["stream"] is False + assert isinstance(helper_kwargs["model_instance"], model_runtime_module._PluginStructuredOutputModelInstance) + + +def test_invoke_llm_with_structured_output_raises_when_model_schema_is_missing() -> None: + client = Mock(spec=PluginModelClient) + runtime = PluginModelRuntime(tenant_id="tenant", user_id="user", client=client) + runtime.get_model_schema = Mock(return_value=None) # type: ignore[method-assign] + + with pytest.raises(ValueError, match="Model schema not found for gpt-4o-mini"): + runtime.invoke_llm_with_structured_output( + provider="langgenius/openai/openai", + model="gpt-4o-mini", + credentials={"api_key": "secret"}, + json_schema={"type": "object"}, + model_parameters={}, + prompt_messages=[], + stop=None, + stream=False, + ) + + def 
test_get_model_schema_deletes_invalid_cache_and_refetches(monkeypatch: pytest.MonkeyPatch) -> None: client = Mock(spec=PluginModelClient) schema = _build_model_schema() diff --git a/api/tests/unit_tests/core/test_provider_manager.py b/api/tests/unit_tests/core/test_provider_manager.py index 02f12fb3b4..e84fcba3d9 100644 --- a/api/tests/unit_tests/core/test_provider_manager.py +++ b/api/tests/unit_tests/core/test_provider_manager.py @@ -289,7 +289,7 @@ def test_get_default_model_uses_injected_runtime_for_existing_default_record(moc result = manager.get_default_model("tenant-id", ModelType.LLM) - mock_factory_cls.assert_called_once_with(model_runtime=manager._model_runtime) + mock_factory_cls.assert_called_once_with(runtime=manager._model_runtime) assert result is not None assert result.model == "gpt-4" assert result.provider.provider == "openai" @@ -316,7 +316,7 @@ def test_get_configurations_uses_injected_runtime_and_adds_provider_aliases(mock result = manager.get_configurations("tenant-id") expected_alias = str(ModelProviderID("openai")) - mock_factory_cls.assert_called_once_with(model_runtime=manager._model_runtime) + mock_factory_cls.assert_called_once_with(runtime=manager._model_runtime) assert result.tenant_id == "tenant-id" assert expected_alias in provider_records assert expected_alias in provider_model_records @@ -402,7 +402,7 @@ def test_get_configurations_reuses_cached_result_for_same_tenant(mocker: MockerF assert first is second mock_get_all_providers.assert_called_once_with("tenant-id") - mock_factory_cls.assert_called_once_with(model_runtime=manager._model_runtime) + mock_factory_cls.assert_called_once_with(runtime=manager._model_runtime) mock_provider_configuration.assert_called_once() provider_configuration.bind_model_runtime.assert_called_once_with(manager._model_runtime) diff --git a/api/tests/unit_tests/core/workflow/graph_engine/layers/test_llm_quota.py b/api/tests/unit_tests/core/workflow/graph_engine/layers/test_llm_quota.py index 
5d6667257f..12c7f8113c 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/layers/test_llm_quota.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/layers/test_llm_quota.py @@ -1,12 +1,11 @@ +import logging import threading from datetime import datetime from types import SimpleNamespace from unittest.mock import MagicMock, patch -from core.app.entities.app_invoke_entities import DifyRunContext, InvokeFrom, UserFrom from core.app.workflow.layers.llm_quota import LLMQuotaLayer from core.errors.error import QuotaExceededError -from core.model_manager import ModelInstance from graphon.enums import BuiltinNodeTypes, WorkflowNodeExecutionStatus from graphon.graph_engine.entities.commands import CommandType from graphon.graph_events import NodeRunSucceededEvent @@ -14,17 +13,7 @@ from graphon.model_runtime.entities.llm_entities import LLMUsage from graphon.node_events import NodeRunResult -def _build_dify_context() -> DifyRunContext: - return DifyRunContext( - tenant_id="tenant-id", - app_id="app-id", - user_id="user-id", - user_from=UserFrom.ACCOUNT, - invoke_from=InvokeFrom.DEBUGGER, - ) - - -def _build_succeeded_event() -> NodeRunSucceededEvent: +def _build_succeeded_event(*, provider: str = "openai", model_name: str = "gpt-4o") -> NodeRunSucceededEvent: return NodeRunSucceededEvent( id="execution-id", node_id="llm-node-id", @@ -32,113 +21,162 @@ def _build_succeeded_event() -> NodeRunSucceededEvent: start_at=datetime.now(), node_run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, - inputs={"question": "hello"}, + inputs={ + "question": "hello", + "model_provider": provider, + "model_name": model_name, + }, llm_usage=LLMUsage.empty_usage(), ), ) -def _build_wrapped_model_instance() -> tuple[SimpleNamespace, ModelInstance]: - raw_model_instance = ModelInstance.__new__(ModelInstance) - return SimpleNamespace(_model_instance=raw_model_instance), raw_model_instance +def _build_public_model_identity(*, provider: str = "openai", model_name: 
str = "gpt-4o") -> SimpleNamespace: + return SimpleNamespace(provider=provider, name=model_name) + + +def _build_node_data(*, model: SimpleNamespace | None = None) -> SimpleNamespace: + return SimpleNamespace( + error_strategy=None, + retry_config=SimpleNamespace(retry_enabled=False), + model=model, + ) + + +def _build_node(*, node_type: BuiltinNodeTypes = BuiltinNodeTypes.LLM) -> MagicMock: + node = MagicMock() + node.id = "node-id" + node.execution_id = "execution-id" + node.node_type = node_type + node.node_data = _build_node_data(model=_build_public_model_identity()) + node.model_instance = SimpleNamespace(provider="stale-provider", model_name="stale-model") + return node + + +class _RunnableQuotaNode: + id = "node-id" + execution_id = "execution-id" + node_type = BuiltinNodeTypes.LLM + title = "LLM node" + + def __init__(self, *, stop_event: threading.Event, node_data: SimpleNamespace | None = None) -> None: + self.node_data = node_data or _build_node_data(model=_build_public_model_identity()) + self.graph_runtime_state = SimpleNamespace(stop_event=stop_event) + self.original_run_called = False + + def _run(self) -> NodeRunResult: + self.original_run_called = True + return NodeRunResult(status=WorkflowNodeExecutionStatus.SUCCEEDED) def test_deduct_quota_called_for_successful_llm_node() -> None: - layer = LLMQuotaLayer() - node = MagicMock() - node.id = "llm-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.LLM - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node.model_instance, raw_model_instance = _build_wrapped_model_instance() - + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = _build_node(node_type=BuiltinNodeTypes.LLM) result_event = _build_succeeded_event() - with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota", autospec=True) as mock_deduct: + + with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True) as mock_deduct: 
layer.on_node_run_end(node=node, error=None, result_event=result_event) mock_deduct.assert_called_once_with( tenant_id="tenant-id", - model_instance=raw_model_instance, + provider="openai", + model="gpt-4o", usage=result_event.node_run_result.llm_usage, ) def test_deduct_quota_called_for_question_classifier_node() -> None: - layer = LLMQuotaLayer() - node = MagicMock() - node.id = "question-classifier-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.QUESTION_CLASSIFIER - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node.model_instance, raw_model_instance = _build_wrapped_model_instance() + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = _build_node(node_type=BuiltinNodeTypes.QUESTION_CLASSIFIER) + result_event = _build_succeeded_event(provider="anthropic", model_name="claude-3-7-sonnet") - result_event = _build_succeeded_event() - with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota", autospec=True) as mock_deduct: + with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True) as mock_deduct: layer.on_node_run_end(node=node, error=None, result_event=result_event) mock_deduct.assert_called_once_with( tenant_id="tenant-id", - model_instance=raw_model_instance, + provider="anthropic", + model="claude-3-7-sonnet", usage=result_event.node_run_result.llm_usage, ) def test_non_llm_node_is_ignored() -> None: - layer = LLMQuotaLayer() - node = MagicMock() - node.id = "start-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.START - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node._model_instance = object() - + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = _build_node(node_type=BuiltinNodeTypes.START) result_event = _build_succeeded_event() - with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota", autospec=True) as mock_deduct: + + with 
patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True) as mock_deduct: layer.on_node_run_end(node=node, error=None, result_event=result_event) mock_deduct.assert_not_called() -def test_quota_error_is_handled_in_layer() -> None: - layer = LLMQuotaLayer() - node = MagicMock() - node.id = "llm-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.LLM - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node.model_instance = object() +def test_precheck_ignores_non_quota_node() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = _build_node(node_type=BuiltinNodeTypes.START) - result_event = _build_succeeded_event() - with patch( - "core.app.workflow.layers.llm_quota.deduct_llm_quota", - autospec=True, - side_effect=ValueError("quota exceeded"), - ): - layer.on_node_run_end(node=node, error=None, result_event=result_event) + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + mock_check.assert_not_called() -def test_quota_deduction_exceeded_aborts_workflow_immediately() -> None: - layer = LLMQuotaLayer() +def test_quota_error_is_handled_in_layer(caplog) -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") stop_event = threading.Event() layer.command_channel = MagicMock() - node = MagicMock() - node.id = "llm-node-id" - node.execution_id = "execution-id" - node.node_type = BuiltinNodeTypes.LLM - node.tenant_id = "tenant-id" - node.require_run_context_value.return_value = _build_dify_context() - node.model_instance, _ = _build_wrapped_model_instance() + node = _build_node(node_type=BuiltinNodeTypes.LLM) + node.graph_runtime_state = MagicMock() + node.graph_runtime_state.stop_event = stop_event + result_event = _build_succeeded_event() + + with ( + caplog.at_level(logging.ERROR, logger="core.app.workflow.layers.llm_quota"), + patch( + 
"core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", + autospec=True, + side_effect=ValueError("quota exceeded"), + ) as mock_deduct, + ): + layer.on_node_run_end(node=node, error=None, result_event=result_event) + + mock_deduct.assert_called_once_with( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + usage=result_event.node_run_result.llm_usage, + ) + assert "LLM quota deduction failed, node_id=node-id" in caplog.text + assert not stop_event.is_set() + layer.command_channel.send_command.assert_not_called() + + +def test_send_abort_command_is_noop_without_channel_or_after_abort() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + + layer._send_abort_command(reason="no channel") + + layer.command_channel = MagicMock() + layer._abort_sent = True + layer._send_abort_command(reason="already aborted") + + layer.command_channel.send_command.assert_not_called() + + +def test_quota_deduction_exceeded_aborts_workflow_immediately() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) node.graph_runtime_state = MagicMock() node.graph_runtime_state.stop_event = stop_event result_event = _build_succeeded_event() with patch( - "core.app.workflow.layers.llm_quota.deduct_llm_quota", + "core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True, side_effect=QuotaExceededError("No credits remaining"), ): @@ -152,19 +190,16 @@ def test_quota_deduction_exceeded_aborts_workflow_immediately() -> None: def test_quota_precheck_failure_aborts_workflow_immediately() -> None: - layer = LLMQuotaLayer() + layer = LLMQuotaLayer(tenant_id="tenant-id") stop_event = threading.Event() layer.command_channel = MagicMock() - node = MagicMock() - node.id = "llm-node-id" - node.node_type = BuiltinNodeTypes.LLM - node.model_instance, _ = _build_wrapped_model_instance() + node = _build_node(node_type=BuiltinNodeTypes.LLM) 
node.graph_runtime_state = MagicMock() node.graph_runtime_state.stop_event = stop_event with patch( - "core.app.workflow.layers.llm_quota.ensure_llm_quota_available", + "core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True, side_effect=QuotaExceededError("Model provider openai quota exceeded."), ): @@ -177,21 +212,140 @@ def test_quota_precheck_failure_aborts_workflow_immediately() -> None: assert abort_command.reason == "Model provider openai quota exceeded." -def test_quota_precheck_passes_without_abort() -> None: - layer = LLMQuotaLayer() +def test_quota_precheck_failure_blocks_current_node_run() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") stop_event = threading.Event() layer.command_channel = MagicMock() - node = MagicMock() - node.id = "llm-node-id" - node.node_type = BuiltinNodeTypes.LLM - node.model_instance, raw_model_instance = _build_wrapped_model_instance() + node = _RunnableQuotaNode(stop_event=stop_event) + + with patch( + "core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", + autospec=True, + side_effect=QuotaExceededError("Model provider openai quota exceeded."), + ): + layer.on_node_run_start(node) + + result = node._run() + assert not node.original_run_called + assert result.status == WorkflowNodeExecutionStatus.FAILED + assert result.error == "Model provider openai quota exceeded." 
+ assert result.error_type == QuotaExceededError.__name__ + + +def test_missing_model_identity_blocks_current_node_run() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _RunnableQuotaNode(stop_event=stop_event, node_data=_build_node_data()) + + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + result = node._run() + assert not node.original_run_called + assert result.status == WorkflowNodeExecutionStatus.FAILED + assert result.error == "LLM quota check requires public node model identity before execution." + assert result.error_type == "LLMQuotaIdentityError" + mock_check.assert_not_called() + + +def test_quota_precheck_passes_without_abort() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) node.graph_runtime_state = MagicMock() node.graph_runtime_state.stop_event = stop_event - with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available", autospec=True) as mock_check: + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: layer.on_node_run_start(node) assert not stop_event.is_set() - mock_check.assert_called_once_with(model_instance=raw_model_instance) + mock_check.assert_called_once_with( + tenant_id="tenant-id", + provider="openai", + model="gpt-4o", + ) layer.command_channel.send_command.assert_not_called() + + +def test_precheck_reads_model_identity_from_data_when_node_data_is_absent() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + node = SimpleNamespace( + id="node-id", + node_type=BuiltinNodeTypes.LLM, + data=_build_node_data(model=_build_public_model_identity(provider="anthropic", model_name="claude")), + ) + + with 
patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + mock_check.assert_called_once_with( + tenant_id="tenant-id", + provider="anthropic", + model="claude", + ) + + +def test_precheck_rejects_invalid_public_model_identity() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) + node.node_data = _build_node_data(model=_build_public_model_identity(provider="", model_name="gpt-4o")) + node.graph_runtime_state = MagicMock() + node.graph_runtime_state.stop_event = stop_event + + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + assert stop_event.is_set() + mock_check.assert_not_called() + layer.command_channel.send_command.assert_called_once() + + +def test_precheck_requires_public_node_model_config() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) + node.node_data = _build_node_data() + node.graph_runtime_state = MagicMock() + node.graph_runtime_state.stop_event = stop_event + + with patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model", autospec=True) as mock_check: + layer.on_node_run_start(node) + + assert stop_event.is_set() + mock_check.assert_not_called() + layer.command_channel.send_command.assert_called_once() + abort_command = layer.command_channel.send_command.call_args.args[0] + assert abort_command.command_type == CommandType.ABORT + assert abort_command.reason == "LLM quota check requires public node model identity before execution." 
+ + +def test_deduction_requires_public_event_model_identity() -> None: + layer = LLMQuotaLayer(tenant_id="tenant-id") + stop_event = threading.Event() + layer.command_channel = MagicMock() + + node = _build_node(node_type=BuiltinNodeTypes.LLM) + node.graph_runtime_state = MagicMock() + node.graph_runtime_state.stop_event = stop_event + result_event = _build_succeeded_event() + result_event.node_run_result.inputs = {"question": "hello"} + + with patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model", autospec=True) as mock_deduct: + layer.on_node_run_end(node=node, error=None, result_event=result_event) + + assert stop_event.is_set() + mock_deduct.assert_not_called() + layer.command_channel.send_command.assert_called_once() + abort_command = layer.command_channel.send_command.call_args.args[0] + assert abort_command.command_type == CommandType.ABORT + assert abort_command.reason == "LLM quota deduction requires model identity in the node result event." diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_factory.py b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_factory.py index 9f3e3b00b9..c721c7b0eb 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_factory.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_factory.py @@ -96,7 +96,7 @@ class MockNodeFactory(DifyNodeFactory): if node_type == BuiltinNodeTypes.CODE: mock_instance = mock_class( node_id=node_id, - config=resolved_node_data, + data=resolved_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, mock_config=self.mock_config, @@ -106,7 +106,7 @@ class MockNodeFactory(DifyNodeFactory): elif node_type == BuiltinNodeTypes.HTTP_REQUEST: mock_instance = mock_class( node_id=node_id, - config=resolved_node_data, + data=resolved_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, mock_config=self.mock_config, @@ -122,7 +122,7 @@ class 
MockNodeFactory(DifyNodeFactory): }: mock_instance = mock_class( node_id=node_id, - config=resolved_node_data, + data=resolved_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, mock_config=self.mock_config, @@ -132,7 +132,7 @@ class MockNodeFactory(DifyNodeFactory): else: mock_instance = mock_class( node_id=node_id, - config=resolved_node_data, + data=resolved_node_data, graph_init_params=self.graph_init_params, graph_runtime_state=self.graph_runtime_state, mock_config=self.mock_config, diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_nodes.py b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_nodes.py index f9819c47ec..e0eb4e7361 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_mock_nodes.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_mock_nodes.py @@ -56,7 +56,7 @@ class MockNodeMixin: def __init__( self, node_id: str, - config: Any, + data: Any, *, graph_init_params: "GraphInitParams", graph_runtime_state: "GraphRuntimeState", @@ -98,7 +98,7 @@ class MockNodeMixin: super().__init__( node_id=node_id, - config=config, + data=data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, **kwargs, diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py b/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py index 75bc6d05f7..6156f7b576 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_parallel_human_input_join_resume.py @@ -111,7 +111,7 @@ class StaticRepo(HumanInputFormRepository): def _build_runtime_state() -> GraphRuntimeState: - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", @@ -140,7 +140,7 @@ def _build_graph(runtime_state: 
GraphRuntimeState, repo: HumanInputFormRepositor start_config = {"id": "start", "data": StartNodeData(title="Start", variables=[]).model_dump()} start_node = StartNode( node_id=start_config["id"], - config=StartNodeData(title="Start", variables=[]), + data=StartNodeData(title="Start", variables=[]), graph_init_params=graph_init_params, graph_runtime_state=runtime_state, ) @@ -155,7 +155,7 @@ def _build_graph(runtime_state: GraphRuntimeState, repo: HumanInputFormRepositor human_a_config = {"id": "human_a", "data": human_data.model_dump()} human_a = HumanInputNode( node_id=human_a_config["id"], - config=human_data, + data=human_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, form_repository=repo, @@ -165,7 +165,7 @@ def _build_graph(runtime_state: GraphRuntimeState, repo: HumanInputFormRepositor human_b_config = {"id": "human_b", "data": human_data.model_dump()} human_b = HumanInputNode( node_id=human_b_config["id"], - config=human_data, + data=human_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, form_repository=repo, @@ -183,7 +183,7 @@ def _build_graph(runtime_state: GraphRuntimeState, repo: HumanInputFormRepositor end_config = {"id": "end", "data": end_data.model_dump()} end_node = EndNode( node_id=end_config["id"], - config=end_data, + data=end_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py b/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py index ae9dae0646..2603e29be6 100644 --- a/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py +++ b/api/tests/unit_tests/core/workflow/nodes/answer/test_answer.py @@ -1,41 +1,36 @@ import time import uuid -from unittest.mock import MagicMock from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom -from core.workflow.node_factory import DifyNodeFactory from core.workflow.system_variables import build_system_variables 
-from extensions.ext_database import db from graphon.enums import WorkflowNodeExecutionStatus -from graphon.graph import Graph from graphon.nodes.answer.answer_node import AnswerNode from graphon.nodes.answer.entities import AnswerNodeData from graphon.runtime import GraphRuntimeState, VariablePool from tests.workflow_test_utils import build_test_graph_init_params -def test_execute_answer(): +def _build_variable_pool() -> VariablePool: + return VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="aaa", files=[]), + user_inputs={}, + ) + + +def _build_answer_node(*, answer: str, variable_pool: VariablePool) -> AnswerNode: graph_config = { - "edges": [ - { - "id": "start-source-answer-target", - "source": "start", - "target": "answer", - }, - ], + "edges": [], "nodes": [ - {"data": {"type": "start", "title": "Start"}, "id": "start"}, { "data": { - "title": "123", + "title": "Answer", "type": "answer", - "answer": "Today's weather is {{#start.weather#}}\n{{#llm.text#}}\n{{img}}\nFin.", + "answer": answer, }, "id": "answer", - }, + } ], } - init_params = build_test_graph_init_params( workflow_id="1", graph_config=graph_config, @@ -46,42 +41,31 @@ def test_execute_answer(): invoke_from=InvokeFrom.DEBUGGER, call_depth=0, ) - - # construct variable pool - variable_pool = VariablePool( - system_variables=build_system_variables(user_id="aaa", files=[]), - user_inputs={}, - environment_variables=[], - conversation_variables=[], + graph_runtime_state = GraphRuntimeState( + variable_pool=variable_pool, + start_at=time.perf_counter(), ) - variable_pool.add(["start", "weather"], "sunny") - variable_pool.add(["llm", "text"], "You are a helpful AI.") - - graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()) - - # create node factory - node_factory = DifyNodeFactory( - graph_init_params=init_params, - graph_runtime_state=graph_runtime_state, - ) - - graph = Graph.init(graph_config=graph_config, 
node_factory=node_factory, root_node_id="start") - - node = AnswerNode( + return AnswerNode( node_id=str(uuid.uuid4()), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, - config=AnswerNodeData( - title="123", + data=AnswerNodeData( + title="Answer", type="answer", - answer="Today's weather is {{#start.weather#}}\n{{#llm.text#}}\n{{img}}\nFin.", + answer=answer, ), ) - # Mock db.session.close() - db.session.close = MagicMock() - # execute node +def test_execute_answer_renders_variable_selectors() -> None: + variable_pool = _build_variable_pool() + variable_pool.add(["start", "weather"], "sunny") + variable_pool.add(["llm", "text"], "You are a helpful AI.") + node = _build_answer_node( + answer="Today's weather is {{#start.weather#}}\n{{#llm.text#}}\n{{img}}\nFin.", + variable_pool=variable_pool, + ) + result = node._run() assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED @@ -89,36 +73,11 @@ def test_execute_answer(): def test_execute_answer_renders_structured_output_object_as_json() -> None: - init_params = build_test_graph_init_params( - workflow_id="1", - graph_config={"nodes": [], "edges": []}, - tenant_id="1", - app_id="1", - user_id="1", - user_from=UserFrom.ACCOUNT, - invoke_from=InvokeFrom.DEBUGGER, - call_depth=0, - ) - - variable_pool = VariablePool( - system_variables=build_system_variables(user_id="aaa", files=[]), - user_inputs={}, - environment_variables=[], - conversation_variables=[], - ) + variable_pool = _build_variable_pool() variable_pool.add(["1777539038857", "structured_output"], {"type": "greeting"}) - - graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()) - - node = AnswerNode( - node_id=str(uuid.uuid4()), - graph_init_params=init_params, - graph_runtime_state=graph_runtime_state, - config=AnswerNodeData( - title="123", - type="answer", - answer="{{#1777539038857.structured_output#}}", - ), + node = _build_answer_node( + 
answer="{{#1777539038857.structured_output#}}", + variable_pool=variable_pool, ) result = node._run() @@ -128,35 +87,9 @@ def test_execute_answer_renders_structured_output_object_as_json() -> None: def test_execute_answer_falls_back_to_plain_selector_text_when_structured_output_missing() -> None: - init_params = build_test_graph_init_params( - workflow_id="1", - graph_config={"nodes": [], "edges": []}, - tenant_id="1", - app_id="1", - user_id="1", - user_from=UserFrom.ACCOUNT, - invoke_from=InvokeFrom.DEBUGGER, - call_depth=0, - ) - - variable_pool = VariablePool( - system_variables=build_system_variables(user_id="aaa", files=[]), - user_inputs={}, - environment_variables=[], - conversation_variables=[], - ) - - graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()) - - node = AnswerNode( - node_id=str(uuid.uuid4()), - graph_init_params=init_params, - graph_runtime_state=graph_runtime_state, - config=AnswerNodeData( - title="123", - type="answer", - answer="{{#1777539038857.structured_output#}}", - ), + node = _build_answer_node( + answer="{{#1777539038857.structured_output#}}", + variable_pool=_build_variable_pool(), ) result = node._run() diff --git a/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py b/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py index a18a36a099..235d56e989 100644 --- a/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/datasource/test_datasource_node.py @@ -81,7 +81,7 @@ def test_datasource_node_delegates_to_manager_stream(mocker: MockerFixture): node = DatasourceNode( node_id="n", - config=DatasourceNodeData( + data=DatasourceNodeData( type="datasource", version="1", title="Datasource", diff --git a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py index 
be7cc073db..796fc7719d 100644 --- a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py +++ b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_executor.py @@ -29,7 +29,7 @@ HTTP_REQUEST_CONFIG = HttpRequestNodeConfig( def test_executor_with_json_body_and_number_variable(): # Prepare the variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -85,7 +85,7 @@ def test_executor_with_json_body_and_number_variable(): def test_executor_with_json_body_and_object_variable(): # Prepare the variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -143,7 +143,7 @@ def test_executor_with_json_body_and_object_variable(): def test_executor_with_json_body_and_nested_object_variable(): # Prepare the variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -201,7 +201,7 @@ def test_executor_with_json_body_and_nested_object_variable(): def test_extract_selectors_from_template_with_newline(): - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) variable_pool.add(("node_id", "custom_query"), "line1\nline2") node_data = HttpRequestNodeData( title="Test JSON Body with Nested Object Variable", @@ -230,7 +230,7 @@ def test_extract_selectors_from_template_with_newline(): def test_executor_with_form_data(): # Prepare the variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -320,7 +320,7 @@ def test_init_headers(): node_data=node_data, timeout=timeout, http_request_config=HTTP_REQUEST_CONFIG, - 
variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), http_client=ssrf_proxy, file_manager=file_manager, ) @@ -357,7 +357,7 @@ def test_init_params(): node_data=node_data, timeout=timeout, http_request_config=HTTP_REQUEST_CONFIG, - variable_pool=VariablePool(system_variables=default_system_variables()), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables()), http_client=ssrf_proxy, file_manager=file_manager, ) @@ -390,7 +390,7 @@ def test_init_params(): def test_empty_api_key_raises_error_bearer(): """Test that empty API key raises AuthorizationConfigError for bearer auth.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -417,7 +417,7 @@ def test_empty_api_key_raises_error_bearer(): def test_empty_api_key_raises_error_basic(): """Test that empty API key raises AuthorizationConfigError for basic auth.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -444,7 +444,7 @@ def test_empty_api_key_raises_error_basic(): def test_empty_api_key_raises_error_custom(): """Test that empty API key raises AuthorizationConfigError for custom auth.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -471,7 +471,7 @@ def test_empty_api_key_raises_error_custom(): def test_whitespace_only_api_key_raises_error(): """Test that whitespace-only API key raises AuthorizationConfigError.""" - variable_pool = 
VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -498,7 +498,7 @@ def test_whitespace_only_api_key_raises_error(): def test_valid_api_key_works(): """Test that valid API key works correctly for bearer auth.""" - variable_pool = VariablePool(system_variables=default_system_variables()) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables()) node_data = HttpRequestNodeData( title="test", method="get", @@ -536,7 +536,7 @@ def test_executor_with_json_body_and_unquoted_uuid_variable(): # UUID that triggers the json_repair truncation bug test_uuid = "57eeeeb1-450b-482c-81b9-4be77e95dee2" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -583,7 +583,7 @@ def test_executor_with_json_body_and_unquoted_uuid_with_newlines(): """ test_uuid = "57eeeeb1-450b-482c-81b9-4be77e95dee2" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -624,7 +624,7 @@ def test_executor_with_json_body_and_unquoted_uuid_with_newlines(): def test_executor_with_json_body_preserves_numbers_and_strings(): """Test that numbers are preserved and string values are properly quoted.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py index 2e89a2da3c..afde541beb 100644 --- a/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/http_request/test_http_request_node.py @@ -110,12 +110,15 @@ def 
_build_http_node( call_depth=0, ) graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="user", files=[]), + user_inputs={}, + ), start_at=time.perf_counter(), ) return HttpRequestNode( node_id="http-node", - config=HttpRequestNodeData.model_validate(node_data), + data=HttpRequestNodeData.model_validate(node_data), graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, http_request_config=HTTP_REQUEST_CONFIG, diff --git a/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py b/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py index 0659984c76..715292b85c 100644 --- a/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py +++ b/api/tests/unit_tests/core/workflow/nodes/human_input/test_entities.py @@ -149,7 +149,7 @@ def _build_human_input_node( ) return HumanInputNode( node_id=node_id, - config=typed_node_data, + data=typed_node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, runtime=runtime, @@ -241,16 +241,16 @@ class TestUserAction: def test_user_action_length_boundaries(self): """Test user action id and title length boundaries.""" - action = UserAction(id="a" * 20, title="b" * 20) + action = UserAction(id="a" * 20, title="b" * 100) assert action.id == "a" * 20 - assert action.title == "b" * 20 + assert action.title == "b" * 100 @pytest.mark.parametrize( ("field_name", "value"), [ ("id", "a" * 21), - ("title", "b" * 21), + ("title", "b" * 101), ], ) def test_user_action_length_limits(self, field_name: str, value: str): @@ -427,7 +427,7 @@ class TestHumanInputNodeVariableResolution: """Tests for resolving variable-based defaults in HumanInputNode.""" def test_resolves_variable_defaults(self): - variable_pool = VariablePool( + variable_pool = 
VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", @@ -504,7 +504,7 @@ class TestHumanInputNodeVariableResolution: assert params.resolved_default_values == expected_values def test_debugger_falls_back_to_recipient_token_when_webapp_disabled(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", @@ -565,7 +565,7 @@ class TestHumanInputNodeVariableResolution: assert not hasattr(pause_event.reason, "form_token") def test_webapp_runtime_keeps_form_visible_in_ui_when_webapp_delivery_is_enabled(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", @@ -631,7 +631,7 @@ class TestHumanInputNodeVariableResolution: assert params.display_in_ui is True def test_debugger_debug_mode_overrides_email_recipients(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user-123", app_id="app", @@ -748,7 +748,7 @@ class TestHumanInputNodeRenderedContent: """Tests for rendering submitted content.""" def test_replaces_outputs_placeholders_after_submission(self): - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="user", app_id="app", diff --git a/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py b/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py index 4a9438b14f..741b104393 100644 --- a/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py +++ b/api/tests/unit_tests/core/workflow/nodes/human_input/test_human_input_form_filled_event.py @@ -40,7 +40,7 @@ def _create_human_input_node( ) return HumanInputNode( node_id=config["id"], - config=node_data, + data=node_data, 
graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, form_repository=repo, @@ -51,7 +51,11 @@ def _create_human_input_node( def _build_node(form_content: str = "Please enter your name:\n\n{{#$output.name#}}") -> HumanInputNode: system_variables = default_system_variables() graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=system_variables, user_inputs={}, environment_variables=[]), + variable_pool=VariablePool.from_bootstrap( + system_variables=system_variables, + user_inputs={}, + environment_variables=[], + ), start_at=0.0, ) graph_init_params = GraphInitParams( @@ -114,7 +118,11 @@ def _build_node(form_content: str = "Please enter your name:\n\n{{#$output.name# def _build_timeout_node() -> HumanInputNode: system_variables = default_system_variables() graph_runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=system_variables, user_inputs={}, environment_variables=[]), + variable_pool=VariablePool.from_bootstrap( + system_variables=system_variables, + user_inputs={}, + environment_variables=[], + ), start_at=0.0, ) graph_init_params = GraphInitParams( diff --git a/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration_child_engine_errors.py b/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration_child_engine_errors.py index 8ffce39cd6..18ed7a0b1d 100644 --- a/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration_child_engine_errors.py +++ b/api/tests/unit_tests/core/workflow/nodes/iteration/test_iteration_child_engine_errors.py @@ -32,7 +32,7 @@ class _MissingGraphBuilder: def _build_runtime_state() -> GraphRuntimeState: return GraphRuntimeState( - variable_pool=VariablePool(system_variables=default_system_variables(), user_inputs={}), + variable_pool=VariablePool.from_bootstrap(system_variables=default_system_variables(), user_inputs={}), start_at=0.0, ) @@ -46,7 +46,7 @@ def _build_iteration_node( init_params = 
build_test_graph_init_params(graph_config=graph_config) return IterationNode( node_id="iteration-node", - config=IterationNodeData( + data=IterationNodeData( type="iteration", title="Iteration", iterator_selector=["start", "items"], diff --git a/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py b/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py index 89433b34e6..0d760a2db7 100644 --- a/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/knowledge_index/test_knowledge_index_node.py @@ -41,7 +41,7 @@ def mock_graph_init_params(): @pytest.fixture def mock_graph_runtime_state(): """Create mock GraphRuntimeState.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id=str(uuid.uuid4()), files=[]), user_inputs={}, environment_variables=[], @@ -103,7 +103,7 @@ def _build_node( ) -> KnowledgeIndexNode: return KnowledgeIndexNode( node_id=node_id, - config=( + data=( node_data if isinstance(node_data, KnowledgeIndexNodeData) else KnowledgeIndexNodeData.model_validate(node_data) diff --git a/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py b/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py index d77a2ce363..3c821e75ba 100644 --- a/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/knowledge_retrieval/test_knowledge_retrieval_node.py @@ -47,7 +47,7 @@ def mock_graph_init_params(): @pytest.fixture def mock_graph_runtime_state(): """Create mock GraphRuntimeState.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id=str(uuid.uuid4()), files=[]), user_inputs={}, environment_variables=[], @@ -118,7 +118,7 
@@ class TestKnowledgeRetrievalNode: # Act node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -147,7 +147,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -206,7 +206,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -250,7 +250,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -286,7 +286,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -321,7 +321,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -362,7 +362,7 @@ class TestKnowledgeRetrievalNode: 
node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -401,7 +401,7 @@ class TestKnowledgeRetrievalNode: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -482,7 +482,7 @@ class TestFetchDatasetRetriever: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -519,7 +519,7 @@ class TestFetchDatasetRetriever: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -574,7 +574,7 @@ class TestFetchDatasetRetriever: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -622,7 +622,7 @@ class TestFetchDatasetRetriever: node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -683,7 +683,7 @@ class TestFetchDatasetRetriever: config = {"id": node_id, "data": 
node_data.model_dump()} node = KnowledgeRetrievalNode( node_id=node_id, - config=KnowledgeRetrievalNodeData.model_validate(config["data"]), + data=KnowledgeRetrievalNodeData.model_validate(config["data"]), graph_init_params=mock_graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/list_operator/node_spec.py b/api/tests/unit_tests/core/workflow/nodes/list_operator/node_spec.py index 388654f279..20b94d5d50 100644 --- a/api/tests/unit_tests/core/workflow/nodes/list_operator/node_spec.py +++ b/api/tests/unit_tests/core/workflow/nodes/list_operator/node_spec.py @@ -16,10 +16,10 @@ class TestListOperatorNode: """Comprehensive tests for ListOperatorNode.""" @staticmethod - def _build_node(*, config, graph_init_params, graph_runtime_state): + def _build_node(*, data, graph_init_params, graph_runtime_state): return ListOperatorNode( node_id="test", - config=config if isinstance(config, ListOperatorNodeData) else ListOperatorNodeData.model_validate(config), + data=data if isinstance(data, ListOperatorNodeData) else ListOperatorNodeData.model_validate(data), graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, ) @@ -65,7 +65,7 @@ class TestListOperatorNode: def _create_node(config, mock_variable): mock_graph_runtime_state.variable_pool.get.return_value = mock_variable return self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -83,7 +83,7 @@ class TestListOperatorNode: } node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -127,7 +127,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -153,7 +153,7 @@ class 
TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -177,7 +177,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -201,7 +201,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -228,7 +228,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -255,7 +255,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -282,7 +282,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -312,7 +312,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -335,7 +335,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = None node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, 
graph_runtime_state=mock_graph_runtime_state, ) @@ -359,7 +359,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -384,7 +384,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -408,7 +408,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -432,7 +432,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -456,7 +456,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) @@ -483,7 +483,7 @@ class TestListOperatorNode: mock_graph_runtime_state.variable_pool.get.return_value = mock_var node = self._build_node( - config=config, + data=config, graph_init_params=graph_init_params, graph_runtime_state=mock_graph_runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py b/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py index c09f2d3fb6..fb50723402 100644 --- a/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/llm/test_node.py @@ -15,7 +15,7 @@ from core.app.llm.model_access import ( ) from core.entities.provider_configuration import 
ProviderConfiguration, ProviderModelBundle from core.entities.provider_entities import CustomConfiguration, SystemConfiguration -from core.plugin.impl.model_runtime_factory import create_plugin_model_runtime +from core.plugin.impl.model_runtime_factory import create_plugin_model_assembly from core.prompt.entities.advanced_prompt_entities import MemoryConfig from core.workflow.system_variables import default_system_variables from graphon.entities import GraphInitParams @@ -187,7 +187,7 @@ def graph_init_params() -> GraphInitParams: @pytest.fixture def graph_runtime_state() -> GraphRuntimeState: - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -208,7 +208,7 @@ def llm_node( http_client = mock.MagicMock() node = LLMNode( node_id="1", - config=llm_node_data, + data=llm_node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, credentials_provider=mock_credentials_provider, @@ -241,9 +241,10 @@ def model_config(monkeypatch: pytest.MonkeyPatch): ) # Create actual provider and model type instances - model_provider_factory = ModelProviderFactory(model_runtime=create_plugin_model_runtime(tenant_id="test")) + model_assembly = create_plugin_model_assembly(tenant_id="test") + model_provider_factory = model_assembly.model_provider_factory provider_instance = model_provider_factory.get_model_provider("openai") - model_type_instance = model_provider_factory.get_model_type_instance("openai", ModelType.LLM) + model_type_instance = model_assembly.create_model_type_instance(provider="openai", model_type=ModelType.LLM) # Create a ProviderModelBundle provider_model_bundle = ProviderModelBundle( @@ -1173,7 +1174,7 @@ def llm_node_for_multimodal(llm_node_data, graph_init_params, graph_runtime_stat http_client = mock.MagicMock() node = LLMNode( node_id="1", - config=llm_node_data, + data=llm_node_data, graph_init_params=graph_init_params, 
graph_runtime_state=graph_runtime_state, credentials_provider=mock_credentials_provider, diff --git a/api/tests/unit_tests/core/workflow/nodes/template_transform/template_transform_node_spec.py b/api/tests/unit_tests/core/workflow/nodes/template_transform/template_transform_node_spec.py index 892f6cc586..dd57dde1fe 100644 --- a/api/tests/unit_tests/core/workflow/nodes/template_transform/template_transform_node_spec.py +++ b/api/tests/unit_tests/core/workflow/nodes/template_transform/template_transform_node_spec.py @@ -28,7 +28,7 @@ def _build_template_transform_node( ) return TemplateTransformNode( node_id=node_id, - config=typed_node_data, + data=typed_node_data, graph_init_params=graph_init_params, graph_runtime_state=graph_runtime_state, **kwargs, diff --git a/api/tests/unit_tests/core/workflow/nodes/template_transform/test_template_transform_node.py b/api/tests/unit_tests/core/workflow/nodes/template_transform/test_template_transform_node.py index a846efbb43..c25ac7da0f 100644 --- a/api/tests/unit_tests/core/workflow/nodes/template_transform/test_template_transform_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/template_transform/test_template_transform_node.py @@ -39,7 +39,7 @@ def mock_graph_runtime_state(): def test_node_uses_default_max_output_length_when_not_overridden(graph_init_params, mock_graph_runtime_state): node = TemplateTransformNode( node_id="test_node", - config=TemplateTransformNodeData( + data=TemplateTransformNodeData( title="Template Transform", type="template-transform", variables=[], diff --git a/api/tests/unit_tests/core/workflow/nodes/test_base_node.py b/api/tests/unit_tests/core/workflow/nodes/test_base_node.py index 364408ead6..a05151f79b 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_base_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_base_node.py @@ -35,7 +35,10 @@ def _build_context(graph_config: Mapping[str, object]) -> tuple[GraphInitParams, invoke_from="debugger", ) runtime_state = 
GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="user", files=[]), + user_inputs={}, + ), start_at=0.0, ) return init_params, runtime_state @@ -62,7 +65,7 @@ def test_node_hydrates_data_during_initialization(): node = _SampleNode( node_id="node-1", - config=_build_node_data(), + data=_build_node_data(), graph_init_params=init_params, graph_runtime_state=runtime_state, ) @@ -82,13 +85,16 @@ def test_node_accepts_invoke_from_enum(): invoke_from=InvokeFrom.DEBUGGER, ) runtime_state = GraphRuntimeState( - variable_pool=VariablePool(system_variables=build_system_variables(user_id="user", files=[]), user_inputs={}), + variable_pool=VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="user", files=[]), + user_inputs={}, + ), start_at=0.0, ) node = _SampleNode( node_id="node-1", - config=_build_node_data(), + data=_build_node_data(), graph_init_params=init_params, graph_runtime_state=runtime_state, ) @@ -140,7 +146,7 @@ def test_node_hydration_preserves_compatibility_extra_fields(): node = _SampleNode( node_id="node-1", - config=node_config["data"], + data=node_config["data"], graph_init_params=init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py b/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py index dd75b32593..4c67f3fb02 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_document_extractor_node.py @@ -49,7 +49,7 @@ def document_extractor_node(graph_init_params): http_client = Mock() node = DocumentExtractorNode( node_id="test_node_id", - config=node_data, + data=node_data, graph_init_params=graph_init_params, graph_runtime_state=Mock(), http_client=http_client, @@ -186,12 
+186,13 @@ def test_run_extract_text( monkeypatch.setattr("graphon.file.file_manager.download", mock_download) + dispatch_mock = None if mime_type == "application/pdf": - mock_pdf_extract = Mock(return_value=expected_text[0]) - monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_from_pdf", mock_pdf_extract) + dispatch_mock = Mock(return_value=expected_text[0]) + monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_by_file_extension", dispatch_mock) elif mime_type.startswith("application/vnd.openxmlformats"): - mock_docx_extract = Mock(return_value=expected_text[0]) - monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_from_docx", mock_docx_extract) + dispatch_mock = Mock(return_value=expected_text[0]) + monkeypatch.setattr("graphon.nodes.document_extractor.node._extract_text_by_mime_type", dispatch_mock) result = document_extractor_node._run() @@ -200,6 +201,19 @@ def test_run_extract_text( assert result.outputs is not None assert result.outputs["text"] == ArrayStringSegment(value=expected_text) + if mime_type == "application/pdf": + dispatch_mock.assert_called_once_with( + file_content=file_content, + file_extension=extension, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + elif mime_type.startswith("application/vnd.openxmlformats"): + dispatch_mock.assert_called_once_with( + file_content=file_content, + mime_type=mime_type, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + if transfer_method == FileTransferMethod.REMOTE_URL: document_extractor_node._http_client.get.assert_called_once_with("https://example.com/file.txt") elif transfer_method == FileTransferMethod.LOCAL_FILE: @@ -439,24 +453,42 @@ def test_extract_text_from_file_routes_excel_inputs(document_extractor_node, ext file.extension = extension file.mime_type = mime_type - with ( - patch( - "graphon.nodes.document_extractor.node._download_file_content", - return_value=b"excel", - 
), - patch( - "graphon.nodes.document_extractor.node._extract_text_from_excel", - return_value="excel text", - ) as mock_extract, + with patch( + "graphon.nodes.document_extractor.node._download_file_content", + return_value=b"excel", ): - result = _extract_text_from_file( - document_extractor_node.http_client, - file, - unstructured_api_config=document_extractor_node._unstructured_api_config, - ) + if extension: + with patch( + "graphon.nodes.document_extractor.node._extract_text_by_file_extension", + return_value="excel text", + ) as mock_extract: + result = _extract_text_from_file( + document_extractor_node.http_client, + file, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + mock_extract.assert_called_once_with( + file_content=b"excel", + file_extension=extension, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + else: + with patch( + "graphon.nodes.document_extractor.node._extract_text_by_mime_type", + return_value="excel text", + ) as mock_extract: + result = _extract_text_from_file( + document_extractor_node.http_client, + file, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) + mock_extract.assert_called_once_with( + file_content=b"excel", + mime_type=mime_type, + unstructured_api_config=document_extractor_node._unstructured_api_config, + ) assert result == "excel text" - mock_extract.assert_called_once_with(b"excel") def test_extract_text_from_file_rejects_missing_extension_and_mime_type(document_extractor_node): diff --git a/api/tests/unit_tests/core/workflow/nodes/test_if_else.py b/api/tests/unit_tests/core/workflow/nodes/test_if_else.py index aa9a1360b0..5965645c4f 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_if_else.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_if_else.py @@ -29,7 +29,7 @@ def _build_if_else_node( node_id=str(uuid.uuid4()), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, - config=node_data if 
isinstance(node_data, IfElseNodeData) else IfElseNodeData.model_validate(node_data), + data=node_data if isinstance(node_data, IfElseNodeData) else IfElseNodeData.model_validate(node_data), ) @@ -48,7 +48,10 @@ def test_execute_if_else_result_true(): ) # construct variable pool - pool = VariablePool(system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}) + pool = VariablePool.from_bootstrap( + system_variables=build_system_variables(user_id="aaa", files=[]), + user_inputs={}, + ) pool.add(["start", "array_contains"], ["ab", "def"]) pool.add(["start", "array_not_contains"], ["ac", "def"]) pool.add(["start", "contains"], "cabcde") @@ -148,7 +151,7 @@ def test_execute_if_else_result_false(): ) # construct variable pool - pool = VariablePool( + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="aaa", files=[]), user_inputs={}, environment_variables=[], @@ -305,7 +308,7 @@ def test_execute_if_else_boolean_conditions(condition: Condition): ) # construct variable pool with boolean values - pool = VariablePool( + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(files=[], user_id="aaa"), ) pool.add(["start", "bool_true"], True) @@ -359,7 +362,7 @@ def test_execute_if_else_boolean_false_conditions(): ) # construct variable pool with boolean values - pool = VariablePool( + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(files=[], user_id="aaa"), ) pool.add(["start", "bool_true"], True) @@ -424,7 +427,7 @@ def test_execute_if_else_boolean_cases_structure(): ) # construct variable pool with boolean values - pool = VariablePool( + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(files=[], user_id="aaa"), ) pool.add(["start", "bool_true"], True) diff --git a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py index 465a4c0ff4..1b4cecc757 100644 --- 
a/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_list_operator.py @@ -22,7 +22,7 @@ from graphon.variables import ArrayFileSegment def _build_list_operator_node(node_data: ListOperatorNodeData, graph_init_params) -> ListOperatorNode: return ListOperatorNode( node_id="test_node_id", - config=node_data, + data=node_data, graph_init_params=graph_init_params, graph_runtime_state=MagicMock(), ) diff --git a/api/tests/unit_tests/core/workflow/nodes/test_start_node_json_object.py b/api/tests/unit_tests/core/workflow/nodes/test_start_node_json_object.py index 5655f80737..f890f79511 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_start_node_json_object.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_start_node_json_object.py @@ -31,7 +31,7 @@ def make_start_node(user_inputs, variables): return StartNode( node_id="start", - config=node_data, + data=node_data, graph_init_params=build_test_graph_init_params( workflow_id="wf", graph_config={}, @@ -260,7 +260,7 @@ def test_start_node_outputs_full_variable_pool_snapshot(): graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter()) node = StartNode( node_id="start", - config=node_data, + data=node_data, graph_init_params=build_test_graph_init_params( workflow_id="wf", graph_config={}, diff --git a/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py b/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py index 284af68319..4aa5803ac7 100644 --- a/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/tool/test_tool_node.py @@ -99,7 +99,7 @@ def tool_node(monkeypatch) -> ToolNode: call_depth=0, ) - variable_pool = VariablePool(system_variables=build_system_variables(user_id="user-id")) + variable_pool = VariablePool.from_bootstrap(system_variables=build_system_variables(user_id="user-id")) graph_runtime_state = 
GraphRuntimeState(variable_pool=variable_pool, start_at=0.0) config = graph_config["nodes"][0] @@ -110,7 +110,7 @@ def tool_node(monkeypatch) -> ToolNode: node = ToolNode( node_id="node-instance", - config=ToolNodeData.model_validate(config["data"]), + data=ToolNodeData.model_validate(config["data"]), graph_init_params=init_params, graph_runtime_state=graph_runtime_state, tool_file_manager_factory=tool_file_manager_factory, diff --git a/api/tests/unit_tests/core/workflow/nodes/trigger_plugin/test_trigger_event_node.py b/api/tests/unit_tests/core/workflow/nodes/trigger_plugin/test_trigger_event_node.py index e3b5e3b591..c5ac8d2ce2 100644 --- a/api/tests/unit_tests/core/workflow/nodes/trigger_plugin/test_trigger_event_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/trigger_plugin/test_trigger_event_node.py @@ -44,7 +44,7 @@ def test_trigger_event_node_run_populates_trigger_info_metadata() -> None: init_params, runtime_state = _build_context(graph_config={}) node = TriggerEventNode( node_id="node-1", - config=_build_node_data(), + data=_build_node_data(), graph_init_params=init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_file_conversion.py b/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_file_conversion.py index 07d03bec05..fccb5ab1c3 100644 --- a/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_file_conversion.py +++ b/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_file_conversion.py @@ -52,7 +52,7 @@ def create_webhook_node( node = TriggerWebhookNode( node_id="webhook-node-1", - config=webhook_data, + data=webhook_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_node.py b/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_node.py index b839490d3c..c5ae542d8b 100644 --- 
a/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_node.py +++ b/api/tests/unit_tests/core/workflow/nodes/webhook/test_webhook_node.py @@ -44,7 +44,7 @@ def create_webhook_node(webhook_data: WebhookData, variable_pool: VariablePool) ) node = TriggerWebhookNode( node_id="1", - config=webhook_data, + data=webhook_data, graph_init_params=graph_init_params, graph_runtime_state=runtime_state, ) diff --git a/api/tests/unit_tests/core/workflow/test_node_factory.py b/api/tests/unit_tests/core/workflow/test_node_factory.py index e93a7c7ccd..d6159e84d4 100644 --- a/api/tests/unit_tests/core/workflow/test_node_factory.py +++ b/api/tests/unit_tests/core/workflow/test_node_factory.py @@ -1,3 +1,4 @@ +from collections.abc import Mapping from types import SimpleNamespace from unittest.mock import MagicMock, patch, sentinel @@ -11,19 +12,20 @@ from graphon.entities.base_node_data import BaseNodeData from graphon.enums import BuiltinNodeTypes, NodeType from graphon.nodes.code.entities import CodeLanguage from graphon.nodes.llm.entities import LLMNodeData +from graphon.nodes.llm.node import LLMNode from graphon.variables.segments import StringSegment -def _assert_typed_node_config(config, *, node_id: str, node_type: NodeType, version: str = "1") -> None: +def _assert_constructor_node_data(data, *, node_id: str, node_type: NodeType, version: str = "1") -> None: _ = node_id - if isinstance(config, BaseNodeData): - assert config.type == node_type - assert config.version == version + if isinstance(data, BaseNodeData): + assert data.type == node_type + assert data.version == version return - assert isinstance(config, dict) - assert config["type"] == node_type - assert config["version"] == version + assert isinstance(data, Mapping) + assert data["type"] == node_type + assert data.get("version", "1") == version def _node_constructor(*, return_value): @@ -470,7 +472,7 @@ class TestDifyNodeFactoryCreateNode: matched_node_class.assert_called_once() kwargs = 
matched_node_class.call_args.kwargs assert kwargs["node_id"] == "node-id" - _assert_typed_node_config(kwargs["config"], node_id="node-id", node_type=BuiltinNodeTypes.START, version="9") + _assert_constructor_node_data(kwargs["data"], node_id="node-id", node_type=BuiltinNodeTypes.START, version="9") assert kwargs["graph_init_params"] is sentinel.graph_init_params assert kwargs["graph_runtime_state"] is factory.graph_runtime_state latest_node_class.assert_not_called() @@ -492,7 +494,7 @@ class TestDifyNodeFactoryCreateNode: latest_node_class.assert_called_once() kwargs = latest_node_class.call_args.kwargs assert kwargs["node_id"] == "node-id" - _assert_typed_node_config(kwargs["config"], node_id="node-id", node_type=BuiltinNodeTypes.START, version="9") + _assert_constructor_node_data(kwargs["data"], node_id="node-id", node_type=BuiltinNodeTypes.START, version="9") assert kwargs["graph_init_params"] is sentinel.graph_init_params assert kwargs["graph_runtime_state"] is factory.graph_runtime_state @@ -530,7 +532,7 @@ class TestDifyNodeFactoryCreateNode: assert result is created_node kwargs = constructor.call_args.kwargs assert kwargs["node_id"] == "node-id" - _assert_typed_node_config(kwargs["config"], node_id="node-id", node_type=node_type) + _assert_constructor_node_data(kwargs["data"], node_id="node-id", node_type=node_type) assert kwargs["graph_init_params"] is sentinel.graph_init_params assert kwargs["graph_runtime_state"] is factory.graph_runtime_state @@ -599,11 +601,12 @@ class TestDifyNodeFactoryCreateNode: prepared_llm.assert_called_once_with(sentinel.model_instance) assert kwargs["model_instance"] is wrapped_model_instance - def test_create_node_passes_alias_preserving_llm_config_to_constructor( - self, monkeypatch: pytest.MonkeyPatch, factory - ): + def test_create_node_passes_alias_preserving_llm_data_to_constructor(self, monkeypatch, factory): created_node = object() constructor = _node_constructor(return_value=created_node) + 
constructor.validate_node_data.side_effect = lambda node_data: LLMNodeData.model_validate( + node_data.model_dump(mode="python") if isinstance(node_data, BaseNodeData) else node_data + ) monkeypatch.setattr(factory, "_resolve_node_class", MagicMock(return_value=constructor)) monkeypatch.setattr(factory, "_build_llm_compatible_node_init_kwargs", MagicMock(return_value={})) @@ -629,10 +632,56 @@ class TestDifyNodeFactoryCreateNode: factory.create_node(node_config) - config = constructor.call_args.kwargs["config"] - assert isinstance(config, dict) - assert config["structured_output_enabled"] is True - assert "structured_output_switch_on" not in config + data = constructor.call_args.kwargs["data"] + assert isinstance(data, Mapping) + assert data["structured_output_enabled"] is True + assert "structured_output_switch_on" not in data + assert LLMNodeData.model_validate(data).structured_output_enabled is True + + def test_create_node_preserves_structured_output_switch_after_graphon_constructor(self, monkeypatch, factory): + factory.graph_init_params = SimpleNamespace( + workflow_id="workflow-id", + graph_config={}, + run_context={}, + call_depth=0, + ) + monkeypatch.setattr(factory, "_resolve_node_class", MagicMock(return_value=LLMNode)) + monkeypatch.setattr( + factory, + "_build_llm_compatible_node_init_kwargs", + MagicMock( + return_value={ + "model_instance": sentinel.model_instance, + "llm_file_saver": sentinel.llm_file_saver, + "prompt_message_serializer": sentinel.prompt_message_serializer, + } + ), + ) + + node_config = { + "id": "llm-node-id", + "data": { + "type": BuiltinNodeTypes.LLM, + "title": "LLM", + "model": {"provider": "provider", "name": "model", "mode": "chat", "completion_params": {}}, + "prompt_template": [{"role": "system", "text": "x"}], + "context": {"enabled": False, "variable_selector": []}, + "vision": {"enabled": False}, + "structured_output_enabled": True, + "structured_output": { + "schema": { + "type": "object", + "properties": {"type": 
{"type": "string"}}, + "required": ["type"], + } + }, + }, + } + + node = factory.create_node(node_config) + + assert node.node_data.structured_output_switch_on is True + assert node.node_data.structured_output_enabled is True @pytest.mark.parametrize( ("node_type", "constructor_name", "expected_extra_kwargs"), @@ -711,7 +760,7 @@ class TestDifyNodeFactoryCreateNode: constructor_kwargs = constructor.call_args.kwargs assert constructor_kwargs["node_id"] == "node-id" - _assert_typed_node_config(constructor_kwargs["config"], node_id="node-id", node_type=node_type) + _assert_constructor_node_data(constructor_kwargs["data"], node_id="node-id", node_type=node_type) assert constructor_kwargs["graph_init_params"] is sentinel.graph_init_params assert constructor_kwargs["graph_runtime_state"] is factory.graph_runtime_state assert constructor_kwargs["credentials_provider"] is sentinel.credentials_provider diff --git a/api/tests/unit_tests/core/workflow/test_variable_pool.py b/api/tests/unit_tests/core/workflow/test_variable_pool.py index 9dab38ed8e..0017cd8d3f 100644 --- a/api/tests/unit_tests/core/workflow/test_variable_pool.py +++ b/api/tests/unit_tests/core/workflow/test_variable_pool.py @@ -109,8 +109,8 @@ class TestVariablePool: assert pool.get([ENVIRONMENT_VARIABLE_NODE_ID, "env_var_1"]) is not None assert pool.get([CONVERSATION_VARIABLE_NODE_ID, "conv_var_1"]) is not None - def test_constructor_loads_legacy_bootstrap_kwargs(self): - pool = VariablePool( + def test_from_bootstrap_loads_legacy_bootstrap_kwargs(self): + pool = VariablePool.from_bootstrap( system_variables=build_system_variables(user_id="test_user_id"), environment_variables=[StringVariable(name="env_var", value="env-value")], conversation_variables=[StringVariable(name="conv_var", value="conv-value")], diff --git a/api/tests/unit_tests/core/workflow/test_workflow_entry.py b/api/tests/unit_tests/core/workflow/test_workflow_entry.py index 2e9e3468fd..661882f013 100644 --- 
a/api/tests/unit_tests/core/workflow/test_workflow_entry.py +++ b/api/tests/unit_tests/core/workflow/test_workflow_entry.py @@ -55,7 +55,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_to_variable_pool_with_system_variables(self): """Test mapping system variables from user inputs to variable pool.""" # Initialize variable pool with system variables - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=build_system_variables( user_id="test_user_id", app_id="test_app_id", @@ -128,7 +128,7 @@ class TestWorkflowEntry: return NodeConfigDictAdapter.validate_python(node_config) workflow = StubWorkflow() - variable_pool = VariablePool(system_variables=default_system_variables(), user_inputs={}) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables(), user_inputs={}) expected_limits = CodeNodeLimits( max_string_length=dify_config.CODE_MAX_STRING_LENGTH, max_number=dify_config.CODE_MAX_NUMBER, @@ -157,7 +157,7 @@ class TestWorkflowEntry: """Test mapping environment variables from user inputs to variable pool.""" # Initialize variable pool with environment variables env_var = StringVariable(name="API_KEY", value="existing_key") - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), environment_variables=[env_var], user_inputs={}, @@ -198,7 +198,7 @@ class TestWorkflowEntry: """Test mapping conversation variables from user inputs to variable pool.""" # Initialize variable pool with conversation variables conv_var = StringVariable(name="last_message", value="Hello") - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), conversation_variables=[conv_var], user_inputs={}, @@ -239,7 +239,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_to_variable_pool_with_regular_variables(self): """Test mapping regular node variables from user inputs to variable 
pool.""" # Initialize empty variable pool - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -281,7 +281,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_with_file_handling(self): """Test mapping file inputs from user inputs to variable pool.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -340,7 +340,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_missing_variable_error(self): """Test that mapping raises error when required variable is missing.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -366,7 +366,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_with_alternative_key_format(self): """Test mapping with alternative key format (without node prefix).""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -396,7 +396,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_with_complex_selectors(self): """Test mapping with complex node variable keys.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -432,7 +432,7 @@ class TestWorkflowEntry: def test_mapping_user_inputs_invalid_node_variable(self): """Test that mapping handles invalid node variable format.""" - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( system_variables=default_system_variables(), user_inputs={}, ) @@ -463,7 +463,7 @@ class TestWorkflowEntry: env_var = StringVariable(name="API_KEY", value="existing_key") conv_var = StringVariable(name="session_id", value="session123") - variable_pool = VariablePool( + variable_pool = VariablePool.from_bootstrap( 
system_variables=build_system_variables( user_id="test_user", app_id="test_app", diff --git a/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py b/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py index 3978cbb1a0..a57cdd1337 100644 --- a/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py +++ b/api/tests/unit_tests/core/workflow/test_workflow_entry_helpers.py @@ -7,7 +7,6 @@ import pytest from core.app.apps.exc import GenerateTaskStoppedError from core.app.entities.app_invoke_entities import InvokeFrom, UserFrom -from core.model_manager import ModelInstance from core.workflow import workflow_entry from core.workflow.system_variables import default_system_variables from graphon.entities.base_node_data import BaseNodeData @@ -16,10 +15,12 @@ from graphon.errors import WorkflowNodeRunFailedError from graphon.file import File, FileTransferMethod, FileType from graphon.graph import Graph from graphon.graph_events import GraphRunFailedEvent -from graphon.model_runtime.entities.llm_entities import LLMUsage +from graphon.model_runtime.entities.llm_entities import LLMMode, LLMUsage from graphon.node_events import NodeRunResult from graphon.nodes import BuiltinNodeTypes from graphon.nodes.base.node import Node +from graphon.nodes.llm.entities import ContextConfig, LLMNodeData, ModelConfig +from graphon.nodes.question_classifier.entities import QuestionClassifierNodeData from graphon.runtime import ChildGraphNotFoundError, VariablePool from graphon.variables.variables import StringVariable from tests.workflow_test_utils import build_test_graph_init_params, build_test_variable_pool @@ -29,9 +30,30 @@ def _build_typed_node_config(node_type: NodeType): return {"id": "node-id", "data": BaseNodeData(type=node_type)} -def _build_wrapped_model_instance() -> tuple[SimpleNamespace, ModelInstance]: - raw_model_instance = ModelInstance.__new__(ModelInstance) - return SimpleNamespace(_model_instance=raw_model_instance), raw_model_instance 
+def _build_model_config(*, provider: str = "openai", model_name: str = "gpt-4o") -> ModelConfig: + return ModelConfig(provider=provider, name=model_name, mode=LLMMode.CHAT) + + +def _build_llm_node_data(*, provider: str = "openai", model_name: str = "gpt-4o") -> LLMNodeData: + return LLMNodeData( + type=BuiltinNodeTypes.LLM, + title="Child Model", + model=_build_model_config(provider=provider, model_name=model_name), + prompt_template=[], + context=ContextConfig(enabled=False), + ) + + +def _build_question_classifier_node_data( + *, provider: str = "openai", model_name: str = "gpt-4o" +) -> QuestionClassifierNodeData: + return QuestionClassifierNodeData( + type=BuiltinNodeTypes.QUESTION_CLASSIFIER, + title="Child Model", + query_variable_selector=["sys", "query"], + model=_build_model_config(provider=provider, model_name=model_name), + classes=[], + ) class _FakeModelNodeMixin: @@ -40,22 +62,26 @@ class _FakeModelNodeMixin: return "1" def post_init(self) -> None: - self.model_instance, self.raw_model_instance = _build_wrapped_model_instance() + self.model_instance = SimpleNamespace(provider="stale-provider", model_name="stale-model") self.usage_snapshot = LLMUsage.empty_usage() self.usage_snapshot.total_tokens = 1 def _run(self) -> NodeRunResult: return NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, + inputs={ + "model_provider": self.node_data.model.provider, + "model_name": self.node_data.model.name, + }, llm_usage=self.usage_snapshot, ) -class _FakeLLMNode(_FakeModelNodeMixin, Node[BaseNodeData]): +class _FakeLLMNode(_FakeModelNodeMixin, Node[LLMNodeData]): node_type = BuiltinNodeTypes.LLM -class _FakeQuestionClassifierNode(_FakeModelNodeMixin, Node[BaseNodeData]): +class _FakeQuestionClassifierNode(_FakeModelNodeMixin, Node[QuestionClassifierNodeData]): node_type = BuiltinNodeTypes.QUESTION_CLASSIFIER @@ -75,7 +101,7 @@ class TestWorkflowChildEngineBuilder: assert result is expected def 
test_build_child_engine_raises_when_root_node_is_missing(self): - builder = workflow_entry._WorkflowChildEngineBuilder() + builder = workflow_entry._WorkflowChildEngineBuilder(tenant_id="tenant-id") graph_init_params = SimpleNamespace(graph_config={"nodes": []}) parent_graph_runtime_state = SimpleNamespace( execution_context=sentinel.execution_context, @@ -92,7 +118,7 @@ class TestWorkflowChildEngineBuilder: ) def test_build_child_engine_constructs_graph_engine_with_quota_layer_only(self): - builder = workflow_entry._WorkflowChildEngineBuilder() + builder = workflow_entry._WorkflowChildEngineBuilder(tenant_id="tenant-id") graph_init_params = SimpleNamespace(graph_config={"nodes": [{"id": "root"}]}) parent_graph_runtime_state = SimpleNamespace( execution_context=sentinel.execution_context, @@ -114,7 +140,7 @@ class TestWorkflowChildEngineBuilder: patch.object(workflow_entry, "GraphEngine", return_value=child_engine) as graph_engine_cls, patch.object(workflow_entry, "GraphEngineConfig", return_value=sentinel.graph_engine_config), patch.object(workflow_entry, "InMemoryChannel", return_value=sentinel.command_channel), - patch.object(workflow_entry, "LLMQuotaLayer", return_value=sentinel.llm_quota_layer), + patch.object(workflow_entry, "LLMQuotaLayer", return_value=sentinel.llm_quota_layer) as llm_quota_layer_cls, ): result = builder.build_child_engine( workflow_id="workflow-id", @@ -147,11 +173,12 @@ class TestWorkflowChildEngineBuilder: config=sentinel.graph_engine_config, child_engine_builder=builder, ) + llm_quota_layer_cls.assert_called_once_with(tenant_id="tenant-id") assert child_engine.layer.call_args_list == [((sentinel.llm_quota_layer,), {})] @pytest.mark.parametrize("node_cls", [_FakeLLMNode, _FakeQuestionClassifierNode]) def test_build_child_engine_runs_llm_quota_layer_for_child_model_nodes(self, node_cls): - builder = workflow_entry._WorkflowChildEngineBuilder() + builder = workflow_entry._WorkflowChildEngineBuilder(tenant_id="tenant-id") graph_init_params 
= build_test_graph_init_params( graph_config={"nodes": [{"id": "root"}], "edges": []}, ) @@ -163,12 +190,10 @@ class TestWorkflowChildEngineBuilder: def build_graph(*, graph_config, node_factory, root_node_id): _ = graph_config + node_data = _build_llm_node_data() if node_cls is _FakeLLMNode else _build_question_classifier_node_data() node = node_cls( node_id=root_node_id, - config=BaseNodeData( - type=node_cls.node_type, - title="Child Model", - ), + data=node_data, graph_init_params=node_factory.graph_init_params, graph_runtime_state=node_factory.graph_runtime_state, ) @@ -191,8 +216,8 @@ class TestWorkflowChildEngineBuilder: ), ), patch.object(workflow_entry.Graph, "init", side_effect=build_graph), - patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available") as ensure_quota, - patch("core.app.workflow.layers.llm_quota.deduct_llm_quota") as deduct_quota, + patch("core.app.workflow.layers.llm_quota.ensure_llm_quota_available_for_model") as ensure_quota, + patch("core.app.workflow.layers.llm_quota.deduct_llm_quota_for_model") as deduct_quota, ): child_engine = builder.build_child_engine( workflow_id="workflow-id", @@ -203,10 +228,15 @@ class TestWorkflowChildEngineBuilder: list(child_engine.run()) node = created_node["node"] - ensure_quota.assert_called_once_with(model_instance=node.raw_model_instance) + ensure_quota.assert_called_once_with( + tenant_id="tenant-id", + provider=node.node_data.model.provider, + model=node.node_data.model.name, + ) deduct_quota.assert_called_once_with( - tenant_id="tenant", - model_instance=node.raw_model_instance, + tenant_id="tenant-id", + provider=node.node_data.model.provider, + model=node.node_data.model.name, usage=node.usage_snapshot, ) @@ -252,7 +282,7 @@ class TestWorkflowEntryInit: "ExecutionLimitsLayer", return_value=execution_limits_layer, ) as execution_limits_layer_cls, - patch.object(workflow_entry, "LLMQuotaLayer", return_value=llm_quota_layer), + patch.object(workflow_entry, "LLMQuotaLayer", 
return_value=llm_quota_layer) as llm_quota_layer_cls, patch.object(workflow_entry, "ObservabilityLayer", return_value=observability_layer), ): entry = workflow_entry.WorkflowEntry( @@ -291,6 +321,7 @@ class TestWorkflowEntryInit: max_steps=workflow_entry.dify_config.WORKFLOW_MAX_EXECUTION_STEPS, max_time=workflow_entry.dify_config.WORKFLOW_MAX_EXECUTION_TIME, ) + llm_quota_layer_cls.assert_called_once_with(tenant_id="tenant-id") assert graph_engine.layer.call_args_list == [ ((debug_layer,), {}), ((execution_limits_layer,), {}), @@ -334,7 +365,7 @@ class TestWorkflowEntrySingleStepRun: def extract_variable_selector_to_variable_mapping(**_kwargs): return {} - variable_pool = VariablePool(system_variables=default_system_variables(), user_inputs={}) + variable_pool = VariablePool.from_bootstrap(system_variables=default_system_variables(), user_inputs={}) variable_loader = MagicMock() variable_loader.load_variables.return_value = [ StringVariable( diff --git a/api/tests/unit_tests/events/test_update_provider_when_message_created.py b/api/tests/unit_tests/events/test_update_provider_when_message_created.py new file mode 100644 index 0000000000..9cb8ca7854 --- /dev/null +++ b/api/tests/unit_tests/events/test_update_provider_when_message_created.py @@ -0,0 +1,130 @@ +from types import SimpleNamespace +from unittest.mock import patch +from uuid import uuid4 + +from sqlalchemy import create_engine, select + +from core.app.entities.app_invoke_entities import ChatAppGenerateEntity +from core.entities.provider_entities import ProviderQuotaType, QuotaUnit +from events.event_handlers import update_provider_when_message_created +from models import TenantCreditPool +from models.provider import ProviderType + + +def test_message_created_trial_credit_accounting_does_not_raise_when_balance_is_insufficient() -> None: + engine = create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + tenant_id = str(uuid4()) + pool_id = str(uuid4()) + with engine.begin() as 
connection: + connection.execute( + TenantCreditPool.__table__.insert(), + { + "id": pool_id, + "tenant_id": tenant_id, + "pool_type": ProviderQuotaType.TRIAL, + "quota_limit": 10, + "quota_used": 9, + }, + ) + + system_configuration = SimpleNamespace( + current_quota_type=ProviderQuotaType.TRIAL, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.TRIAL, + quota_unit=QuotaUnit.TOKENS, + quota_limit=10, + ) + ], + ) + application_generate_entity = ChatAppGenerateEntity.model_construct( + app_config=SimpleNamespace(tenant_id=tenant_id), + model_conf=SimpleNamespace( + provider="openai", + model="gpt-4o", + provider_model_bundle=SimpleNamespace( + configuration=SimpleNamespace( + using_provider_type=ProviderType.SYSTEM, + system_configuration=system_configuration, + ) + ), + ), + ) + message = SimpleNamespace(message_tokens=2, answer_tokens=1) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + patch.object(update_provider_when_message_created, "_execute_provider_updates"), + ): + update_provider_when_message_created.handle( + sender=message, + application_generate_entity=application_generate_entity, + ) + + with engine.connect() as connection: + quota_used = connection.scalar(select(TenantCreditPool.quota_used).where(TenantCreditPool.id == pool_id)) + + assert quota_used == 10 + + +def test_message_created_paid_credit_accounting_uses_paid_pool() -> None: + tenant_id = str(uuid4()) + system_configuration = SimpleNamespace( + current_quota_type=ProviderQuotaType.PAID, + quota_configurations=[ + SimpleNamespace( + quota_type=ProviderQuotaType.PAID, + quota_unit=QuotaUnit.TOKENS, + quota_limit=10, + ) + ], + ) + application_generate_entity = ChatAppGenerateEntity.model_construct( + app_config=SimpleNamespace(tenant_id=tenant_id), + model_conf=SimpleNamespace( + provider="openai", + model="gpt-4o", + provider_model_bundle=SimpleNamespace( + configuration=SimpleNamespace( + 
using_provider_type=ProviderType.SYSTEM, + system_configuration=system_configuration, + ) + ), + ), + ) + message = SimpleNamespace(message_tokens=2, answer_tokens=1) + + with ( + patch.object(update_provider_when_message_created, "_deduct_credit_pool_quota_capped") as mock_deduct, + patch.object(update_provider_when_message_created, "_execute_provider_updates"), + ): + update_provider_when_message_created.handle( + sender=message, + application_generate_entity=application_generate_entity, + ) + + mock_deduct.assert_called_once_with( + tenant_id=tenant_id, + credits_required=3, + pool_type="paid", + ) + + +def test_capped_credit_pool_accounting_skips_exhaustion_warning_when_full_amount_is_deducted(caplog) -> None: + with patch( + "services.credit_pool_service.CreditPoolService.deduct_credits_capped", + return_value=3, + ) as mock_deduct: + update_provider_when_message_created._deduct_credit_pool_quota_capped( + tenant_id="tenant-id", + credits_required=3, + pool_type="trial", + ) + + mock_deduct.assert_called_once_with( + tenant_id="tenant-id", + credits_required=3, + pool_type="trial", + ) + assert "Credit pool exhausted during message-created accounting" not in caplog.text diff --git a/api/tests/unit_tests/services/test_credit_pool_service.py b/api/tests/unit_tests/services/test_credit_pool_service.py new file mode 100644 index 0000000000..e77ef894e7 --- /dev/null +++ b/api/tests/unit_tests/services/test_credit_pool_service.py @@ -0,0 +1,158 @@ +from types import SimpleNamespace +from unittest.mock import patch +from uuid import uuid4 + +import pytest +from sqlalchemy import create_engine, select +from sqlalchemy.engine import Engine + +from core.errors.error import QuotaExceededError +from models import TenantCreditPool +from models.enums import ProviderQuotaType +from services.credit_pool_service import CreditPoolService + + +def _create_engine_with_pool(*, quota_limit: int, quota_used: int) -> tuple[Engine, str, str]: + engine = 
create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + tenant_id = str(uuid4()) + pool_id = str(uuid4()) + with engine.begin() as connection: + connection.execute( + TenantCreditPool.__table__.insert(), + { + "id": pool_id, + "tenant_id": tenant_id, + "pool_type": ProviderQuotaType.TRIAL, + "quota_limit": quota_limit, + "quota_used": quota_used, + }, + ) + return engine, tenant_id, pool_id + + +def _get_quota_used(*, engine: Engine, pool_id: str) -> int | None: + with engine.connect() as connection: + return connection.scalar(select(TenantCreditPool.quota_used).where(TenantCreditPool.id == pool_id)) + + +def test_check_and_deduct_credits_deducts_exact_amount_when_sufficient() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=2) + + with patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)): + deducted_credits = CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=3) + + assert deducted_credits == 3 + assert _get_quota_used(engine=engine, pool_id=pool_id) == 5 + + +def test_check_and_deduct_credits_returns_zero_for_non_positive_request() -> None: + assert CreditPoolService.check_and_deduct_credits(tenant_id=str(uuid4()), credits_required=0) == 0 + + +def test_check_and_deduct_credits_raises_when_pool_is_missing() -> None: + engine = create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="Credit pool not found"), + ): + CreditPoolService.check_and_deduct_credits(tenant_id=str(uuid4()), credits_required=1) + + +def test_check_and_deduct_credits_raises_when_pool_is_empty() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=10) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="No 
credits remaining"), + ): + CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=1) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 10 + + +def test_check_and_deduct_credits_raises_without_partial_deduction_when_insufficient() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=9) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + pytest.raises(QuotaExceededError, match="Insufficient credits remaining"), + ): + CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=3) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 9 + + +def test_check_and_deduct_credits_wraps_unexpected_deduction_errors() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=2) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + patch.object(CreditPoolService, "_get_locked_pool", side_effect=RuntimeError("database unavailable")), + pytest.raises(QuotaExceededError, match="Failed to deduct credits"), + ): + CreditPoolService.check_and_deduct_credits(tenant_id=tenant_id, credits_required=1) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 2 + + +def test_deduct_credits_capped_returns_zero_for_non_positive_request() -> None: + assert CreditPoolService.deduct_credits_capped(tenant_id=str(uuid4()), credits_required=0) == 0 + + +def test_deduct_credits_capped_returns_zero_when_pool_is_missing() -> None: + engine = create_engine("sqlite:///:memory:") + TenantCreditPool.__table__.create(engine) + + with patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)): + deducted_credits = CreditPoolService.deduct_credits_capped(tenant_id=str(uuid4()), credits_required=1) + + assert deducted_credits == 0 + + +def test_deduct_credits_capped_returns_zero_when_pool_is_empty() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, 
quota_used=10) + + with patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)): + deducted_credits = CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=1) + + assert deducted_credits == 0 + assert _get_quota_used(engine=engine, pool_id=pool_id) == 10 + + +def test_deduct_credits_capped_deducts_only_remaining_balance_when_insufficient() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=9) + + with patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)): + deducted_credits = CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=3) + + assert deducted_credits == 1 + assert _get_quota_used(engine=engine, pool_id=pool_id) == 10 + + +def test_deduct_credits_capped_wraps_unexpected_deduction_errors() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=2) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + patch.object(CreditPoolService, "_get_locked_pool", side_effect=RuntimeError("database unavailable")), + pytest.raises(QuotaExceededError, match="Failed to deduct credits"), + ): + CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=1) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 2 + + +def test_deduct_credits_capped_reraises_quota_exceeded_errors() -> None: + engine, tenant_id, pool_id = _create_engine_with_pool(quota_limit=10, quota_used=2) + + with ( + patch("services.credit_pool_service.db", SimpleNamespace(engine=engine)), + patch.object(CreditPoolService, "_get_locked_pool", side_effect=QuotaExceededError("quota unavailable")), + pytest.raises(QuotaExceededError, match="quota unavailable"), + ): + CreditPoolService.deduct_credits_capped(tenant_id=tenant_id, credits_required=1) + + assert _get_quota_used(engine=engine, pool_id=pool_id) == 2 diff --git a/api/tests/unit_tests/services/test_workflow_service.py 
b/api/tests/unit_tests/services/test_workflow_service.py index 08c6ec76e2..1711e66b23 100644 --- a/api/tests/unit_tests/services/test_workflow_service.py +++ b/api/tests/unit_tests/services/test_workflow_service.py @@ -2845,7 +2845,7 @@ class TestWorkflowServiceFreeNodeExecution: mock_node_cls.validate_node_data.assert_called_once_with(sentinel.adapted_node_data) mock_node_cls.assert_called_once_with( node_id="n-1", - config=sentinel.node_data, + data=sentinel.node_data, graph_init_params=mock_graph_init_context_cls.return_value.to_graph_init_params.return_value, graph_runtime_state=ANY, runtime=mock_runtime_cls.return_value, diff --git a/api/uv.lock b/api/uv.lock index 10487f6bac..ad9ce2c4a4 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1597,7 +1597,7 @@ requires-dist = [ { name = "gmpy2", specifier = ">=2.3.0" }, { name = "google-api-python-client", specifier = ">=2.195.0" }, { name = "google-cloud-aiplatform", specifier = ">=1.149.0,<2.0.0" }, - { name = "graphon", specifier = "~=0.2.2" }, + { name = "graphon", specifier = "~=0.3.0" }, { name = "gunicorn", specifier = ">=25.3.0" }, { name = "httpx", extras = ["socks"], specifier = ">=0.28.1,<1.0.0" }, { name = "httpx-sse", specifier = "~=0.4.0" }, @@ -2940,7 +2940,7 @@ httpx = [ [[package]] name = "graphon" -version = "0.2.2" +version = "0.3.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "charset-normalizer" }, @@ -2961,9 +2961,9 @@ dependencies = [ { name = "unstructured", extra = ["docx", "epub", "md", "ppt", "pptx"] }, { name = "webvtt-py" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/08/50/e745a79c5f742f88f6011a1f7c9ba2c2f9cc1beedd982f0b192f1ab8c748/graphon-0.2.2.tar.gz", hash = "sha256:141f0de536171850f1af6f738dc66f0285aadd3c097f1dad2a038636789e0aa5", size = 236360, upload-time = "2026-04-17T08:52:28.047Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bf/62/83593d6e7a139ff124711ea05882cadca7065c11a38763aa9360d7e76804/graphon-0.3.0.tar.gz", hash 
= "sha256:cd38f842ae3dcfa956428b952efbe2a3ea9c1581446647142accbbdeb638b876", size = 241176, upload-time = "2026-04-21T15:18:48.291Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/de/89/a6340afdaf5169d17a318e00fc685fb67ed99baa602c2cbbbf6af6a76096/graphon-0.2.2-py3-none-any.whl", hash = "sha256:754e544d08779138f99eac6547ab08559463680e2c76488b05e1c978210392b4", size = 340808, upload-time = "2026-04-17T08:52:26.5Z" }, + { url = "https://files.pythonhosted.org/packages/b3/f7/81ee8f0368aa6a2d47f97fecc5d4a12865c987906798cbddd0e3b8387f33/graphon-0.3.0-py3-none-any.whl", hash = "sha256:9cca45ebab2a79fd4d04432f55b5b962e9e4f34fa037cc20fee7f18ec80eaa5d", size = 348486, upload-time = "2026-04-21T15:18:46.737Z" }, ] [[package]] From 65c36a51eff15b398638a881caebfea09dd6a5f8 Mon Sep 17 00:00:00 2001 From: Asuka Minato <i@asukaminato.eu.org> Date: Sat, 9 May 2026 16:53:42 +0900 Subject: [PATCH 13/13] ci: update comment (#35968) --- .github/workflows/pyrefly-diff-comment.yml | 22 ++++++++++++++++++++-- .github/workflows/pyrefly-diff.yml | 21 +++++++++++++++++++-- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/.github/workflows/pyrefly-diff-comment.yml b/.github/workflows/pyrefly-diff-comment.yml index 7f82942e7e..8e16baf933 100644 --- a/.github/workflows/pyrefly-diff-comment.yml +++ b/.github/workflows/pyrefly-diff-comment.yml @@ -77,10 +77,28 @@ jobs: } if (diff.trim()) { - await github.rest.issues.createComment({ + const body = '### Pyrefly Diff\n<details>\n<summary>base → PR</summary>\n\n```diff\n' + diff + '\n```\n</details>'; + const marker = '### Pyrefly Diff'; + const { data: comments } = await github.rest.issues.listComments({ issue_number: prNumber, owner: context.repo.owner, repo: context.repo.repo, - body: '### Pyrefly Diff\n<details>\n<summary>base → PR</summary>\n\n```diff\n' + diff + '\n```\n</details>', }); + const existing = comments.find((comment) => comment.body.startsWith(marker)); + + if (existing) { + await 
github.rest.issues.updateComment({ + comment_id: existing.id, + owner: context.repo.owner, + repo: context.repo.repo, + body, + }); + } else { + await github.rest.issues.createComment({ + issue_number: prNumber, + owner: context.repo.owner, + repo: context.repo.repo, + body, + }); + } } diff --git a/.github/workflows/pyrefly-diff.yml b/.github/workflows/pyrefly-diff.yml index 0cf54e3585..386bd25751 100644 --- a/.github/workflows/pyrefly-diff.yml +++ b/.github/workflows/pyrefly-diff.yml @@ -103,9 +103,26 @@ jobs: ].join('\n') : '### Pyrefly Diff\nNo changes detected.'; - await github.rest.issues.createComment({ + const marker = '### Pyrefly Diff'; + const { data: comments } = await github.rest.issues.listComments({ issue_number: prNumber, owner: context.repo.owner, repo: context.repo.repo, - body, }); + const existing = comments.find((comment) => comment.body.startsWith(marker)); + + if (existing) { + await github.rest.issues.updateComment({ + comment_id: existing.id, + owner: context.repo.owner, + repo: context.repo.repo, + body, + }); + } else { + await github.rest.issues.createComment({ + issue_number: prNumber, + owner: context.repo.owner, + repo: context.repo.repo, + body, + }); + }