diff --git a/.claude/settings.json b/.claude/settings.json
new file mode 100644
index 0000000000..c5c514b5f5
--- /dev/null
+++ b/.claude/settings.json
@@ -0,0 +1,9 @@
+{
+ "enabledPlugins": {
+ "feature-dev@claude-plugins-official": true,
+ "context7@claude-plugins-official": true,
+ "typescript-lsp@claude-plugins-official": true,
+ "pyright-lsp@claude-plugins-official": true,
+ "ralph-wiggum@claude-plugins-official": true
+ }
+}
diff --git a/.claude/settings.json.example b/.claude/settings.json.example
deleted file mode 100644
index 1149895340..0000000000
--- a/.claude/settings.json.example
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "permissions": {
- "allow": [],
- "deny": []
- },
- "env": {
- "__comment": "Environment variables for MCP servers. Override in .claude/settings.local.json with actual values.",
- "GITHUB_PERSONAL_ACCESS_TOKEN": "ghp_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
- },
- "enabledMcpjsonServers": [
- "context7",
- "sequential-thinking",
- "github",
- "fetch",
- "playwright",
- "ide"
- ],
- "enableAllProjectMcpServers": true
- }
\ No newline at end of file
diff --git a/.claude/skills/component-refactoring/SKILL.md b/.claude/skills/component-refactoring/SKILL.md
new file mode 100644
index 0000000000..7006c382c8
--- /dev/null
+++ b/.claude/skills/component-refactoring/SKILL.md
@@ -0,0 +1,483 @@
+---
+name: component-refactoring
+description: Refactor high-complexity React components in Dify frontend. Use when `pnpm analyze-component --json` shows complexity > 50 or lineCount > 300, when the user asks for code splitting, hook extraction, or complexity reduction, or when `pnpm analyze-component` warns to refactor before testing; avoid for simple/well-structured components, third-party wrappers, or when the user explicitly wants testing without refactoring.
+---
+
+# Dify Component Refactoring Skill
+
+Refactor high-complexity React components in the Dify frontend codebase with the patterns and workflow below.
+
+> **Complexity Threshold**: Components with complexity > 50 (measured by `pnpm analyze-component`) should be refactored before testing.
+
+## Quick Reference
+
+### Commands (run from `web/`)
+
+Use paths relative to `web/` (e.g., `app/components/...`).
+Use `refactor-component` for refactoring prompts and `analyze-component` for testing prompts and metrics.
+
+```bash
+cd web
+
+# Generate refactoring prompt
+pnpm refactor-component
+
+# Output refactoring analysis as JSON
+pnpm refactor-component --json
+
+# Generate testing prompt (after refactoring)
+pnpm analyze-component
+
+# Output testing analysis as JSON
+pnpm analyze-component --json
+```
+
+### Complexity Analysis
+
+```bash
+# Analyze component complexity
+pnpm analyze-component --json
+
+# Key metrics to check:
+# - complexity: normalized score 0-100 (target < 50)
+# - maxComplexity: highest single function complexity
+# - lineCount: total lines (target < 300)
+```
+
+### Complexity Score Interpretation
+
+| Score | Level | Action |
+|-------|-------|--------|
+| 0-25 | 🟢 Simple | Ready for testing |
+| 26-50 | 🟡 Medium | Consider minor refactoring |
+| 51-75 | 🟠 Complex | **Refactor before testing** |
+| 76-100 | 🔴 Very Complex | **Must refactor** |
+
+## Core Refactoring Patterns
+
+### Pattern 1: Extract Custom Hooks
+
+**When**: Component has complex state management, multiple `useState`/`useEffect`, or business logic mixed with UI.
+
+**Dify Convention**: Place hooks in a `hooks/` subdirectory or alongside the component as `use-<feature>.ts`.
+
+```typescript
+// ❌ Before: Complex state logic in component
+const Configuration: FC = () => {
+ const [modelConfig, setModelConfig] = useState(...)
+ const [datasetConfigs, setDatasetConfigs] = useState(...)
+ const [completionParams, setCompletionParams] = useState({})
+
+ // 50+ lines of state management logic...
+
+ return ...
+}
+
+// ✅ After: Extract to custom hook
+// hooks/use-model-config.ts
+export const useModelConfig = (appId: string) => {
+ const [modelConfig, setModelConfig] = useState(...)
+ const [completionParams, setCompletionParams] = useState({})
+
+ // Related state management logic here
+
+ return { modelConfig, setModelConfig, completionParams, setCompletionParams }
+}
+
+// Component becomes cleaner
+const Configuration: FC = () => {
+ const { modelConfig, setModelConfig } = useModelConfig(appId)
+ return ...
+}
+```
+
+**Dify Examples**:
+- `web/app/components/app/configuration/hooks/use-advanced-prompt-config.ts`
+- `web/app/components/app/configuration/debug/hooks.tsx`
+- `web/app/components/workflow/hooks/use-workflow.ts`
+
+### Pattern 2: Extract Sub-Components
+
+**When**: Single component has multiple UI sections, conditional rendering blocks, or repeated patterns.
+
+**Dify Convention**: Place sub-components in subdirectories or as separate files in the same directory.
+
+```typescript
+// ❌ Before: Monolithic JSX with multiple sections
+const AppInfo = () => {
+ return (
+
+ {/* 100 lines of header UI */}
+ {/* 100 lines of operations UI */}
+ {/* 100 lines of modals */}
+
+ )
+}
+
+// ✅ After: Split into focused components
+// app-info/
+// ├── index.tsx (orchestration only)
+// ├── app-header.tsx (header UI)
+// ├── app-operations.tsx (operations UI)
+// └── app-modals.tsx (modal management)
+
+const AppInfo = () => {
+ const { showModal, setShowModal } = useAppInfoModals()
+
+  return (
+    <div>
+      <AppHeader />
+      <AppOperations />
+      <AppModals activeModal={showModal}
+        onClose={() => setShowModal(null)} />
+    </div>
+  )
+}
+```
+
+**Dify Examples**:
+- `web/app/components/app/configuration/` directory structure
+- `web/app/components/workflow/nodes/` per-node organization
+
+### Pattern 3: Simplify Conditional Logic
+
+**When**: Deep nesting (> 3 levels), complex ternaries, or multiple `if/else` chains.
+
+```typescript
+// ❌ Before: Deeply nested conditionals
+const Template = useMemo(() => {
+ if (appDetail?.mode === AppModeEnum.CHAT) {
+ switch (locale) {
+ case LanguagesSupported[1]:
+ return
+ case LanguagesSupported[7]:
+ return
+ default:
+ return
+ }
+ }
+ if (appDetail?.mode === AppModeEnum.ADVANCED_CHAT) {
+ // Another 15 lines...
+ }
+ // More conditions...
+}, [appDetail, locale])
+
+// ✅ After: Use lookup tables + early returns
+const TEMPLATE_MAP = {
+ [AppModeEnum.CHAT]: {
+ [LanguagesSupported[1]]: TemplateChatZh,
+ [LanguagesSupported[7]]: TemplateChatJa,
+ default: TemplateChatEn,
+ },
+ [AppModeEnum.ADVANCED_CHAT]: {
+ [LanguagesSupported[1]]: TemplateAdvancedChatZh,
+ // ...
+ },
+}
+
+const Template = useMemo(() => {
+ const modeTemplates = TEMPLATE_MAP[appDetail?.mode]
+ if (!modeTemplates) return null
+
+ const TemplateComponent = modeTemplates[locale] || modeTemplates.default
+  return <TemplateComponent />
+}, [appDetail, locale])
+```
+
+### Pattern 4: Extract API/Data Logic
+
+**When**: Component directly handles API calls, data transformation, or complex async operations.
+
+**Dify Convention**: Use `@tanstack/react-query` hooks from `web/service/use-*.ts` or create custom data hooks.
+
+```typescript
+// ❌ Before: API logic in component
+const MCPServiceCard = () => {
+ const [basicAppConfig, setBasicAppConfig] = useState({})
+
+ useEffect(() => {
+ if (isBasicApp && appId) {
+ (async () => {
+ const res = await fetchAppDetail({ url: '/apps', id: appId })
+ setBasicAppConfig(res?.model_config || {})
+ })()
+ }
+ }, [appId, isBasicApp])
+
+ // More API-related logic...
+}
+
+// ✅ After: Extract to data hook using React Query
+// use-app-config.ts
+import { useQuery } from '@tanstack/react-query'
+import { get } from '@/service/base'
+
+const NAME_SPACE = 'appConfig'
+
+export const useAppConfig = (appId: string, isBasicApp: boolean) => {
+ return useQuery({
+ enabled: isBasicApp && !!appId,
+ queryKey: [NAME_SPACE, 'detail', appId],
+ queryFn: () => get(`/apps/${appId}`),
+ select: data => data?.model_config || {},
+ })
+}
+
+// Component becomes cleaner
+const MCPServiceCard = () => {
+ const { data: config, isLoading } = useAppConfig(appId, isBasicApp)
+ // UI only
+}
+```
+
+**React Query Best Practices in Dify**:
+- Define `NAME_SPACE` for query key organization
+- Use `enabled` option for conditional fetching
+- Use `select` for data transformation
+- Export invalidation hooks: `useInvalidXxx`
+
+**Dify Examples**:
+- `web/service/use-workflow.ts`
+- `web/service/use-common.ts`
+- `web/service/knowledge/use-dataset.ts`
+- `web/service/knowledge/use-document.ts`
+
+### Pattern 5: Extract Modal/Dialog Management
+
+**When**: Component manages multiple modals with complex open/close states.
+
+**Dify Convention**: Modals should be extracted with their state management.
+
+```typescript
+// ❌ Before: Multiple modal states in component
+const AppInfo = () => {
+ const [showEditModal, setShowEditModal] = useState(false)
+ const [showDuplicateModal, setShowDuplicateModal] = useState(false)
+ const [showConfirmDelete, setShowConfirmDelete] = useState(false)
+ const [showSwitchModal, setShowSwitchModal] = useState(false)
+ const [showImportDSLModal, setShowImportDSLModal] = useState(false)
+ // 5+ more modal states...
+}
+
+// ✅ After: Extract to modal management hook
+type ModalType = 'edit' | 'duplicate' | 'delete' | 'switch' | 'import' | null
+
+const useAppInfoModals = () => {
+  const [activeModal, setActiveModal] = useState<ModalType>(null)
+
+ const openModal = useCallback((type: ModalType) => setActiveModal(type), [])
+ const closeModal = useCallback(() => setActiveModal(null), [])
+
+ return {
+ activeModal,
+ openModal,
+ closeModal,
+ isOpen: (type: ModalType) => activeModal === type,
+ }
+}
+```
+
+### Pattern 6: Extract Form Logic
+
+**When**: Complex form validation, submission handling, or field transformation.
+
+**Dify Convention**: Use `@tanstack/react-form` patterns from `web/app/components/base/form/`.
+
+```typescript
+// ✅ Use existing form infrastructure
+import { useAppForm } from '@/app/components/base/form'
+
+const ConfigForm = () => {
+ const form = useAppForm({
+ defaultValues: { name: '', description: '' },
+ onSubmit: handleSubmit,
+ })
+
+ return ...
+}
+```
+
+## Dify-Specific Refactoring Guidelines
+
+### 1. Context Provider Extraction
+
+**When**: Component provides complex context values with multiple states.
+
+```typescript
+// ❌ Before: Large context value object
+const value = {
+ appId, isAPIKeySet, isTrailFinished, mode, modelModeType,
+ promptMode, isAdvancedMode, isAgent, isOpenAI, isFunctionCall,
+ // 50+ more properties...
+}
+return ...
+
+// ✅ After: Split into domain-specific contexts
+
+
+
+ {children}
+
+
+
+```
+
+**Dify Reference**: `web/context/` directory structure
+
+### 2. Workflow Node Components
+
+**When**: Refactoring workflow node components (`web/app/components/workflow/nodes/`).
+
+**Conventions**:
+- Keep node logic in `use-interactions.ts`
+- Extract panel UI to separate files
+- Use `_base` components for common patterns
+
+```
+nodes/<node-type>/
+ ├── index.tsx # Node registration
+ ├── node.tsx # Node visual component
+ ├── panel.tsx # Configuration panel
+ ├── use-interactions.ts # Node-specific hooks
+ └── types.ts # Type definitions
+```
+
+### 3. Configuration Components
+
+**When**: Refactoring app configuration components.
+
+**Conventions**:
+- Separate config sections into subdirectories
+- Use existing patterns from `web/app/components/app/configuration/`
+- Keep feature toggles in dedicated components
+
+### 4. Tool/Plugin Components
+
+**When**: Refactoring tool-related components (`web/app/components/tools/`).
+
+**Conventions**:
+- Follow existing modal patterns
+- Use service hooks from `web/service/use-tools.ts`
+- Keep provider-specific logic isolated
+
+## Refactoring Workflow
+
+### Step 1: Generate Refactoring Prompt
+
+```bash
+pnpm refactor-component
+```
+
+This command will:
+- Analyze component complexity and features
+- Identify specific refactoring actions needed
+- Generate a prompt for AI assistant (auto-copied to clipboard on macOS)
+- Provide detailed requirements based on detected patterns
+
+### Step 2: Analyze Details
+
+```bash
+pnpm analyze-component --json
+```
+
+Identify:
+- Total complexity score
+- Max function complexity
+- Line count
+- Features detected (state, effects, API, etc.)
+
+### Step 3: Plan
+
+Create a refactoring plan based on detected features:
+
+| Detected Feature | Refactoring Action |
+|------------------|-------------------|
+| `hasState: true` + `hasEffects: true` | Extract custom hook |
+| `hasAPI: true` | Extract data/service hook |
+| `hasEvents: true` (many) | Extract event handlers |
+| `lineCount > 300` | Split into sub-components |
+| `maxComplexity > 50` | Simplify conditional logic |
+
+### Step 4: Execute Incrementally
+
+1. **Extract one piece at a time**
+2. **Run lint, type-check, and tests after each extraction**
+3. **Verify functionality before next step**
+
+```
+For each extraction:
+ ┌────────────────────────────────────────┐
+ │ 1. Extract code │
+ │ 2. Run: pnpm lint:fix │
+ │ 3. Run: pnpm type-check:tsgo │
+ │ 4. Run: pnpm test │
+ │ 5. Test functionality manually │
+ │ 6. PASS? → Next extraction │
+ │ FAIL? → Fix before continuing │
+ └────────────────────────────────────────┘
+```
+
+### Step 5: Verify
+
+After refactoring:
+
+```bash
+# Re-run refactor command to verify improvements
+pnpm refactor-component
+
+# If complexity < 25 and lines < 200, you'll see:
+# ✅ COMPONENT IS WELL-STRUCTURED
+
+# For detailed metrics:
+pnpm analyze-component --json
+
+# Target metrics:
+# - complexity < 50
+# - lineCount < 300
+# - maxComplexity < 30
+```
+
+## Common Mistakes to Avoid
+
+### ❌ Over-Engineering
+
+```typescript
+// ❌ Too many tiny hooks
+const useButtonText = () => useState('Click')
+const useButtonDisabled = () => useState(false)
+const useButtonLoading = () => useState(false)
+
+// ✅ Cohesive hook with related state
+const useButtonState = () => {
+ const [text, setText] = useState('Click')
+ const [disabled, setDisabled] = useState(false)
+ const [loading, setLoading] = useState(false)
+ return { text, setText, disabled, setDisabled, loading, setLoading }
+}
+```
+
+### ❌ Breaking Existing Patterns
+
+- Follow existing directory structures
+- Maintain naming conventions
+- Preserve export patterns for compatibility
+
+### ❌ Premature Abstraction
+
+- Only extract when there's clear complexity benefit
+- Don't create abstractions for single-use code
+- Keep refactored code in the same domain area
+
+## References
+
+### Dify Codebase Examples
+
+- **Hook extraction**: `web/app/components/app/configuration/hooks/`
+- **Component splitting**: `web/app/components/app/configuration/`
+- **Service hooks**: `web/service/use-*.ts`
+- **Workflow patterns**: `web/app/components/workflow/hooks/`
+- **Form patterns**: `web/app/components/base/form/`
+
+### Related Skills
+
+- `frontend-testing` - For testing refactored components
+- `web/testing/testing.md` - Testing specification
diff --git a/.claude/skills/component-refactoring/references/complexity-patterns.md b/.claude/skills/component-refactoring/references/complexity-patterns.md
new file mode 100644
index 0000000000..5a0a268f38
--- /dev/null
+++ b/.claude/skills/component-refactoring/references/complexity-patterns.md
@@ -0,0 +1,493 @@
+# Complexity Reduction Patterns
+
+This document provides patterns for reducing cognitive complexity in Dify React components.
+
+## Understanding Complexity
+
+### SonarJS Cognitive Complexity
+
+The `pnpm analyze-component` tool uses SonarJS cognitive complexity metrics:
+
+- **Total Complexity**: Sum of all functions' complexity in the file
+- **Max Complexity**: Highest single function complexity
+
+### What Increases Complexity
+
+| Pattern | Complexity Impact |
+|---------|-------------------|
+| `if/else` | +1 per branch |
+| Nested conditions | +1 per nesting level |
+| `switch/case` | +1 per case |
+| `for/while/do` | +1 per loop |
+| `&&`/`\|\|` chains | +1 per operator |
+| Nested callbacks | +1 per nesting level |
+| `try/catch` | +1 per catch |
+| Ternary expressions | +1 per nesting |
+
+## Pattern 1: Replace Conditionals with Lookup Tables
+
+**Before** (complexity: ~15):
+
+```typescript
+const Template = useMemo(() => {
+ if (appDetail?.mode === AppModeEnum.CHAT) {
+ switch (locale) {
+ case LanguagesSupported[1]:
+ return
+ case LanguagesSupported[7]:
+ return
+ default:
+ return
+ }
+ }
+ if (appDetail?.mode === AppModeEnum.ADVANCED_CHAT) {
+ switch (locale) {
+ case LanguagesSupported[1]:
+ return
+ case LanguagesSupported[7]:
+ return
+ default:
+ return
+ }
+ }
+ if (appDetail?.mode === AppModeEnum.WORKFLOW) {
+ // Similar pattern...
+ }
+ return null
+}, [appDetail, locale])
+```
+
+**After** (complexity: ~3):
+
+```typescript
+// Define lookup table outside component
+const TEMPLATE_MAP: Record<string, Record<string, React.ComponentType>> = {
+ [AppModeEnum.CHAT]: {
+ [LanguagesSupported[1]]: TemplateChatZh,
+ [LanguagesSupported[7]]: TemplateChatJa,
+ default: TemplateChatEn,
+ },
+ [AppModeEnum.ADVANCED_CHAT]: {
+ [LanguagesSupported[1]]: TemplateAdvancedChatZh,
+ [LanguagesSupported[7]]: TemplateAdvancedChatJa,
+ default: TemplateAdvancedChatEn,
+ },
+ [AppModeEnum.WORKFLOW]: {
+ [LanguagesSupported[1]]: TemplateWorkflowZh,
+ [LanguagesSupported[7]]: TemplateWorkflowJa,
+ default: TemplateWorkflowEn,
+ },
+ // ...
+}
+
+// Clean component logic
+const Template = useMemo(() => {
+ if (!appDetail?.mode) return null
+
+ const templates = TEMPLATE_MAP[appDetail.mode]
+ if (!templates) return null
+
+ const TemplateComponent = templates[locale] ?? templates.default
+  return <TemplateComponent />
+}, [appDetail, locale])
+```
+
+## Pattern 2: Use Early Returns
+
+**Before** (complexity: ~10):
+
+```typescript
+const handleSubmit = () => {
+ if (isValid) {
+ if (hasChanges) {
+ if (isConnected) {
+ submitData()
+ } else {
+ showConnectionError()
+ }
+ } else {
+ showNoChangesMessage()
+ }
+ } else {
+ showValidationError()
+ }
+}
+```
+
+**After** (complexity: ~4):
+
+```typescript
+const handleSubmit = () => {
+ if (!isValid) {
+ showValidationError()
+ return
+ }
+
+ if (!hasChanges) {
+ showNoChangesMessage()
+ return
+ }
+
+ if (!isConnected) {
+ showConnectionError()
+ return
+ }
+
+ submitData()
+}
+```
+
+## Pattern 3: Extract Complex Conditions
+
+**Before** (complexity: high):
+
+```typescript
+const canPublish = (() => {
+ if (mode !== AppModeEnum.COMPLETION) {
+ if (!isAdvancedMode)
+ return true
+
+ if (modelModeType === ModelModeType.completion) {
+ if (!hasSetBlockStatus.history || !hasSetBlockStatus.query)
+ return false
+ return true
+ }
+ return true
+ }
+ return !promptEmpty
+})()
+```
+
+**After** (complexity: lower):
+
+```typescript
+// Extract to named functions
+const canPublishInCompletionMode = () => !promptEmpty
+
+const canPublishInChatMode = () => {
+ if (!isAdvancedMode) return true
+ if (modelModeType !== ModelModeType.completion) return true
+ return hasSetBlockStatus.history && hasSetBlockStatus.query
+}
+
+// Clean main logic
+const canPublish = mode === AppModeEnum.COMPLETION
+ ? canPublishInCompletionMode()
+ : canPublishInChatMode()
+```
+
+## Pattern 4: Replace Chained Ternaries
+
+**Before** (complexity: ~5):
+
+```typescript
+const statusText = serverActivated
+ ? t('status.running')
+ : serverPublished
+ ? t('status.inactive')
+ : appUnpublished
+ ? t('status.unpublished')
+ : t('status.notConfigured')
+```
+
+**After** (complexity: ~2):
+
+```typescript
+const getStatusText = () => {
+ if (serverActivated) return t('status.running')
+ if (serverPublished) return t('status.inactive')
+ if (appUnpublished) return t('status.unpublished')
+ return t('status.notConfigured')
+}
+
+const statusText = getStatusText()
+```
+
+Or use lookup:
+
+```typescript
+const STATUS_TEXT_MAP = {
+ running: 'status.running',
+ inactive: 'status.inactive',
+ unpublished: 'status.unpublished',
+ notConfigured: 'status.notConfigured',
+} as const
+
+const getStatusKey = (): keyof typeof STATUS_TEXT_MAP => {
+ if (serverActivated) return 'running'
+ if (serverPublished) return 'inactive'
+ if (appUnpublished) return 'unpublished'
+ return 'notConfigured'
+}
+
+const statusText = t(STATUS_TEXT_MAP[getStatusKey()])
+```
+
+## Pattern 5: Flatten Nested Loops
+
+**Before** (complexity: high):
+
+```typescript
+const processData = (items: Item[]) => {
+ const results: ProcessedItem[] = []
+
+ for (const item of items) {
+ if (item.isValid) {
+ for (const child of item.children) {
+ if (child.isActive) {
+ for (const prop of child.properties) {
+ if (prop.value !== null) {
+ results.push({
+ itemId: item.id,
+ childId: child.id,
+ propValue: prop.value,
+ })
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return results
+}
+```
+
+**After** (complexity: lower):
+
+```typescript
+// Use functional approach
+const processData = (items: Item[]) => {
+ return items
+ .filter(item => item.isValid)
+ .flatMap(item =>
+ item.children
+ .filter(child => child.isActive)
+ .flatMap(child =>
+ child.properties
+ .filter(prop => prop.value !== null)
+ .map(prop => ({
+ itemId: item.id,
+ childId: child.id,
+ propValue: prop.value,
+ }))
+ )
+ )
+}
+```
+
+## Pattern 6: Extract Event Handler Logic
+
+**Before** (complexity: high in component):
+
+```typescript
+const Component = () => {
+ const handleSelect = (data: DataSet[]) => {
+ if (isEqual(data.map(item => item.id), dataSets.map(item => item.id))) {
+ hideSelectDataSet()
+ return
+ }
+
+ formattingChangedDispatcher()
+ let newDatasets = data
+ if (data.find(item => !item.name)) {
+ const newSelected = produce(data, (draft) => {
+ data.forEach((item, index) => {
+ if (!item.name) {
+ const newItem = dataSets.find(i => i.id === item.id)
+ if (newItem)
+ draft[index] = newItem
+ }
+ })
+ })
+ setDataSets(newSelected)
+ newDatasets = newSelected
+ }
+ else {
+ setDataSets(data)
+ }
+ hideSelectDataSet()
+
+ // 40 more lines of logic...
+ }
+
+ return ...
+}
+```
+
+**After** (complexity: lower):
+
+```typescript
+// Extract to hook or utility
+const useDatasetSelection = (dataSets: DataSet[], setDataSets: SetState) => {
+ const normalizeSelection = (data: DataSet[]) => {
+ const hasUnloadedItem = data.some(item => !item.name)
+ if (!hasUnloadedItem) return data
+
+ return produce(data, (draft) => {
+ data.forEach((item, index) => {
+ if (!item.name) {
+ const existing = dataSets.find(i => i.id === item.id)
+ if (existing) draft[index] = existing
+ }
+ })
+ })
+ }
+
+ const hasSelectionChanged = (newData: DataSet[]) => {
+ return !isEqual(
+ newData.map(item => item.id),
+ dataSets.map(item => item.id)
+ )
+ }
+
+ return { normalizeSelection, hasSelectionChanged }
+}
+
+// Component becomes cleaner
+const Component = () => {
+ const { normalizeSelection, hasSelectionChanged } = useDatasetSelection(dataSets, setDataSets)
+
+ const handleSelect = (data: DataSet[]) => {
+ if (!hasSelectionChanged(data)) {
+ hideSelectDataSet()
+ return
+ }
+
+ formattingChangedDispatcher()
+ const normalized = normalizeSelection(data)
+ setDataSets(normalized)
+ hideSelectDataSet()
+ }
+
+ return ...
+}
+```
+
+## Pattern 7: Reduce Boolean Logic Complexity
+
+**Before** (complexity: ~8):
+
+```typescript
+const toggleDisabled = hasInsufficientPermissions
+ || appUnpublished
+ || missingStartNode
+ || triggerModeDisabled
+ || (isAdvancedApp && !currentWorkflow?.graph)
+ || (isBasicApp && !basicAppConfig.updated_at)
+```
+
+**After** (complexity: ~3):
+
+```typescript
+// Extract meaningful boolean functions
+const isAppReady = () => {
+ if (isAdvancedApp) return !!currentWorkflow?.graph
+ return !!basicAppConfig.updated_at
+}
+
+const hasRequiredPermissions = () => {
+ return isCurrentWorkspaceEditor && !hasInsufficientPermissions
+}
+
+const canToggle = () => {
+ if (!hasRequiredPermissions()) return false
+ if (!isAppReady()) return false
+ if (missingStartNode) return false
+ if (triggerModeDisabled) return false
+ return true
+}
+
+const toggleDisabled = !canToggle()
+```
+
+## Pattern 8: Simplify useMemo/useCallback Dependencies
+
+**Before** (complexity: multiple recalculations):
+
+```typescript
+const payload = useMemo(() => {
+ let parameters: Parameter[] = []
+ let outputParameters: OutputParameter[] = []
+
+ if (!published) {
+ parameters = (inputs || []).map((item) => ({
+ name: item.variable,
+ description: '',
+ form: 'llm',
+ required: item.required,
+ type: item.type,
+ }))
+ outputParameters = (outputs || []).map((item) => ({
+ name: item.variable,
+ description: '',
+ type: item.value_type,
+ }))
+ }
+ else if (detail && detail.tool) {
+ parameters = (inputs || []).map((item) => ({
+ // Complex transformation...
+ }))
+ outputParameters = (outputs || []).map((item) => ({
+ // Complex transformation...
+ }))
+ }
+
+ return {
+ icon: detail?.icon || icon,
+ label: detail?.label || name,
+ // ...more fields
+ }
+}, [detail, published, workflowAppId, icon, name, description, inputs, outputs])
+```
+
+**After** (complexity: separated concerns):
+
+```typescript
+// Separate transformations
+const useParameterTransform = (inputs: InputVar[], detail?: ToolDetail, published?: boolean) => {
+ return useMemo(() => {
+ if (!published) {
+ return inputs.map(item => ({
+ name: item.variable,
+ description: '',
+ form: 'llm',
+ required: item.required,
+ type: item.type,
+ }))
+ }
+
+ if (!detail?.tool) return []
+
+ return inputs.map(item => ({
+ name: item.variable,
+ required: item.required,
+ type: item.type === 'paragraph' ? 'string' : item.type,
+ description: detail.tool.parameters.find(p => p.name === item.variable)?.llm_description || '',
+ form: detail.tool.parameters.find(p => p.name === item.variable)?.form || 'llm',
+ }))
+ }, [inputs, detail, published])
+}
+
+// Component uses hook
+const parameters = useParameterTransform(inputs, detail, published)
+const outputParameters = useOutputTransform(outputs, detail, published)
+
+const payload = useMemo(() => ({
+ icon: detail?.icon || icon,
+ label: detail?.label || name,
+ parameters,
+ outputParameters,
+ // ...
+}), [detail, icon, name, parameters, outputParameters])
+```
+
+## Target Metrics After Refactoring
+
+| Metric | Target |
+|--------|--------|
+| Total Complexity | < 50 |
+| Max Function Complexity | < 30 |
+| Function Length | < 30 lines |
+| Nesting Depth | ≤ 3 levels |
+| Conditional Chains | ≤ 3 conditions |
diff --git a/.claude/skills/component-refactoring/references/component-splitting.md b/.claude/skills/component-refactoring/references/component-splitting.md
new file mode 100644
index 0000000000..78a3389100
--- /dev/null
+++ b/.claude/skills/component-refactoring/references/component-splitting.md
@@ -0,0 +1,477 @@
+# Component Splitting Patterns
+
+This document provides detailed guidance on splitting large components into smaller, focused components in Dify.
+
+## When to Split Components
+
+Split a component when you identify:
+
+1. **Multiple UI sections** - Distinct visual areas with minimal coupling that can be composed independently
+1. **Conditional rendering blocks** - Large `{condition && }` blocks
+1. **Repeated patterns** - Similar UI structures used multiple times
+1. **300+ lines** - Component exceeds manageable size
+1. **Modal clusters** - Multiple modals rendered in one component
+
+## Splitting Strategies
+
+### Strategy 1: Section-Based Splitting
+
+Identify visual sections and extract each as a component.
+
+```typescript
+// ❌ Before: Monolithic component (500+ lines)
+const ConfigurationPage = () => {
+ return (
+
+ {/* Header Section - 50 lines */}
+
+
{t('configuration.title')}
+
+ {isAdvancedMode &&
Advanced}
+
+
+
+
+
+ {/* Config Section - 200 lines */}
+
+
+
+
+ {/* Debug Section - 150 lines */}
+
+
+
+
+ {/* Modals Section - 100 lines */}
+ {showSelectDataSet &&
}
+ {showHistoryModal &&
}
+ {showUseGPT4Confirm &&
}
+
+ )
+}
+
+// ✅ After: Split into focused components
+// configuration/
+// ├── index.tsx (orchestration)
+// ├── configuration-header.tsx
+// ├── configuration-content.tsx
+// ├── configuration-debug.tsx
+// └── configuration-modals.tsx
+
+// configuration-header.tsx
+interface ConfigurationHeaderProps {
+ isAdvancedMode: boolean
+ onPublish: () => void
+}
+
+const ConfigurationHeader: FC<ConfigurationHeaderProps> = ({
+ isAdvancedMode,
+ onPublish,
+}) => {
+ const { t } = useTranslation()
+
+ return (
+
+
{t('configuration.title')}
+
+ {isAdvancedMode &&
Advanced}
+
+
+
+
+ )
+}
+
+// index.tsx (orchestration only)
+const ConfigurationPage = () => {
+ const { modelConfig, setModelConfig } = useModelConfig()
+ const { activeModal, openModal, closeModal } = useModalState()
+
+ return (
+
+
+
+ {!isMobile && (
+
+ )}
+
+
+ )
+}
+```
+
+### Strategy 2: Conditional Block Extraction
+
+Extract large conditional rendering blocks.
+
+```typescript
+// ❌ Before: Large conditional blocks
+const AppInfo = () => {
+ return (
+
+ {expand ? (
+
+ {/* 100 lines of expanded view */}
+
+ ) : (
+
+ {/* 50 lines of collapsed view */}
+
+ )}
+
+ )
+}
+
+// ✅ After: Separate view components
+const AppInfoExpanded: FC = ({ appDetail, onAction }) => {
+ return (
+
+ {/* Clean, focused expanded view */}
+
+ )
+}
+
+const AppInfoCollapsed: FC = ({ appDetail, onAction }) => {
+ return (
+
+ {/* Clean, focused collapsed view */}
+
+ )
+}
+
+const AppInfo = () => {
+ return (
+
+ )
+}
+```
+
+### Strategy 3: Modal Extraction
+
+Extract modals with their trigger logic.
+
+```typescript
+// ❌ Before: Multiple modals in one component
+const AppInfo = () => {
+ const [showEdit, setShowEdit] = useState(false)
+ const [showDuplicate, setShowDuplicate] = useState(false)
+ const [showDelete, setShowDelete] = useState(false)
+ const [showSwitch, setShowSwitch] = useState(false)
+
+ const onEdit = async (data) => { /* 20 lines */ }
+ const onDuplicate = async (data) => { /* 20 lines */ }
+ const onDelete = async () => { /* 15 lines */ }
+
+ return (
+
+ {/* Main content */}
+
+ {showEdit && setShowEdit(false)} />}
+ {showDuplicate && setShowDuplicate(false)} />}
+ {showDelete && setShowDelete(false)} />}
+ {showSwitch && }
+
+ )
+}
+
+// ✅ After: Modal manager component
+// app-info-modals.tsx
+type ModalType = 'edit' | 'duplicate' | 'delete' | 'switch' | null
+
+interface AppInfoModalsProps {
+ appDetail: AppDetail
+ activeModal: ModalType
+ onClose: () => void
+ onSuccess: () => void
+}
+
+const AppInfoModals: FC<AppInfoModalsProps> = ({
+ appDetail,
+ activeModal,
+ onClose,
+ onSuccess,
+}) => {
+ const handleEdit = async (data) => { /* logic */ }
+ const handleDuplicate = async (data) => { /* logic */ }
+ const handleDelete = async () => { /* logic */ }
+
+ return (
+ <>
+ {activeModal === 'edit' && (
+
+ )}
+ {activeModal === 'duplicate' && (
+
+ )}
+ {activeModal === 'delete' && (
+
+ )}
+ {activeModal === 'switch' && (
+
+ )}
+    </>
+ )
+}
+
+// Parent component
+const AppInfo = () => {
+ const { activeModal, openModal, closeModal } = useModalState()
+
+ return (
+
+ {/* Main content with openModal triggers */}
+
+
+
+
+ )
+}
+```
+
+### Strategy 4: List Item Extraction
+
+Extract repeated item rendering.
+
+```typescript
+// ❌ Before: Inline item rendering
+const OperationsList = () => {
+ return (
+
+ {operations.map(op => (
+
+ {op.icon}
+ {op.title}
+ {op.description}
+
+ {op.badge && {op.badge}}
+ {/* More complex rendering... */}
+
+ ))}
+
+ )
+}
+
+// ✅ After: Extracted item component
+interface OperationItemProps {
+ operation: Operation
+ onAction: (id: string) => void
+}
+
+const OperationItem: FC<OperationItemProps> = ({
+ return (
+
+ {operation.icon}
+ {operation.title}
+ {operation.description}
+
+ {operation.badge && {operation.badge}}
+
+ )
+}
+
+const OperationsList = () => {
+ const handleAction = useCallback((id: string) => {
+ const op = operations.find(o => o.id === id)
+ op?.onClick()
+ }, [operations])
+
+ return (
+
+ {operations.map(op => (
+
+ ))}
+
+ )
+}
+```
+
+## Directory Structure Patterns
+
+### Pattern A: Flat Structure (Simple Components)
+
+For components with 2-3 sub-components:
+
+```
+component-name/
+ ├── index.tsx # Main component
+ ├── sub-component-a.tsx
+ ├── sub-component-b.tsx
+ └── types.ts # Shared types
+```
+
+### Pattern B: Nested Structure (Complex Components)
+
+For components with many sub-components:
+
+```
+component-name/
+ ├── index.tsx # Main orchestration
+ ├── types.ts # Shared types
+ ├── hooks/
+ │ ├── use-feature-a.ts
+ │ └── use-feature-b.ts
+ ├── components/
+ │ ├── header/
+ │ │ └── index.tsx
+ │ ├── content/
+ │ │ └── index.tsx
+ │ └── modals/
+ │ └── index.tsx
+ └── utils/
+ └── helpers.ts
+```
+
+### Pattern C: Feature-Based Structure (Dify Standard)
+
+Following Dify's existing patterns:
+
+```
+configuration/
+ ├── index.tsx # Main page component
+ ├── base/ # Base/shared components
+ │ ├── feature-panel/
+ │ ├── group-name/
+ │ └── operation-btn/
+ ├── config/ # Config section
+ │ ├── index.tsx
+ │ ├── agent/
+ │ └── automatic/
+ ├── dataset-config/ # Dataset section
+ │ ├── index.tsx
+ │ ├── card-item/
+ │ └── params-config/
+ ├── debug/ # Debug section
+ │ ├── index.tsx
+ │ └── hooks.tsx
+ └── hooks/ # Shared hooks
+ └── use-advanced-prompt-config.ts
+```
+
+## Props Design
+
+### Minimal Props Principle
+
+Pass only what's needed:
+
+```typescript
+// ❌ Bad: Passing entire objects when only some fields needed
+
+
+// ✅ Good: Destructure to minimum required
+
+```
+
+### Callback Props Pattern
+
+Use callbacks for child-to-parent communication:
+
+```typescript
+// Parent
+const Parent = () => {
+ const [value, setValue] = useState('')
+
+ return (
+
+ )
+}
+
+// Child
+interface ChildProps {
+ value: string
+ onChange: (value: string) => void
+ onSubmit: () => void
+}
+
+const Child: FC<ChildProps> = ({
+ return (
+
+ onChange(e.target.value)} />
+
+
+ )
+}
+```
+
+### Render Props for Flexibility
+
+When sub-components need parent context:
+
+```typescript
+interface ListProps<T> {
+ items: T[]
+ renderItem: (item: T, index: number) => React.ReactNode
+ renderEmpty?: () => React.ReactNode
+}
+
+function List<T>({ items, renderItem, renderEmpty }: ListProps<T>) {
+ if (items.length === 0 && renderEmpty) {
+    return <>{renderEmpty()}</>
+ }
+
+ return (
+
+ {items.map((item, index) => renderItem(item, index))}
+
+ )
+}
+
+// Usage
+ }
+ renderEmpty={() => }
+/>
+```
diff --git a/.claude/skills/component-refactoring/references/hook-extraction.md b/.claude/skills/component-refactoring/references/hook-extraction.md
new file mode 100644
index 0000000000..a8d75deffd
--- /dev/null
+++ b/.claude/skills/component-refactoring/references/hook-extraction.md
@@ -0,0 +1,317 @@
+# Hook Extraction Patterns
+
+This document provides detailed guidance on extracting custom hooks from complex components in Dify.
+
+## When to Extract Hooks
+
+Extract a custom hook when you identify:
+
+1. **Coupled state groups** - Multiple `useState` hooks that are always used together
+1. **Complex effects** - `useEffect` with multiple dependencies or cleanup logic
+1. **Business logic** - Data transformations, validations, or calculations
+1. **Reusable patterns** - Logic that appears in multiple components
+
+## Extraction Process
+
+### Step 1: Identify State Groups
+
+Look for state variables that are logically related:
+
+```typescript
+// ❌ These belong together - extract to hook
+const [modelConfig, setModelConfig] = useState(...)
+const [completionParams, setCompletionParams] = useState({})
+const [modelModeType, setModelModeType] = useState(...)
+
+// These are model-related state that should be in useModelConfig()
+```
+
+### Step 2: Identify Related Effects
+
+Find effects that modify the grouped state:
+
+```typescript
+// ❌ These effects belong with the state above
+useEffect(() => {
+ if (hasFetchedDetail && !modelModeType) {
+ const mode = currModel?.model_properties.mode
+ if (mode) {
+ const newModelConfig = produce(modelConfig, (draft) => {
+ draft.mode = mode
+ })
+ setModelConfig(newModelConfig)
+ }
+ }
+}, [textGenerationModelList, hasFetchedDetail, modelModeType, currModel])
+```
+
+### Step 3: Create the Hook
+
+```typescript
+// hooks/use-model-config.ts
+import type { FormValue } from '@/app/components/header/account-setting/model-provider-page/declarations'
+import type { ModelConfig } from '@/models/debug'
+import { produce } from 'immer'
+import { useEffect, useState } from 'react'
+import { ModelModeType } from '@/types/app'
+
+interface UseModelConfigParams {
+  initialConfig?: Partial<ModelConfig>
+ currModel?: { model_properties?: { mode?: ModelModeType } }
+ hasFetchedDetail: boolean
+}
+
+interface UseModelConfigReturn {
+ modelConfig: ModelConfig
+ setModelConfig: (config: ModelConfig) => void
+ completionParams: FormValue
+ setCompletionParams: (params: FormValue) => void
+ modelModeType: ModelModeType
+}
+
+export const useModelConfig = ({
+ initialConfig,
+ currModel,
+ hasFetchedDetail,
+}: UseModelConfigParams): UseModelConfigReturn => {
+  const [modelConfig, setModelConfig] = useState<ModelConfig>({
+ provider: 'langgenius/openai/openai',
+ model_id: 'gpt-3.5-turbo',
+ mode: ModelModeType.unset,
+ // ... default values
+ ...initialConfig,
+ })
+
+  const [completionParams, setCompletionParams] = useState<FormValue>({})
+
+ const modelModeType = modelConfig.mode
+
+ // Fill old app data missing model mode
+ useEffect(() => {
+ if (hasFetchedDetail && !modelModeType) {
+ const mode = currModel?.model_properties?.mode
+ if (mode) {
+ setModelConfig(produce(modelConfig, (draft) => {
+ draft.mode = mode
+ }))
+ }
+ }
+ }, [hasFetchedDetail, modelModeType, currModel])
+
+ return {
+ modelConfig,
+ setModelConfig,
+ completionParams,
+ setCompletionParams,
+ modelModeType,
+ }
+}
+```
+
+### Step 4: Update Component
+
+```typescript
+// Before: 50+ lines of state management
+const Configuration: FC = () => {
+ const [modelConfig, setModelConfig] = useState(...)
+ // ... lots of related state and effects
+}
+
+// After: Clean component
+const Configuration: FC = () => {
+ const {
+ modelConfig,
+ setModelConfig,
+ completionParams,
+ setCompletionParams,
+ modelModeType,
+ } = useModelConfig({
+ currModel,
+ hasFetchedDetail,
+ })
+
+ // Component now focuses on UI
+}
+```
+
+## Naming Conventions
+
+### Hook Names
+
+- Use `use` prefix: `useModelConfig`, `useDatasetConfig`
+- Be specific: `useAdvancedPromptConfig` not `usePrompt`
+- Include domain: `useWorkflowVariables`, `useMCPServer`
+
+### File Names
+
+- Kebab-case: `use-model-config.ts`
+- Place in `hooks/` subdirectory when multiple hooks exist
+- Place alongside component for single-use hooks
+
+### Return Type Names
+
+- Suffix with `Return`: `UseModelConfigReturn`
+- Suffix params with `Params`: `UseModelConfigParams`
+
+## Common Hook Patterns in Dify
+
+### 1. Data Fetching Hook (React Query)
+
+```typescript
+// Pattern: Use @tanstack/react-query for data fetching
+import { useQuery, useQueryClient } from '@tanstack/react-query'
+import { get } from '@/service/base'
+import { useInvalid } from '@/service/use-base'
+
+const NAME_SPACE = 'appConfig'
+
+// Query keys for cache management
+export const appConfigQueryKeys = {
+ detail: (appId: string) => [NAME_SPACE, 'detail', appId] as const,
+}
+
+// Main data hook
+export const useAppConfig = (appId: string) => {
+ return useQuery({
+ enabled: !!appId,
+ queryKey: appConfigQueryKeys.detail(appId),
+ queryFn: () => get(`/apps/${appId}`),
+ select: data => data?.model_config || null,
+ })
+}
+
+// Invalidation hook for refreshing data
+export const useInvalidAppConfig = () => {
+ return useInvalid([NAME_SPACE])
+}
+
+// Usage in component
+const Component = () => {
+ const { data: config, isLoading, error, refetch } = useAppConfig(appId)
+ const invalidAppConfig = useInvalidAppConfig()
+
+ const handleRefresh = () => {
+ invalidAppConfig() // Invalidates cache and triggers refetch
+ }
+
+ return ...
+}
+```
+
+### 2. Form State Hook
+
+```typescript
+// Pattern: Form state + validation + submission
+export const useConfigForm = (initialValues: ConfigFormValues) => {
+ const [values, setValues] = useState(initialValues)
+  const [errors, setErrors] = useState<Record<string, string>>({})
+ const [isSubmitting, setIsSubmitting] = useState(false)
+
+ const validate = useCallback(() => {
+    const newErrors: Record<string, string> = {}
+ if (!values.name) newErrors.name = 'Name is required'
+ setErrors(newErrors)
+ return Object.keys(newErrors).length === 0
+ }, [values])
+
+ const handleChange = useCallback((field: string, value: any) => {
+ setValues(prev => ({ ...prev, [field]: value }))
+ }, [])
+
+  const handleSubmit = useCallback(async (onSubmit: (values: ConfigFormValues) => Promise<void>) => {
+ if (!validate()) return
+ setIsSubmitting(true)
+ try {
+ await onSubmit(values)
+ } finally {
+ setIsSubmitting(false)
+ }
+ }, [values, validate])
+
+ return { values, errors, isSubmitting, handleChange, handleSubmit }
+}
+```
+
+### 3. Modal State Hook
+
+```typescript
+// Pattern: Multiple modal management
+type ModalType = 'edit' | 'delete' | 'duplicate' | null
+
+export const useModalState = () => {
+  const [activeModal, setActiveModal] = useState<ModalType>(null)
+  const [modalData, setModalData] = useState<any>(null)
+
+ const openModal = useCallback((type: ModalType, data?: any) => {
+ setActiveModal(type)
+ setModalData(data)
+ }, [])
+
+ const closeModal = useCallback(() => {
+ setActiveModal(null)
+ setModalData(null)
+ }, [])
+
+ return {
+ activeModal,
+ modalData,
+ openModal,
+ closeModal,
+ isOpen: useCallback((type: ModalType) => activeModal === type, [activeModal]),
+ }
+}
+```
+
+### 4. Toggle/Boolean Hook
+
+```typescript
+// Pattern: Boolean state with convenience methods
+export const useToggle = (initialValue = false) => {
+ const [value, setValue] = useState(initialValue)
+
+ const toggle = useCallback(() => setValue(v => !v), [])
+ const setTrue = useCallback(() => setValue(true), [])
+ const setFalse = useCallback(() => setValue(false), [])
+
+ return [value, { toggle, setTrue, setFalse, set: setValue }] as const
+}
+
+// Usage
+const [isExpanded, { toggle, setTrue: expand, setFalse: collapse }] = useToggle()
+```
+
+## Testing Extracted Hooks
+
+After extraction, test hooks in isolation:
+
+```typescript
+// use-model-config.spec.ts
+import { renderHook, act } from '@testing-library/react'
+import { useModelConfig } from './use-model-config'
+
+describe('useModelConfig', () => {
+ it('should initialize with default values', () => {
+ const { result } = renderHook(() => useModelConfig({
+ hasFetchedDetail: false,
+ }))
+
+ expect(result.current.modelConfig.provider).toBe('langgenius/openai/openai')
+ expect(result.current.modelModeType).toBe(ModelModeType.unset)
+ })
+
+ it('should update model config', () => {
+ const { result } = renderHook(() => useModelConfig({
+ hasFetchedDetail: true,
+ }))
+
+ act(() => {
+ result.current.setModelConfig({
+ ...result.current.modelConfig,
+ model_id: 'gpt-4',
+ })
+ })
+
+ expect(result.current.modelConfig.model_id).toBe('gpt-4')
+ })
+})
+```
diff --git a/.claude/skills/frontend-code-review/SKILL.md b/.claude/skills/frontend-code-review/SKILL.md
new file mode 100644
index 0000000000..6cc23ca171
--- /dev/null
+++ b/.claude/skills/frontend-code-review/SKILL.md
@@ -0,0 +1,73 @@
+---
+name: frontend-code-review
+description: "Trigger when the user requests a review of frontend files (e.g., `.tsx`, `.ts`, `.js`). Support both pending-change reviews and focused file reviews while applying the checklist rules."
+---
+
+# Frontend Code Review
+
+## Intent
+Use this skill whenever the user asks to review frontend code (especially `.tsx`, `.ts`, or `.js` files). Support two review modes:
+
+1. **Pending-change review** – inspect staged/working-tree files slated for commit and flag checklist violations before submission.
+2. **File-targeted review** – review the specific file(s) the user names and report the relevant checklist findings.
+
+Stick to the checklist below for every applicable file and mode.
+
+## Checklist
+See [references/code-quality.md](references/code-quality.md), [references/performance.md](references/performance.md), [references/business-logic.md](references/business-logic.md) for the living checklist split by category—treat it as the canonical set of rules to follow.
+
+Flag each rule violation with urgency metadata so future reviewers can prioritize fixes.
+
+## Review Process
+1. Open the relevant component/module. Gather lines that relate to class names, React Flow hooks, prop memoization, and styling.
+2. For each rule in the review point, note where the code deviates and capture a representative snippet.
+3. Compose the review section per the template below. Group violations first by **Urgent** flag, then by category order (Code Quality, Performance, Business Logic).
+
+## Required output
+When invoked, the response must exactly follow one of the two templates:
+
+### Template A (any findings)
+```
+# Code review
+Found urgent issues need to be fixed:
+
+## 1
+FilePath: line
+
+
+
+### Suggested fix
+
+
+---
+... (repeat for each urgent issue) ...
+
+Found suggestions for improvement:
+
+## 1
+FilePath: line
+
+
+
+### Suggested fix
+
+
+---
+
+... (repeat for each suggestion) ...
+```
+
+If there are no urgent issues, omit that section. If there are no suggestions, omit that section.
+
+If there are more than 10 issues, summarize the count as "10+ urgent issues" or "10+ suggestions" and output only the first 10 issues.
+
+Don't compress the blank lines between sections; keep them as-is for readability.
+
+If you use Template A (i.e., there are issues to fix) and at least one issue requires code changes, append a brief follow-up question after the structured output asking whether the user wants you to apply the suggested fix(es). For example: "Would you like me to use the Suggested fix section to address these issues?"
+
+### Template B (no issues)
+```
+## Code review
+No issues found.
+```
+
diff --git a/.claude/skills/frontend-code-review/references/business-logic.md b/.claude/skills/frontend-code-review/references/business-logic.md
new file mode 100644
index 0000000000..4584f99dfc
--- /dev/null
+++ b/.claude/skills/frontend-code-review/references/business-logic.md
@@ -0,0 +1,15 @@
+# Rule Catalog — Business Logic
+
+## Can't use workflowStore in Node components
+
+IsUrgent: True
+
+### Description
+
+File path pattern of node components: `web/app/components/workflow/nodes/[nodeName]/node.tsx`
+
+Node components are also used when creating a RAG Pipe from a template, but in that context there is no workflowStore Provider, which results in a blank screen. [This issue](https://github.com/langgenius/dify/issues/29168) was caused by exactly this problem.
+
+### Suggested Fix
+
+Use `import { useNodes } from 'reactflow'` instead of `import useNodes from '@/app/components/workflow/store/workflow/use-nodes'`.
diff --git a/.claude/skills/frontend-code-review/references/code-quality.md b/.claude/skills/frontend-code-review/references/code-quality.md
new file mode 100644
index 0000000000..afdd40deb3
--- /dev/null
+++ b/.claude/skills/frontend-code-review/references/code-quality.md
@@ -0,0 +1,44 @@
+# Rule Catalog — Code Quality
+
+## Conditional class names use utility function
+
+IsUrgent: True
+Category: Code Quality
+
+### Description
+
+Ensure conditional CSS is handled via the shared `cn` helper from `@/utils/classnames` instead of custom ternaries, string concatenation, or template strings. Centralizing class logic keeps components consistent and easier to maintain.
+
+### Suggested Fix
+
+```ts
+import { cn } from '@/utils/classnames'
+const classNames = cn(isActive ? 'text-primary-600' : 'text-gray-500')
+```
+
+## Tailwind-first styling
+
+IsUrgent: True
+Category: Code Quality
+
+### Description
+
+Favor Tailwind CSS utility classes instead of adding new `.module.css` files unless a Tailwind combination cannot achieve the required styling. Keeping styles in Tailwind improves consistency and reduces maintenance overhead.
+
+Update this file when adding, editing, or removing Code Quality rules so the catalog remains accurate.
+
+## Classname ordering for easy overrides
+
+### Description
+
+When writing components, always place the incoming `className` prop after the component’s own class values so that downstream consumers can override or extend the styling. This keeps your component’s defaults but still lets external callers change or remove specific styles.
+
+Example:
+
+```tsx
+import { cn } from '@/utils/classnames'
+
+const Button = ({ className }) => {
+  return <button className={cn('px-3 py-2', className)}>Click me</button>
+}
+```
diff --git a/.claude/skills/frontend-code-review/references/performance.md b/.claude/skills/frontend-code-review/references/performance.md
new file mode 100644
index 0000000000..2d60072f5c
--- /dev/null
+++ b/.claude/skills/frontend-code-review/references/performance.md
@@ -0,0 +1,45 @@
+# Rule Catalog — Performance
+
+## React Flow data usage
+
+IsUrgent: True
+Category: Performance
+
+### Description
+
+When rendering React Flow, prefer `useNodes`/`useEdges` for UI consumption and rely on `useStoreApi` inside callbacks that mutate or read node/edge state. Avoid manually pulling Flow data outside of these hooks.
+
+## Complex prop memoization
+
+IsUrgent: True
+Category: Performance
+
+### Description
+
+Wrap complex prop values (objects, arrays, maps) in `useMemo` prior to passing them into child components to guarantee stable references and prevent unnecessary renders.
+
+Update this file when adding, editing, or removing Performance rules so the catalog remains accurate.
+
+Wrong:
+
+```tsx
+<Child config={{ provider, detail }} />
+```
+
+Right:
+
+```tsx
+const config = useMemo(() => ({
+  provider: ...,
+  detail: ...
+}), [provider, detail]);
+
+<Child config={config} />
+```
diff --git a/.claude/skills/frontend-testing/SKILL.md b/.claude/skills/frontend-testing/SKILL.md
new file mode 100644
index 0000000000..dd9677a78e
--- /dev/null
+++ b/.claude/skills/frontend-testing/SKILL.md
@@ -0,0 +1,322 @@
+---
+name: frontend-testing
+description: Generate Vitest + React Testing Library tests for Dify frontend components, hooks, and utilities. Triggers on testing, spec files, coverage, Vitest, RTL, unit tests, integration tests, or write/review test requests.
+---
+
+# Dify Frontend Testing Skill
+
+This skill enables Claude to generate high-quality, comprehensive frontend tests for the Dify project following established conventions and best practices.
+
+> **⚠️ Authoritative Source**: This skill is derived from `web/testing/testing.md`. Use Vitest mock/timer APIs (`vi.*`).
+
+## When to Apply This Skill
+
+Apply this skill when the user:
+
+- Asks to **write tests** for a component, hook, or utility
+- Asks to **review existing tests** for completeness
+- Mentions **Vitest**, **React Testing Library**, **RTL**, or **spec files**
+- Requests **test coverage** improvement
+- Uses `pnpm analyze-component` output as context
+- Mentions **testing**, **unit tests**, or **integration tests** for frontend code
+- Wants to understand **testing patterns** in the Dify codebase
+
+**Do NOT apply** when:
+
+- User is asking about backend/API tests (Python/pytest)
+- User is asking about E2E tests (Playwright/Cypress)
+- User is only asking conceptual questions without code context
+
+## Quick Reference
+
+### Tech Stack
+
+| Tool | Version | Purpose |
+|------|---------|---------|
+| Vitest | 4.0.16 | Test runner |
+| React Testing Library | 16.0 | Component testing |
+| jsdom | - | Test environment |
+| nock | 14.0 | HTTP mocking |
+| TypeScript | 5.x | Type safety |
+
+### Key Commands
+
+```bash
+# Run all tests
+pnpm test
+
+# Watch mode
+pnpm test:watch
+
+# Run specific file
+pnpm test path/to/file.spec.tsx
+
+# Generate coverage report
+pnpm test:coverage
+
+# Analyze component complexity
+pnpm analyze-component
+
+# Review existing test
+pnpm analyze-component --review
+```
+
+### File Naming
+
+- Test files: `ComponentName.spec.tsx` (same directory as component)
+- Integration tests: `web/__tests__/` directory
+
+## Test Structure Template
+
+```typescript
+import { render, screen, fireEvent, waitFor } from '@testing-library/react'
+import Component from './index'
+
+// ✅ Import real project components (DO NOT mock these)
+// import Loading from '@/app/components/base/loading'
+// import { ChildComponent } from './child-component'
+
+// ✅ Mock external dependencies only
+vi.mock('@/service/api')
+vi.mock('next/navigation', () => ({
+ useRouter: () => ({ push: vi.fn() }),
+ usePathname: () => '/test',
+}))
+
+// Shared state for mocks (if needed)
+let mockSharedState = false
+
+describe('ComponentName', () => {
+ beforeEach(() => {
+ vi.clearAllMocks() // ✅ Reset mocks BEFORE each test
+ mockSharedState = false // ✅ Reset shared state
+ })
+
+ // Rendering tests (REQUIRED)
+ describe('Rendering', () => {
+ it('should render without crashing', () => {
+ // Arrange
+ const props = { title: 'Test' }
+
+ // Act
+      render(<Component {...props} />)
+
+ // Assert
+ expect(screen.getByText('Test')).toBeInTheDocument()
+ })
+ })
+
+ // Props tests (REQUIRED)
+ describe('Props', () => {
+ it('should apply custom className', () => {
+      render(<Component className="custom" />)
+ expect(screen.getByRole('button')).toHaveClass('custom')
+ })
+ })
+
+ // User Interactions
+ describe('User Interactions', () => {
+ it('should handle click events', () => {
+ const handleClick = vi.fn()
+      render(<Component onClick={handleClick} />)
+
+ fireEvent.click(screen.getByRole('button'))
+
+ expect(handleClick).toHaveBeenCalledTimes(1)
+ })
+ })
+
+ // Edge Cases (REQUIRED)
+ describe('Edge Cases', () => {
+ it('should handle null data', () => {
+      render(<Component data={null} />)
+ expect(screen.getByText(/no data/i)).toBeInTheDocument()
+ })
+
+ it('should handle empty array', () => {
+      render(<Component data={[]} />)
+ expect(screen.getByText(/empty/i)).toBeInTheDocument()
+ })
+ })
+})
+```
+
+## Testing Workflow (CRITICAL)
+
+### ⚠️ Incremental Approach Required
+
+**NEVER generate all test files at once.** For complex components or multi-file directories:
+
+1. **Analyze & Plan**: List all files, order by complexity (simple → complex)
+1. **Process ONE at a time**: Write test → Run test → Fix if needed → Next
+1. **Verify before proceeding**: Do NOT continue to next file until current passes
+
+```
+For each file:
+ ┌────────────────────────────────────────┐
+ │ 1. Write test │
+ │ 2. Run: pnpm test .spec.tsx │
+ │ 3. PASS? → Mark complete, next file │
+ │ FAIL? → Fix first, then continue │
+ └────────────────────────────────────────┘
+```
+
+### Complexity-Based Order
+
+Process in this order for multi-file testing:
+
+1. 🟢 Utility functions (simplest)
+1. 🟢 Custom hooks
+1. 🟡 Simple components (presentational)
+1. 🟡 Medium components (state, effects)
+1. 🔴 Complex components (API, routing)
+1. 🔴 Integration tests (index files - last)
+
+### When to Refactor First
+
+- **Complexity > 50**: Break into smaller pieces before testing
+- **500+ lines**: Consider splitting before testing
+- **Many dependencies**: Extract logic into hooks first
+
+> 📖 See `references/workflow.md` for complete workflow details and todo list format.
+
+## Testing Strategy
+
+### Path-Level Testing (Directory Testing)
+
+When assigned to test a directory/path, test **ALL content** within that path:
+
+- Test all components, hooks, utilities in the directory (not just `index` file)
+- Use incremental approach: one file at a time, verify each before proceeding
+- Goal: 100% coverage of ALL files in the directory
+
+### Integration Testing First
+
+**Prefer integration testing** when writing tests for a directory:
+
+- ✅ **Import real project components** directly (including base components and siblings)
+- ✅ **Only mock**: API services (`@/service/*`), `next/navigation`, complex context providers
+- ❌ **DO NOT mock** base components (`@/app/components/base/*`)
+- ❌ **DO NOT mock** sibling/child components in the same directory
+
+> See [Test Structure Template](#test-structure-template) for correct import/mock patterns.
+
+## Core Principles
+
+### 1. AAA Pattern (Arrange-Act-Assert)
+
+Every test should clearly separate:
+
+- **Arrange**: Setup test data and render component
+- **Act**: Perform user actions
+- **Assert**: Verify expected outcomes
+
+### 2. Black-Box Testing
+
+- Test observable behavior, not implementation details
+- Use semantic queries (getByRole, getByLabelText)
+- Avoid testing internal state directly
+- **Prefer pattern matching over hardcoded strings** in assertions:
+
+```typescript
+// ❌ Avoid: hardcoded text assertions
+expect(screen.getByText('Loading...')).toBeInTheDocument()
+
+// ✅ Better: role-based queries
+expect(screen.getByRole('status')).toBeInTheDocument()
+
+// ✅ Better: pattern matching
+expect(screen.getByText(/loading/i)).toBeInTheDocument()
+```
+
+### 3. Single Behavior Per Test
+
+Each test verifies ONE user-observable behavior:
+
+```typescript
+// ✅ Good: One behavior
+it('should disable button when loading', () => {
+ render()
+ expect(screen.getByRole('button')).toBeDisabled()
+})
+
+// ❌ Bad: Multiple behaviors
+it('should handle loading state', () => {
+ render()
+ expect(screen.getByRole('button')).toBeDisabled()
+ expect(screen.getByText('Loading...')).toBeInTheDocument()
+ expect(screen.getByRole('button')).toHaveClass('loading')
+})
+```
+
+### 4. Semantic Naming
+
+Use `should when `:
+
+```typescript
+it('should show error message when validation fails')
+it('should call onSubmit when form is valid')
+it('should disable input when isReadOnly is true')
+```
+
+## Required Test Scenarios
+
+### Always Required (All Components)
+
+1. **Rendering**: Component renders without crashing
+1. **Props**: Required props, optional props, default values
+1. **Edge Cases**: null, undefined, empty values, boundary conditions
+
+### Conditional (When Present)
+
+| Feature | Test Focus |
+|---------|-----------|
+| `useState` | Initial state, transitions, cleanup |
+| `useEffect` | Execution, dependencies, cleanup |
+| Event handlers | All onClick, onChange, onSubmit, keyboard |
+| API calls | Loading, success, error states |
+| Routing | Navigation, params, query strings |
+| `useCallback`/`useMemo` | Referential equality |
+| Context | Provider values, consumer behavior |
+| Forms | Validation, submission, error display |
+
+## Coverage Goals (Per File)
+
+For each test file generated, aim for:
+
+- ✅ **100%** function coverage
+- ✅ **100%** statement coverage
+- ✅ **>95%** branch coverage
+- ✅ **>95%** line coverage
+
+> **Note**: For multi-file directories, process one file at a time with full coverage each. See `references/workflow.md`.
+
+## Detailed Guides
+
+For more detailed information, refer to:
+
+- `references/workflow.md` - **Incremental testing workflow** (MUST READ for multi-file testing)
+- `references/mocking.md` - Mock patterns and best practices
+- `references/async-testing.md` - Async operations and API calls
+- `references/domain-components.md` - Workflow, Dataset, Configuration testing
+- `references/common-patterns.md` - Frequently used testing patterns
+- `references/checklist.md` - Test generation checklist and validation steps
+
+## Authoritative References
+
+### Primary Specification (MUST follow)
+
+- **`web/testing/testing.md`** - The canonical testing specification. This skill is derived from this document.
+
+### Reference Examples in Codebase
+
+- `web/utils/classnames.spec.ts` - Utility function tests
+- `web/app/components/base/button/index.spec.tsx` - Component tests
+- `web/__mocks__/provider-context.ts` - Mock factory example
+
+### Project Configuration
+
+- `web/vitest.config.ts` - Vitest configuration
+- `web/vitest.setup.ts` - Test environment setup
+- `web/scripts/analyze-component.js` - Component analysis tool
+- Modules are not mocked automatically. Global mocks live in `web/vitest.setup.ts` (for example `react-i18next`, `next/image`); mock other modules like `ky` or `mime` locally in test files.
diff --git a/.claude/skills/frontend-testing/assets/component-test.template.tsx b/.claude/skills/frontend-testing/assets/component-test.template.tsx
new file mode 100644
index 0000000000..6b7803bd4b
--- /dev/null
+++ b/.claude/skills/frontend-testing/assets/component-test.template.tsx
@@ -0,0 +1,293 @@
+/**
+ * Test Template for React Components
+ *
+ * WHY THIS STRUCTURE?
+ * - Organized sections make tests easy to navigate and maintain
+ * - Mocks at top ensure consistent test isolation
+ * - Factory functions reduce duplication and improve readability
+ * - describe blocks group related scenarios for better debugging
+ *
+ * INSTRUCTIONS:
+ * 1. Replace `ComponentName` with your component name
+ * 2. Update import path
+ * 3. Add/remove test sections based on component features (use analyze-component)
+ * 4. Follow AAA pattern: Arrange → Act → Assert
+ *
+ * RUN FIRST: pnpm analyze-component to identify required test scenarios
+ */
+
+import { render, screen, fireEvent, waitFor } from '@testing-library/react'
+import userEvent from '@testing-library/user-event'
+// import ComponentName from './index'
+
+// ============================================================================
+// Mocks
+// ============================================================================
+// WHY: Mocks must be hoisted to top of file (Vitest requirement).
+// They run BEFORE imports, so keep them before component imports.
+
+// i18n (automatically mocked)
+// WHY: Global mock in web/vitest.setup.ts is auto-loaded by Vitest setup
+// The global mock provides: useTranslation, Trans, useMixedTranslation, useGetLanguage
+// No explicit mock needed for most tests
+//
+// Override only if custom translations are required:
+// import { createReactI18nextMock } from '@/test/i18n-mock'
+// vi.mock('react-i18next', () => createReactI18nextMock({
+// 'my.custom.key': 'Custom Translation',
+// 'button.save': 'Save',
+// }))
+
+// Router (if component uses useRouter, usePathname, useSearchParams)
+// WHY: Isolates tests from Next.js routing, enables testing navigation behavior
+// const mockPush = vi.fn()
+// vi.mock('next/navigation', () => ({
+// useRouter: () => ({ push: mockPush }),
+// usePathname: () => '/test-path',
+// }))
+
+// API services (if component fetches data)
+// WHY: Prevents real network calls, enables testing all states (loading/success/error)
+// vi.mock('@/service/api')
+// import * as api from '@/service/api'
+// const mockedApi = vi.mocked(api)
+
+// Shared mock state (for portal/dropdown components)
+// WHY: Portal components like PortalToFollowElem need shared state between
+// parent and child mocks to correctly simulate open/close behavior
+// let mockOpenState = false
+
+// ============================================================================
+// Test Data Factories
+// ============================================================================
+// WHY FACTORIES?
+// - Avoid hard-coded test data scattered across tests
+// - Easy to create variations with overrides
+// - Type-safe when using actual types from source
+// - Single source of truth for default test values
+
+// const createMockProps = (overrides = {}) => ({
+// // Default props that make component render successfully
+// ...overrides,
+// })
+
+// const createMockItem = (overrides = {}) => ({
+// id: 'item-1',
+// name: 'Test Item',
+// ...overrides,
+// })
+
+// ============================================================================
+// Test Helpers
+// ============================================================================
+
+// const renderComponent = (props = {}) => {
+//   return render(<ComponentName {...props} />)
+// }
+
+// ============================================================================
+// Tests
+// ============================================================================
+
+describe('ComponentName', () => {
+ // WHY beforeEach with clearAllMocks?
+ // - Ensures each test starts with clean slate
+ // - Prevents mock call history from leaking between tests
+ // - MUST be beforeEach (not afterEach) to reset BEFORE assertions like toHaveBeenCalledTimes
+ beforeEach(() => {
+ vi.clearAllMocks()
+ // Reset shared mock state if used (CRITICAL for portal/dropdown tests)
+ // mockOpenState = false
+ })
+
+ // --------------------------------------------------------------------------
+ // Rendering Tests (REQUIRED - Every component MUST have these)
+ // --------------------------------------------------------------------------
+ // WHY: Catches import errors, missing providers, and basic render issues
+ describe('Rendering', () => {
+ it('should render without crashing', () => {
+ // Arrange - Setup data and mocks
+ // const props = createMockProps()
+
+ // Act - Render the component
+ // render()
+
+ // Assert - Verify expected output
+ // Prefer getByRole for accessibility; it's what users "see"
+ // expect(screen.getByRole('...')).toBeInTheDocument()
+ })
+
+ it('should render with default props', () => {
+ // WHY: Verifies component works without optional props
+ // render()
+ // expect(screen.getByText('...')).toBeInTheDocument()
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Props Tests (REQUIRED - Every component MUST test prop behavior)
+ // --------------------------------------------------------------------------
+ // WHY: Props are the component's API contract. Test them thoroughly.
+ describe('Props', () => {
+ it('should apply custom className', () => {
+ // WHY: Common pattern in Dify - components should merge custom classes
+ // render()
+ // expect(screen.getByTestId('component')).toHaveClass('custom-class')
+ })
+
+ it('should use default values for optional props', () => {
+ // WHY: Verifies TypeScript defaults work at runtime
+ // render()
+ // expect(screen.getByRole('...')).toHaveAttribute('...', 'default-value')
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // User Interactions (if component has event handlers - on*, handle*)
+ // --------------------------------------------------------------------------
+ // WHY: Event handlers are core functionality. Test from user's perspective.
+ describe('User Interactions', () => {
+ it('should call onClick when clicked', async () => {
+ // WHY userEvent over fireEvent?
+ // - userEvent simulates real user behavior (focus, hover, then click)
+ // - fireEvent is lower-level, doesn't trigger all browser events
+ // const user = userEvent.setup()
+ // const handleClick = vi.fn()
+ // render()
+ //
+ // await user.click(screen.getByRole('button'))
+ //
+ // expect(handleClick).toHaveBeenCalledTimes(1)
+ })
+
+ it('should call onChange when value changes', async () => {
+ // const user = userEvent.setup()
+ // const handleChange = vi.fn()
+ // render()
+ //
+ // await user.type(screen.getByRole('textbox'), 'new value')
+ //
+ // expect(handleChange).toHaveBeenCalled()
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // State Management (if component uses useState/useReducer)
+ // --------------------------------------------------------------------------
+ // WHY: Test state through observable UI changes, not internal state values
+ describe('State Management', () => {
+ it('should update state on interaction', async () => {
+ // WHY test via UI, not state?
+ // - State is implementation detail; UI is what users see
+ // - If UI works correctly, state must be correct
+ // const user = userEvent.setup()
+ // render()
+ //
+ // // Initial state - verify what user sees
+ // expect(screen.getByText('Initial')).toBeInTheDocument()
+ //
+ // // Trigger state change via user action
+ // await user.click(screen.getByRole('button'))
+ //
+ // // New state - verify UI updated
+ // expect(screen.getByText('Updated')).toBeInTheDocument()
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Async Operations (if component fetches data - useQuery, fetch)
+ // --------------------------------------------------------------------------
+ // WHY: Async operations have 3 states users experience: loading, success, error
+ describe('Async Operations', () => {
+ it('should show loading state', () => {
+ // WHY never-resolving promise?
+ // - Keeps component in loading state for assertion
+ // - Alternative: use fake timers
+ // mockedApi.fetchData.mockImplementation(() => new Promise(() => {}))
+ // render()
+ //
+ // expect(screen.getByText(/loading/i)).toBeInTheDocument()
+ })
+
+ it('should show data on success', async () => {
+ // WHY waitFor?
+ // - Component updates asynchronously after fetch resolves
+ // - waitFor retries assertion until it passes or times out
+ // mockedApi.fetchData.mockResolvedValue({ items: ['Item 1'] })
+ // render()
+ //
+ // await waitFor(() => {
+ // expect(screen.getByText('Item 1')).toBeInTheDocument()
+ // })
+ })
+
+ it('should show error on failure', async () => {
+ // mockedApi.fetchData.mockRejectedValue(new Error('Network error'))
+ // render()
+ //
+ // await waitFor(() => {
+ // expect(screen.getByText(/error/i)).toBeInTheDocument()
+ // })
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Edge Cases (REQUIRED - Every component MUST handle edge cases)
+ // --------------------------------------------------------------------------
+ // WHY: Real-world data is messy. Components must handle:
+ // - Null/undefined from API failures or optional fields
+ // - Empty arrays/strings from user clearing data
+ // - Boundary values (0, MAX_INT, special characters)
+ describe('Edge Cases', () => {
+ it('should handle null value', () => {
+ // WHY test null specifically?
+ // - API might return null for missing data
+ // - Prevents "Cannot read property of null" in production
+ // render()
+ // expect(screen.getByText(/no data/i)).toBeInTheDocument()
+ })
+
+ it('should handle undefined value', () => {
+ // WHY test undefined separately from null?
+ // - TypeScript treats them differently
+ // - Optional props are undefined, not null
+ // render()
+ // expect(screen.getByText(/no data/i)).toBeInTheDocument()
+ })
+
+ it('should handle empty array', () => {
+ // WHY: Empty state often needs special UI (e.g., "No items yet")
+ // render()
+ // expect(screen.getByText(/empty/i)).toBeInTheDocument()
+ })
+
+ it('should handle empty string', () => {
+ // WHY: Empty strings are truthy in JS but visually empty
+ // render()
+ // expect(screen.getByText(/placeholder/i)).toBeInTheDocument()
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Accessibility (optional but recommended for Dify's enterprise users)
+ // --------------------------------------------------------------------------
+ // WHY: Dify has enterprise customers who may require accessibility compliance
+ describe('Accessibility', () => {
+ it('should have accessible name', () => {
+ // WHY getByRole with name?
+ // - Tests that screen readers can identify the element
+ // - Enforces proper labeling practices
+ // render()
+ // expect(screen.getByRole('button', { name: /test label/i })).toBeInTheDocument()
+ })
+
+ it('should support keyboard navigation', async () => {
+ // WHY: Some users can't use a mouse
+ // const user = userEvent.setup()
+ // render()
+ //
+ // await user.tab()
+ // expect(screen.getByRole('button')).toHaveFocus()
+ })
+ })
+})
diff --git a/.claude/skills/frontend-testing/assets/hook-test.template.ts b/.claude/skills/frontend-testing/assets/hook-test.template.ts
new file mode 100644
index 0000000000..99161848a4
--- /dev/null
+++ b/.claude/skills/frontend-testing/assets/hook-test.template.ts
@@ -0,0 +1,207 @@
+/**
+ * Test Template for Custom Hooks
+ *
+ * Instructions:
+ * 1. Replace `useHookName` with your hook name
+ * 2. Update import path
+ * 3. Add/remove test sections based on hook features
+ */
+
+import { renderHook, act, waitFor } from '@testing-library/react'
+// import { useHookName } from './use-hook-name'
+
+// ============================================================================
+// Mocks
+// ============================================================================
+
+// API services (if hook fetches data)
+// vi.mock('@/service/api')
+// import * as api from '@/service/api'
+// const mockedApi = vi.mocked(api)
+
+// ============================================================================
+// Test Helpers
+// ============================================================================
+
+// Wrapper for hooks that need context
+// const createWrapper = (contextValue = {}) => {
+// return ({ children }: { children: React.ReactNode }) => (
+//     <SomeContext.Provider value={contextValue}>
+//       {children}
+//     </SomeContext.Provider>
+// )
+// }
+
+// ============================================================================
+// Tests
+// ============================================================================
+
+describe('useHookName', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+ })
+
+ // --------------------------------------------------------------------------
+ // Initial State
+ // --------------------------------------------------------------------------
+ describe('Initial State', () => {
+ it('should return initial state', () => {
+ // const { result } = renderHook(() => useHookName())
+ //
+ // expect(result.current.value).toBe(initialValue)
+ // expect(result.current.isLoading).toBe(false)
+ })
+
+ it('should accept initial value from props', () => {
+ // const { result } = renderHook(() => useHookName({ initialValue: 'custom' }))
+ //
+ // expect(result.current.value).toBe('custom')
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // State Updates
+ // --------------------------------------------------------------------------
+ describe('State Updates', () => {
+ it('should update value when setValue is called', () => {
+ // const { result } = renderHook(() => useHookName())
+ //
+ // act(() => {
+ // result.current.setValue('new value')
+ // })
+ //
+ // expect(result.current.value).toBe('new value')
+ })
+
+ it('should reset to initial value', () => {
+ // const { result } = renderHook(() => useHookName({ initialValue: 'initial' }))
+ //
+ // act(() => {
+ // result.current.setValue('changed')
+ // })
+ // expect(result.current.value).toBe('changed')
+ //
+ // act(() => {
+ // result.current.reset()
+ // })
+ // expect(result.current.value).toBe('initial')
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Async Operations
+ // --------------------------------------------------------------------------
+ describe('Async Operations', () => {
+ it('should fetch data on mount', async () => {
+ // mockedApi.fetchData.mockResolvedValue({ data: 'test' })
+ //
+ // const { result } = renderHook(() => useHookName())
+ //
+ // // Initially loading
+ // expect(result.current.isLoading).toBe(true)
+ //
+ // // Wait for data
+ // await waitFor(() => {
+ // expect(result.current.isLoading).toBe(false)
+ // })
+ //
+ // expect(result.current.data).toEqual({ data: 'test' })
+ })
+
+ it('should handle fetch error', async () => {
+ // mockedApi.fetchData.mockRejectedValue(new Error('Network error'))
+ //
+ // const { result } = renderHook(() => useHookName())
+ //
+ // await waitFor(() => {
+ // expect(result.current.error).toBeTruthy()
+ // })
+ //
+ // expect(result.current.error?.message).toBe('Network error')
+ })
+
+ it('should refetch when dependency changes', async () => {
+ // mockedApi.fetchData.mockResolvedValue({ data: 'test' })
+ //
+ // const { result, rerender } = renderHook(
+ // ({ id }) => useHookName(id),
+ // { initialProps: { id: '1' } }
+ // )
+ //
+ // await waitFor(() => {
+ // expect(mockedApi.fetchData).toHaveBeenCalledWith('1')
+ // })
+ //
+ // rerender({ id: '2' })
+ //
+ // await waitFor(() => {
+ // expect(mockedApi.fetchData).toHaveBeenCalledWith('2')
+ // })
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Side Effects
+ // --------------------------------------------------------------------------
+ describe('Side Effects', () => {
+ it('should call callback when value changes', () => {
+ // const callback = vi.fn()
+ // const { result } = renderHook(() => useHookName({ onChange: callback }))
+ //
+ // act(() => {
+ // result.current.setValue('new value')
+ // })
+ //
+ // expect(callback).toHaveBeenCalledWith('new value')
+ })
+
+ it('should cleanup on unmount', () => {
+ // const cleanup = vi.fn()
+ // vi.spyOn(window, 'addEventListener')
+ // vi.spyOn(window, 'removeEventListener')
+ //
+ // const { unmount } = renderHook(() => useHookName())
+ //
+ // expect(window.addEventListener).toHaveBeenCalled()
+ //
+ // unmount()
+ //
+ // expect(window.removeEventListener).toHaveBeenCalled()
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Edge Cases
+ // --------------------------------------------------------------------------
+ describe('Edge Cases', () => {
+ it('should handle null input', () => {
+ // const { result } = renderHook(() => useHookName(null))
+ //
+ // expect(result.current.value).toBeNull()
+ })
+
+ it('should handle rapid updates', () => {
+ // const { result } = renderHook(() => useHookName())
+ //
+ // act(() => {
+ // result.current.setValue('1')
+ // result.current.setValue('2')
+ // result.current.setValue('3')
+ // })
+ //
+ // expect(result.current.value).toBe('3')
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // With Context (if hook uses context)
+ // --------------------------------------------------------------------------
+ describe('With Context', () => {
+ it('should use context value', () => {
+ // const wrapper = createWrapper({ someValue: 'context-value' })
+ // const { result } = renderHook(() => useHookName(), { wrapper })
+ //
+ // expect(result.current.contextValue).toBe('context-value')
+ })
+ })
+})
diff --git a/.claude/skills/frontend-testing/assets/utility-test.template.ts b/.claude/skills/frontend-testing/assets/utility-test.template.ts
new file mode 100644
index 0000000000..ec13b5f5bd
--- /dev/null
+++ b/.claude/skills/frontend-testing/assets/utility-test.template.ts
@@ -0,0 +1,154 @@
+/**
+ * Test Template for Utility Functions
+ *
+ * Instructions:
+ * 1. Replace `utilityFunction` with your function name
+ * 2. Update import path
+ * 3. Use test.each for data-driven tests
+ */
+
+// import { utilityFunction } from './utility'
+
+// ============================================================================
+// Tests
+// ============================================================================
+
+describe('utilityFunction', () => {
+ // --------------------------------------------------------------------------
+ // Basic Functionality
+ // --------------------------------------------------------------------------
+ describe('Basic Functionality', () => {
+ it('should return expected result for valid input', () => {
+ // expect(utilityFunction('input')).toBe('expected-output')
+ })
+
+ it('should handle multiple arguments', () => {
+ // expect(utilityFunction('a', 'b', 'c')).toBe('abc')
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Data-Driven Tests
+ // --------------------------------------------------------------------------
+ describe('Input/Output Mapping', () => {
+ test.each([
+ // [input, expected]
+ ['input1', 'output1'],
+ ['input2', 'output2'],
+ ['input3', 'output3'],
+  ])('should map input %s to output %s', (input, expected) => {
+ // expect(utilityFunction(input)).toBe(expected)
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Edge Cases
+ // --------------------------------------------------------------------------
+ describe('Edge Cases', () => {
+ it('should handle empty string', () => {
+ // expect(utilityFunction('')).toBe('')
+ })
+
+ it('should handle null', () => {
+ // expect(utilityFunction(null)).toBe(null)
+ // or
+ // expect(() => utilityFunction(null)).toThrow()
+ })
+
+ it('should handle undefined', () => {
+ // expect(utilityFunction(undefined)).toBe(undefined)
+ // or
+ // expect(() => utilityFunction(undefined)).toThrow()
+ })
+
+ it('should handle empty array', () => {
+ // expect(utilityFunction([])).toEqual([])
+ })
+
+ it('should handle empty object', () => {
+ // expect(utilityFunction({})).toEqual({})
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Boundary Conditions
+ // --------------------------------------------------------------------------
+ describe('Boundary Conditions', () => {
+ it('should handle minimum value', () => {
+ // expect(utilityFunction(0)).toBe(0)
+ })
+
+ it('should handle maximum value', () => {
+ // expect(utilityFunction(Number.MAX_SAFE_INTEGER)).toBe(...)
+ })
+
+ it('should handle negative numbers', () => {
+ // expect(utilityFunction(-1)).toBe(...)
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Type Coercion (if applicable)
+ // --------------------------------------------------------------------------
+ describe('Type Handling', () => {
+ it('should handle numeric string', () => {
+ // expect(utilityFunction('123')).toBe(123)
+ })
+
+ it('should handle boolean', () => {
+ // expect(utilityFunction(true)).toBe(...)
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Error Cases
+ // --------------------------------------------------------------------------
+ describe('Error Handling', () => {
+ it('should throw for invalid input', () => {
+ // expect(() => utilityFunction('invalid')).toThrow('Error message')
+ })
+
+ it('should throw with specific error type', () => {
+ // expect(() => utilityFunction('invalid')).toThrow(ValidationError)
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Complex Objects (if applicable)
+ // --------------------------------------------------------------------------
+ describe('Object Handling', () => {
+ it('should preserve object structure', () => {
+ // const input = { a: 1, b: 2 }
+ // expect(utilityFunction(input)).toEqual({ a: 1, b: 2 })
+ })
+
+ it('should handle nested objects', () => {
+ // const input = { nested: { deep: 'value' } }
+ // expect(utilityFunction(input)).toEqual({ nested: { deep: 'transformed' } })
+ })
+
+ it('should not mutate input', () => {
+ // const input = { a: 1 }
+ // const inputCopy = { ...input }
+ // utilityFunction(input)
+ // expect(input).toEqual(inputCopy)
+ })
+ })
+
+ // --------------------------------------------------------------------------
+ // Array Handling (if applicable)
+ // --------------------------------------------------------------------------
+ describe('Array Handling', () => {
+ it('should process all elements', () => {
+ // expect(utilityFunction([1, 2, 3])).toEqual([2, 4, 6])
+ })
+
+ it('should handle single element array', () => {
+ // expect(utilityFunction([1])).toEqual([2])
+ })
+
+ it('should preserve order', () => {
+ // expect(utilityFunction(['c', 'a', 'b'])).toEqual(['c', 'a', 'b'])
+ })
+ })
+})
diff --git a/.claude/skills/frontend-testing/references/async-testing.md b/.claude/skills/frontend-testing/references/async-testing.md
new file mode 100644
index 0000000000..ae775a87a9
--- /dev/null
+++ b/.claude/skills/frontend-testing/references/async-testing.md
@@ -0,0 +1,345 @@
+# Async Testing Guide
+
+## Core Async Patterns
+
+### 1. waitFor - Wait for Condition
+
+```typescript
+import { render, screen, waitFor } from '@testing-library/react'
+
+it('should load and display data', async () => {
+ render(<DataLoader />)
+
+ // Wait for element to appear
+ await waitFor(() => {
+ expect(screen.getByText('Loaded Data')).toBeInTheDocument()
+ })
+})
+
+it('should hide loading spinner after load', async () => {
+ render(<DataLoader />)
+
+ // Wait for element to disappear
+ await waitFor(() => {
+ expect(screen.queryByText('Loading...')).not.toBeInTheDocument()
+ })
+})
+```
+
+### 2. findBy\* - Async Queries
+
+```typescript
+it('should show user name after fetch', async () => {
+ render(<UserProfile />)
+
+ // findBy returns a promise, auto-waits up to 1000ms
+ const userName = await screen.findByText('John Doe')
+ expect(userName).toBeInTheDocument()
+
+ // findByRole with options
+ const button = await screen.findByRole('button', { name: /submit/i })
+ expect(button).toBeEnabled()
+})
+```
+
+### 3. userEvent for Async Interactions
+
+```typescript
+import userEvent from '@testing-library/user-event'
+
+it('should submit form', async () => {
+ const user = userEvent.setup()
+ const onSubmit = vi.fn()
+
+ render(<LoginForm onSubmit={onSubmit} />)
+
+ // userEvent methods are async
+ await user.type(screen.getByLabelText('Email'), 'test@example.com')
+ await user.click(screen.getByRole('button', { name: /submit/i }))
+
+ await waitFor(() => {
+ expect(onSubmit).toHaveBeenCalledWith({ email: 'test@example.com' })
+ })
+})
+```
+
+## Fake Timers
+
+### When to Use Fake Timers
+
+- Testing components with `setTimeout`/`setInterval`
+- Testing debounce/throttle behavior
+- Testing animations or delayed transitions
+- Testing polling or retry logic
+
+### Basic Fake Timer Setup
+
+```typescript
+describe('Debounced Search', () => {
+ beforeEach(() => {
+ vi.useFakeTimers()
+ })
+
+ afterEach(() => {
+ vi.useRealTimers()
+ })
+
+ it('should debounce search input', async () => {
+ const onSearch = vi.fn()
+ render(<DebouncedSearch onSearch={onSearch} />)
+
+ // Type in the input
+ fireEvent.change(screen.getByRole('textbox'), { target: { value: 'query' } })
+
+ // Search not called immediately
+ expect(onSearch).not.toHaveBeenCalled()
+
+ // Advance timers
+ vi.advanceTimersByTime(300)
+
+ // Now search is called
+ expect(onSearch).toHaveBeenCalledWith('query')
+ })
+})
+```
+
+### Fake Timers with Async Code
+
+```typescript
+it('should retry on failure', async () => {
+ vi.useFakeTimers()
+ const fetchData = vi.fn()
+ .mockRejectedValueOnce(new Error('Network error'))
+ .mockResolvedValueOnce({ data: 'success' })
+
+ render(<RetryingLoader fetchData={fetchData} />)
+
+ // First call fails
+ await waitFor(() => {
+ expect(fetchData).toHaveBeenCalledTimes(1)
+ })
+
+ // Advance timer for retry
+ vi.advanceTimersByTime(1000)
+
+ // Second call succeeds
+ await waitFor(() => {
+ expect(fetchData).toHaveBeenCalledTimes(2)
+ expect(screen.getByText('success')).toBeInTheDocument()
+ })
+
+ vi.useRealTimers()
+})
+```
+
+### Common Fake Timer Utilities
+
+```typescript
+// Run all pending timers
+vi.runAllTimers()
+
+// Run only pending timers (not new ones created during execution)
+vi.runOnlyPendingTimers()
+
+// Advance by specific time
+vi.advanceTimersByTime(1000)
+
+// Get current fake time
+Date.now()
+
+// Clear all timers
+vi.clearAllTimers()
+```
+
+## API Testing Patterns
+
+### Loading → Success → Error States
+
+```typescript
+describe('DataFetcher', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+ })
+
+ it('should show loading state', () => {
+ mockedApi.fetchData.mockImplementation(() => new Promise(() => {})) // Never resolves
+
+ render(<DataFetcher />)
+
+ expect(screen.getByTestId('loading-spinner')).toBeInTheDocument()
+ })
+
+ it('should show data on success', async () => {
+ mockedApi.fetchData.mockResolvedValue({ items: ['Item 1', 'Item 2'] })
+
+ render(<DataFetcher />)
+
+ // Use findBy* for multiple async elements (better error messages than waitFor with multiple assertions)
+ const item1 = await screen.findByText('Item 1')
+ const item2 = await screen.findByText('Item 2')
+ expect(item1).toBeInTheDocument()
+ expect(item2).toBeInTheDocument()
+
+ expect(screen.queryByTestId('loading-spinner')).not.toBeInTheDocument()
+ })
+
+ it('should show error on failure', async () => {
+ mockedApi.fetchData.mockRejectedValue(new Error('Failed to fetch'))
+
+ render(<DataFetcher />)
+
+ await waitFor(() => {
+ expect(screen.getByText(/failed to fetch/i)).toBeInTheDocument()
+ })
+ })
+
+ it('should retry on error', async () => {
+ mockedApi.fetchData.mockRejectedValue(new Error('Network error'))
+
+ render(<DataFetcher />)
+
+ await waitFor(() => {
+ expect(screen.getByRole('button', { name: /retry/i })).toBeInTheDocument()
+ })
+
+ mockedApi.fetchData.mockResolvedValue({ items: ['Item 1'] })
+ fireEvent.click(screen.getByRole('button', { name: /retry/i }))
+
+ await waitFor(() => {
+ expect(screen.getByText('Item 1')).toBeInTheDocument()
+ })
+ })
+})
+```
+
+### Testing Mutations
+
+```typescript
+it('should submit form and show success', async () => {
+ const user = userEvent.setup()
+ mockedApi.createItem.mockResolvedValue({ id: '1', name: 'New Item' })
+
+ render(<CreateItemForm />)
+
+ await user.type(screen.getByLabelText('Name'), 'New Item')
+ await user.click(screen.getByRole('button', { name: /create/i }))
+
+ // Button should be disabled during submission
+ expect(screen.getByRole('button', { name: /creating/i })).toBeDisabled()
+
+ await waitFor(() => {
+ expect(screen.getByText(/created successfully/i)).toBeInTheDocument()
+ })
+
+ expect(mockedApi.createItem).toHaveBeenCalledWith({ name: 'New Item' })
+})
+```
+
+## useEffect Testing
+
+### Testing Effect Execution
+
+```typescript
+it('should fetch data on mount', async () => {
+ const fetchData = vi.fn().mockResolvedValue({ data: 'test' })
+
+ render(<DataFetcher fetchData={fetchData} />)
+
+ await waitFor(() => {
+ expect(fetchData).toHaveBeenCalledTimes(1)
+ })
+})
+```
+
+### Testing Effect Dependencies
+
+```typescript
+it('should refetch when id changes', async () => {
+ const fetchData = vi.fn().mockResolvedValue({ data: 'test' })
+
+ const { rerender } = render(<DataFetcher id="1" fetchData={fetchData} />)
+
+ await waitFor(() => {
+ expect(fetchData).toHaveBeenCalledWith('1')
+ })
+
+ rerender(<DataFetcher id="2" fetchData={fetchData} />)
+
+ await waitFor(() => {
+ expect(fetchData).toHaveBeenCalledWith('2')
+ expect(fetchData).toHaveBeenCalledTimes(2)
+ })
+})
+```
+
+### Testing Effect Cleanup
+
+```typescript
+it('should cleanup subscription on unmount', () => {
+ const subscribe = vi.fn()
+ const unsubscribe = vi.fn()
+ subscribe.mockReturnValue(unsubscribe)
+
+ const { unmount } = render(<Subscriber subscribe={subscribe} />)
+
+ expect(subscribe).toHaveBeenCalledTimes(1)
+
+ unmount()
+
+ expect(unsubscribe).toHaveBeenCalledTimes(1)
+})
+```
+
+## Common Async Pitfalls
+
+### ❌ Don't: Forget to await
+
+```typescript
+// Bad - test may pass even if assertion fails
+it('should load data', () => {
+ render(<DataLoader />)
+ waitFor(() => {
+ expect(screen.getByText('Data')).toBeInTheDocument()
+ })
+})
+
+// Good - properly awaited
+it('should load data', async () => {
+ render(<DataLoader />)
+ await waitFor(() => {
+ expect(screen.getByText('Data')).toBeInTheDocument()
+ })
+})
+```
+
+### ❌ Don't: Use multiple assertions in single waitFor
+
+```typescript
+// Bad - if first assertion fails, won't know about second
+await waitFor(() => {
+ expect(screen.getByText('Title')).toBeInTheDocument()
+ expect(screen.getByText('Description')).toBeInTheDocument()
+})
+
+// Good - separate waitFor or use findBy
+const title = await screen.findByText('Title')
+const description = await screen.findByText('Description')
+expect(title).toBeInTheDocument()
+expect(description).toBeInTheDocument()
+```
+
+### ❌ Don't: Mix fake timers with real async
+
+```typescript
+// Bad - fake timers don't work well with real Promises
+vi.useFakeTimers()
+await waitFor(() => {
+ expect(screen.getByText('Data')).toBeInTheDocument()
+}) // May timeout!
+
+// Good - use runAllTimers or advanceTimersByTime
+vi.useFakeTimers()
+render(<DelayedComponent />)
+vi.runAllTimers()
+expect(screen.getByText('Data')).toBeInTheDocument()
+```
diff --git a/.claude/skills/frontend-testing/references/checklist.md b/.claude/skills/frontend-testing/references/checklist.md
new file mode 100644
index 0000000000..1ff2b27bbb
--- /dev/null
+++ b/.claude/skills/frontend-testing/references/checklist.md
@@ -0,0 +1,205 @@
+# Test Generation Checklist
+
+Use this checklist when generating or reviewing tests for Dify frontend components.
+
+## Pre-Generation
+
+- [ ] Read the component source code completely
+- [ ] Identify component type (component, hook, utility, page)
+- [ ] Run `pnpm analyze-component <component-path>` if available
+- [ ] Note complexity score and features detected
+- [ ] Check for existing tests in the same directory
+- [ ] **Identify ALL files in the directory** that need testing (not just index)
+
+## Testing Strategy
+
+### ⚠️ Incremental Workflow (CRITICAL for Multi-File)
+
+- [ ] **NEVER generate all tests at once** - process one file at a time
+- [ ] Order files by complexity: utilities → hooks → simple → complex → integration
+- [ ] Create a todo list to track progress before starting
+- [ ] For EACH file: write → run test → verify pass → then next
+- [ ] **DO NOT proceed** to next file until current one passes
+
+### Path-Level Coverage
+
+- [ ] **Test ALL files** in the assigned directory/path
+- [ ] List all components, hooks, utilities that need coverage
+- [ ] Decide: single spec file (integration) or multiple spec files (unit)
+
+### Complexity Assessment
+
+- [ ] Run `pnpm analyze-component <component-path>` for complexity score
+- [ ] **Complexity > 50**: Consider refactoring before testing
+- [ ] **500+ lines**: Consider splitting before testing
+- [ ] **30-50 complexity**: Use multiple describe blocks, organized structure
+
+### Integration vs Mocking
+
+- [ ] **DO NOT mock base components** (`Loading`, `Button`, `Tooltip`, etc.)
+- [ ] Import real project components instead of mocking
+- [ ] Only mock: API calls, complex context providers, third-party libs with side effects
+- [ ] Prefer integration testing when using single spec file
+
+## Required Test Sections
+
+### All Components MUST Have
+
+- [ ] **Rendering tests** - Component renders without crashing
+- [ ] **Props tests** - Required props, optional props, default values
+- [ ] **Edge cases** - null, undefined, empty values, boundaries
+
+### Conditional Sections (Add When Feature Present)
+
+| Feature | Add Tests For |
+|---------|---------------|
+| `useState` | Initial state, transitions, cleanup |
+| `useEffect` | Execution, dependencies, cleanup |
+| Event handlers | onClick, onChange, onSubmit, keyboard |
+| API calls | Loading, success, error states |
+| Routing | Navigation, params, query strings |
+| `useCallback`/`useMemo` | Referential equality |
+| Context | Provider values, consumer behavior |
+| Forms | Validation, submission, error display |
+
+## Code Quality Checklist
+
+### Structure
+
+- [ ] Uses `describe` blocks to group related tests
+- [ ] Test names follow `should <expected behavior> when <condition>` pattern
+- [ ] AAA pattern (Arrange-Act-Assert) is clear
+- [ ] Comments explain complex test scenarios
+
+### Mocks
+
+- [ ] **DO NOT mock base components** (`@/app/components/base/*`)
+- [ ] `vi.clearAllMocks()` in `beforeEach` (not `afterEach`)
+- [ ] Shared mock state reset in `beforeEach`
+- [ ] i18n uses global mock (auto-loaded in `web/vitest.setup.ts`); only override locally for custom translations
+- [ ] Router mocks match actual Next.js API
+- [ ] Mocks reflect actual component conditional behavior
+- [ ] Only mock: API services, complex context providers, third-party libs
+
+### Queries
+
+- [ ] Prefer semantic queries (`getByRole`, `getByLabelText`)
+- [ ] Use `queryBy*` for absence assertions
+- [ ] Use `findBy*` for async elements
+- [ ] `getByTestId` only as last resort
+
+### Async
+
+- [ ] All async tests use `async/await`
+- [ ] `waitFor` wraps async assertions
+- [ ] Fake timers properly setup/teardown
+- [ ] No floating promises
+
+### TypeScript
+
+- [ ] No `any` types without justification
+- [ ] Mock data uses actual types from source
+- [ ] Factory functions have proper return types
+
+## Coverage Goals (Per File)
+
+For the current file being tested:
+
+- [ ] 100% function coverage
+- [ ] 100% statement coverage
+- [ ] >95% branch coverage
+- [ ] >95% line coverage
+
+## Post-Generation (Per File)
+
+**Run these checks after EACH test file, not just at the end:**
+
+- [ ] Run `pnpm test path/to/file.spec.tsx` - **MUST PASS before next file**
+- [ ] Fix any failures immediately
+- [ ] Mark file as complete in todo list
+- [ ] Only then proceed to next file
+
+### After All Files Complete
+
+- [ ] Run full directory test: `pnpm test path/to/directory/`
+- [ ] Check coverage report: `pnpm test:coverage`
+- [ ] Run `pnpm lint:fix` on all test files
+- [ ] Run `pnpm type-check:tsgo`
+
+## Common Issues to Watch
+
+### False Positives
+
+```typescript
+// ❌ Mock doesn't match actual behavior
+vi.mock('./Component', () => () => <div>Mocked</div>
+)
+
+// ✅ Mock matches actual conditional logic
+vi.mock('./Component', () => ({ isOpen }: any) =>
+  isOpen ? <div>Content</div>
+  : null
+)
+```
+
+### State Leakage
+
+```typescript
+// ❌ Shared state not reset
+let mockState = false
+vi.mock('./useHook', () => () => mockState)
+
+// ✅ Reset in beforeEach
+beforeEach(() => {
+ mockState = false
+})
+```
+
+### Async Race Conditions
+
+```typescript
+// ❌ Not awaited
+it('loads data', () => {
+ render(<DataLoader />)
+ expect(screen.getByText('Data')).toBeInTheDocument()
+})
+
+// ✅ Properly awaited
+it('loads data', async () => {
+ render(<DataLoader />)
+ await waitFor(() => {
+ expect(screen.getByText('Data')).toBeInTheDocument()
+ })
+})
+```
+
+### Missing Edge Cases
+
+Always test these scenarios:
+
+- `null` / `undefined` inputs
+- Empty strings / arrays / objects
+- Boundary values (0, -1, MAX_INT)
+- Error states
+- Loading states
+- Disabled states
+
+## Quick Commands
+
+```bash
+# Run specific test
+pnpm test path/to/file.spec.tsx
+
+# Run with coverage
+pnpm test:coverage path/to/file.spec.tsx
+
+# Watch mode
+pnpm test:watch path/to/file.spec.tsx
+
+# Update snapshots (use sparingly)
+pnpm test -u path/to/file.spec.tsx
+
+# Analyze component
+pnpm analyze-component path/to/component.tsx
+
+# Review existing test
+pnpm analyze-component path/to/component.tsx --review
+```
diff --git a/.claude/skills/frontend-testing/references/common-patterns.md b/.claude/skills/frontend-testing/references/common-patterns.md
new file mode 100644
index 0000000000..6eded5ceba
--- /dev/null
+++ b/.claude/skills/frontend-testing/references/common-patterns.md
@@ -0,0 +1,449 @@
+# Common Testing Patterns
+
+## Query Priority
+
+Use queries in this order (most to least preferred):
+
+```typescript
+// 1. getByRole - Most recommended (accessibility)
+screen.getByRole('button', { name: /submit/i })
+screen.getByRole('textbox', { name: /email/i })
+screen.getByRole('heading', { level: 1 })
+
+// 2. getByLabelText - Form fields
+screen.getByLabelText('Email address')
+screen.getByLabelText(/password/i)
+
+// 3. getByPlaceholderText - When no label
+screen.getByPlaceholderText('Search...')
+
+// 4. getByText - Non-interactive elements
+screen.getByText('Welcome to Dify')
+screen.getByText(/loading/i)
+
+// 5. getByDisplayValue - Current input value
+screen.getByDisplayValue('current value')
+
+// 6. getByAltText - Images
+screen.getByAltText('Company logo')
+
+// 7. getByTitle - Tooltip elements
+screen.getByTitle('Close')
+
+// 8. getByTestId - Last resort only!
+screen.getByTestId('custom-element')
+```
+
+## Event Handling Patterns
+
+### Click Events
+
+```typescript
+// Basic click
+fireEvent.click(screen.getByRole('button'))
+
+// With userEvent (preferred for realistic interaction)
+const user = userEvent.setup()
+await user.click(screen.getByRole('button'))
+
+// Double click
+await user.dblClick(screen.getByRole('button'))
+
+// Right click
+await user.pointer({ keys: '[MouseRight]', target: screen.getByRole('button') })
+```
+
+### Form Input
+
+```typescript
+const user = userEvent.setup()
+
+// Type in input
+await user.type(screen.getByRole('textbox'), 'Hello World')
+
+// Clear and type
+await user.clear(screen.getByRole('textbox'))
+await user.type(screen.getByRole('textbox'), 'New value')
+
+// Select option
+await user.selectOptions(screen.getByRole('combobox'), 'option-value')
+
+// Check checkbox
+await user.click(screen.getByRole('checkbox'))
+
+// Upload file
+const file = new File(['content'], 'test.pdf', { type: 'application/pdf' })
+await user.upload(screen.getByLabelText(/upload/i), file)
+```
+
+### Keyboard Events
+
+```typescript
+const user = userEvent.setup()
+
+// Press Enter
+await user.keyboard('{Enter}')
+
+// Press Escape
+await user.keyboard('{Escape}')
+
+// Keyboard shortcut
+await user.keyboard('{Control>}a{/Control}') // Ctrl+A
+
+// Tab navigation
+await user.tab()
+
+// Arrow keys
+await user.keyboard('{ArrowDown}')
+await user.keyboard('{ArrowUp}')
+```
+
+## Component State Testing
+
+### Testing State Transitions
+
+```typescript
+describe('Counter', () => {
+ it('should increment count', async () => {
+ const user = userEvent.setup()
+ render(<Counter />)
+
+ // Initial state
+ expect(screen.getByText('Count: 0')).toBeInTheDocument()
+
+ // Trigger transition
+ await user.click(screen.getByRole('button', { name: /increment/i }))
+
+ // New state
+ expect(screen.getByText('Count: 1')).toBeInTheDocument()
+ })
+})
+```
+
+### Testing Controlled Components
+
+```typescript
+describe('ControlledInput', () => {
+ it('should call onChange with new value', async () => {
+ const user = userEvent.setup()
+ const handleChange = vi.fn()
+
+ render(<ControlledInput value="" onChange={handleChange} />)
+
+ await user.type(screen.getByRole('textbox'), 'a')
+
+ expect(handleChange).toHaveBeenCalledWith('a')
+ })
+
+ it('should display controlled value', () => {
+ render()
+
+ expect(screen.getByRole('textbox')).toHaveValue('controlled')
+ })
+})
+```
+
+## Conditional Rendering Testing
+
+```typescript
+describe('ConditionalComponent', () => {
+ it('should show loading state', () => {
+ render(<ConditionalComponent isLoading />)
+
+ expect(screen.getByText(/loading/i)).toBeInTheDocument()
+ expect(screen.queryByTestId('data-content')).not.toBeInTheDocument()
+ })
+
+ it('should show error state', () => {
+ render(<ConditionalComponent error="Failed to load" />)
+
+ expect(screen.getByText(/failed to load/i)).toBeInTheDocument()
+ })
+
+ it('should show data when loaded', () => {
+ render(<ConditionalComponent data={{ name: 'Test' }} />)
+
+ expect(screen.getByText('Test')).toBeInTheDocument()
+ })
+
+ it('should show empty state when no data', () => {
+ render(<ConditionalComponent data={[]} />)
+
+ expect(screen.getByText(/no data/i)).toBeInTheDocument()
+ })
+})
+```
+
+## List Rendering Testing
+
+```typescript
+describe('ItemList', () => {
+ const items = [
+ { id: '1', name: 'Item 1' },
+ { id: '2', name: 'Item 2' },
+ { id: '3', name: 'Item 3' },
+ ]
+
+ it('should render all items', () => {
+ render(<ItemList items={items} />)
+
+ expect(screen.getAllByRole('listitem')).toHaveLength(3)
+ items.forEach(item => {
+ expect(screen.getByText(item.name)).toBeInTheDocument()
+ })
+ })
+
+ it('should handle item selection', async () => {
+ const user = userEvent.setup()
+ const onSelect = vi.fn()
+
+ render(<ItemList items={items} onSelect={onSelect} />)
+
+ await user.click(screen.getByText('Item 2'))
+
+ expect(onSelect).toHaveBeenCalledWith(items[1])
+ })
+
+ it('should handle empty list', () => {
+ render(<ItemList items={[]} />)
+
+ expect(screen.getByText(/no items/i)).toBeInTheDocument()
+ })
+})
+```
+
+## Modal/Dialog Testing
+
+```typescript
+describe('Modal', () => {
+ it('should not render when closed', () => {
+ render(<Modal isOpen={false} onClose={vi.fn()} />)
+
+ expect(screen.queryByRole('dialog')).not.toBeInTheDocument()
+ })
+
+ it('should render when open', () => {
+ render(<Modal isOpen onClose={vi.fn()} />)
+
+ expect(screen.getByRole('dialog')).toBeInTheDocument()
+ })
+
+ it('should call onClose when clicking overlay', async () => {
+ const user = userEvent.setup()
+ const handleClose = vi.fn()
+
+ render(<Modal isOpen onClose={handleClose} />)
+
+ await user.click(screen.getByTestId('modal-overlay'))
+
+ expect(handleClose).toHaveBeenCalled()
+ })
+
+ it('should call onClose when pressing Escape', async () => {
+ const user = userEvent.setup()
+ const handleClose = vi.fn()
+
+ render(<Modal isOpen onClose={handleClose} />)
+
+ await user.keyboard('{Escape}')
+
+ expect(handleClose).toHaveBeenCalled()
+ })
+
+ it('should trap focus inside modal', async () => {
+ const user = userEvent.setup()
+
+ render(
+ <Modal isOpen onClose={vi.fn()}>
+ <button>First</button>
+ <button>Second</button>
+ </Modal>
+ )
+
+ // Focus should cycle within modal
+ await user.tab()
+ expect(screen.getByText('First')).toHaveFocus()
+
+ await user.tab()
+ expect(screen.getByText('Second')).toHaveFocus()
+
+ await user.tab()
+ expect(screen.getByText('First')).toHaveFocus() // Cycles back
+ })
+})
+```
+
+## Form Testing
+
+```typescript
+describe('LoginForm', () => {
+ it('should submit valid form', async () => {
+ const user = userEvent.setup()
+ const onSubmit = vi.fn()
+
+ render(<LoginForm onSubmit={onSubmit} />)
+
+ await user.type(screen.getByLabelText(/email/i), 'test@example.com')
+ await user.type(screen.getByLabelText(/password/i), 'password123')
+ await user.click(screen.getByRole('button', { name: /sign in/i }))
+
+ expect(onSubmit).toHaveBeenCalledWith({
+ email: 'test@example.com',
+ password: 'password123',
+ })
+ })
+
+ it('should show validation errors', async () => {
+ const user = userEvent.setup()
+
+ render(<LoginForm onSubmit={vi.fn()} />)
+
+ // Submit empty form
+ await user.click(screen.getByRole('button', { name: /sign in/i }))
+
+ expect(screen.getByText(/email is required/i)).toBeInTheDocument()
+ expect(screen.getByText(/password is required/i)).toBeInTheDocument()
+ })
+
+ it('should validate email format', async () => {
+ const user = userEvent.setup()
+
+ render(<LoginForm onSubmit={vi.fn()} />)
+
+ await user.type(screen.getByLabelText(/email/i), 'invalid-email')
+ await user.click(screen.getByRole('button', { name: /sign in/i }))
+
+ expect(screen.getByText(/invalid email/i)).toBeInTheDocument()
+ })
+
+ it('should disable submit button while submitting', async () => {
+ const user = userEvent.setup()
+ const onSubmit = vi.fn(() => new Promise(resolve => setTimeout(resolve, 100)))
+
+ render(<LoginForm onSubmit={onSubmit} />)
+
+ await user.type(screen.getByLabelText(/email/i), 'test@example.com')
+ await user.type(screen.getByLabelText(/password/i), 'password123')
+ await user.click(screen.getByRole('button', { name: /sign in/i }))
+
+ expect(screen.getByRole('button', { name: /signing in/i })).toBeDisabled()
+
+ await waitFor(() => {
+ expect(screen.getByRole('button', { name: /sign in/i })).toBeEnabled()
+ })
+ })
+})
+```
+
+## Data-Driven Tests with test.each
+
+```typescript
+describe('StatusBadge', () => {
+ test.each([
+ ['success', 'bg-green-500'],
+ ['warning', 'bg-yellow-500'],
+ ['error', 'bg-red-500'],
+ ['info', 'bg-blue-500'],
+ ])('should apply correct class for %s status', (status, expectedClass) => {
+ render(<StatusBadge status={status} />)
+
+ expect(screen.getByTestId('status-badge')).toHaveClass(expectedClass)
+ })
+
+ test.each([
+ { input: null, expected: 'Unknown' },
+ { input: undefined, expected: 'Unknown' },
+ { input: '', expected: 'Unknown' },
+ { input: 'invalid', expected: 'Unknown' },
+ ])('should show "Unknown" for invalid input: $input', ({ input, expected }) => {
+ render(<StatusBadge status={input} />)
+
+ expect(screen.getByText(expected)).toBeInTheDocument()
+ })
+})
+```
+
+## Debugging Tips
+
+```typescript
+// Print entire DOM
+screen.debug()
+
+// Print specific element
+screen.debug(screen.getByRole('button'))
+
+// Log testing playground URL
+screen.logTestingPlaygroundURL()
+
+// Pretty print DOM
+import { prettyDOM } from '@testing-library/react'
+console.log(prettyDOM(screen.getByRole('dialog')))
+
+// Check available roles
+import { getRoles } from '@testing-library/react'
+console.log(getRoles(container))
+```
+
+## Common Mistakes to Avoid
+
+### ❌ Don't Use Implementation Details
+
+```typescript
+// Bad - testing implementation
+expect(component.state.isOpen).toBe(true)
+expect(wrapper.find('.internal-class').length).toBe(1)
+
+// Good - testing behavior
+expect(screen.getByRole('dialog')).toBeInTheDocument()
+```
+
+### ❌ Don't Forget Cleanup
+
+```typescript
+// Bad - may leak state between tests
+it('test 1', () => {
+ render(<Component />)
+})
+
+// Good - cleanup is automatic with RTL, but reset mocks
+beforeEach(() => {
+ vi.clearAllMocks()
+})
+```
+
+### ❌ Don't Use Exact String Matching (Prefer Black-Box Assertions)
+
+```typescript
+// ❌ Bad - hardcoded strings are brittle
+expect(screen.getByText('Submit Form')).toBeInTheDocument()
+expect(screen.getByText('Loading...')).toBeInTheDocument()
+
+// ✅ Good - role-based queries (most semantic)
+expect(screen.getByRole('button', { name: /submit/i })).toBeInTheDocument()
+expect(screen.getByRole('status')).toBeInTheDocument()
+
+// ✅ Good - pattern matching (flexible)
+expect(screen.getByText(/submit/i)).toBeInTheDocument()
+expect(screen.getByText(/loading/i)).toBeInTheDocument()
+
+// ✅ Good - test behavior, not exact UI text
+expect(screen.getByRole('button')).toBeDisabled()
+expect(screen.getByRole('alert')).toBeInTheDocument()
+```
+
+**Why prefer black-box assertions?**
+
+- Text content may change (i18n, copy updates)
+- Role-based queries test accessibility
+- Pattern matching is resilient to minor changes
+- Tests focus on behavior, not implementation details
+
+### ❌ Don't Assert on Absence Without Query
+
+```typescript
+// Bad - throws if not found
+expect(screen.getByText('Error')).not.toBeInTheDocument() // Error!
+
+// Good - use queryBy for absence assertions
+expect(screen.queryByText('Error')).not.toBeInTheDocument()
+```
diff --git a/.claude/skills/frontend-testing/references/domain-components.md b/.claude/skills/frontend-testing/references/domain-components.md
new file mode 100644
index 0000000000..5535d28f3d
--- /dev/null
+++ b/.claude/skills/frontend-testing/references/domain-components.md
@@ -0,0 +1,523 @@
+# Domain-Specific Component Testing
+
+This guide covers testing patterns for Dify's domain-specific components.
+
+## Workflow Components (`workflow/`)
+
+Workflow components handle node configuration, data flow, and graph operations.
+
+### Key Test Areas
+
+1. **Node Configuration**
+1. **Data Validation**
+1. **Variable Passing**
+1. **Edge Connections**
+1. **Error Handling**
+
+### Example: Node Configuration Panel
+
+```typescript
+import { render, screen, fireEvent, waitFor } from '@testing-library/react'
+import userEvent from '@testing-library/user-event'
+import NodeConfigPanel from './node-config-panel'
+import { createMockNode, createMockWorkflowContext } from '@/__mocks__/workflow'
+
+// Mock workflow context
+vi.mock('@/app/components/workflow/hooks', () => ({
+ useWorkflowStore: () => mockWorkflowStore,
+ useNodesInteractions: () => mockNodesInteractions,
+}))
+
+let mockWorkflowStore = {
+ nodes: [],
+ edges: [],
+ updateNode: vi.fn(),
+}
+
+let mockNodesInteractions = {
+ handleNodeSelect: vi.fn(),
+ handleNodeDelete: vi.fn(),
+}
+
+describe('NodeConfigPanel', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+ mockWorkflowStore = {
+ nodes: [],
+ edges: [],
+ updateNode: vi.fn(),
+ }
+ })
+
+ describe('Node Configuration', () => {
+ it('should render node type selector', () => {
+ const node = createMockNode({ type: 'llm' })
+ render(<NodeConfigPanel node={node} />)
+
+ expect(screen.getByLabelText(/model/i)).toBeInTheDocument()
+ })
+
+ it('should update node config on change', async () => {
+ const user = userEvent.setup()
+ const node = createMockNode({ type: 'llm' })
+
+ render(<NodeConfigPanel node={node} />)
+
+ await user.selectOptions(screen.getByLabelText(/model/i), 'gpt-4')
+
+ expect(mockWorkflowStore.updateNode).toHaveBeenCalledWith(
+ node.id,
+ expect.objectContaining({ model: 'gpt-4' })
+ )
+ })
+ })
+
+ describe('Data Validation', () => {
+ it('should show error for invalid input', async () => {
+ const user = userEvent.setup()
+ const node = createMockNode({ type: 'code' })
+
+ render()
+
+ // Enter invalid code
+ const codeInput = screen.getByLabelText(/code/i)
+ await user.clear(codeInput)
+ await user.type(codeInput, 'invalid syntax {{{')
+
+ await waitFor(() => {
+ expect(screen.getByText(/syntax error/i)).toBeInTheDocument()
+ })
+ })
+
+ it('should validate required fields', async () => {
+ const node = createMockNode({ type: 'http', data: { url: '' } })
+
+ render(<NodeConfigPanel node={node} />)
+
+ fireEvent.click(screen.getByRole('button', { name: /save/i }))
+
+ await waitFor(() => {
+ expect(screen.getByText(/url is required/i)).toBeInTheDocument()
+ })
+ })
+ })
+
+ describe('Variable Passing', () => {
+ it('should display available variables from upstream nodes', () => {
+ const upstreamNode = createMockNode({
+ id: 'node-1',
+ type: 'start',
+ data: { outputs: [{ name: 'user_input', type: 'string' }] },
+ })
+ const currentNode = createMockNode({
+ id: 'node-2',
+ type: 'llm',
+ })
+
+ mockWorkflowStore.nodes = [upstreamNode, currentNode]
+ mockWorkflowStore.edges = [{ source: 'node-1', target: 'node-2' }]
+
+ render(<NodeConfigPanel node={currentNode} />)
+
+ // Variable selector should show upstream variables
+ fireEvent.click(screen.getByRole('button', { name: /add variable/i }))
+
+ expect(screen.getByText('user_input')).toBeInTheDocument()
+ })
+
+ it('should insert variable into prompt template', async () => {
+ const user = userEvent.setup()
+ const node = createMockNode({ type: 'llm' })
+
+ render(<NodeConfigPanel node={node} />)
+
+ // Click variable button
+ await user.click(screen.getByRole('button', { name: /insert variable/i }))
+ await user.click(screen.getByText('user_input'))
+
+ const promptInput = screen.getByLabelText(/prompt/i)
+ expect(promptInput).toHaveValue(expect.stringContaining('{{user_input}}'))
+ })
+ })
+})
+```
+
+## Dataset Components (`dataset/`)
+
+Dataset components handle file uploads, data display, and search/filter operations.
+
+### Key Test Areas
+
+1. **File Upload**
+1. **File Type Validation**
+1. **Pagination**
+1. **Search & Filtering**
+1. **Data Format Handling**
+
+### Example: Document Uploader
+
+```typescript
+import { render, screen, fireEvent, waitFor } from '@testing-library/react'
+import userEvent from '@testing-library/user-event'
+import DocumentUploader from './document-uploader'
+
+vi.mock('@/service/datasets', () => ({
+ uploadDocument: vi.fn(),
+ parseDocument: vi.fn(),
+}))
+
+import * as datasetService from '@/service/datasets'
+const mockedService = vi.mocked(datasetService)
+
+describe('DocumentUploader', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+ })
+
+ describe('File Upload', () => {
+ it('should accept valid file types', async () => {
+ const user = userEvent.setup()
+ const onUpload = vi.fn()
+ mockedService.uploadDocument.mockResolvedValue({ id: 'doc-1' })
+
+ render(<DocumentUploader onUpload={onUpload} />)
+
+ const file = new File(['content'], 'test.pdf', { type: 'application/pdf' })
+ const input = screen.getByLabelText(/upload/i)
+
+ await user.upload(input, file)
+
+ await waitFor(() => {
+ expect(mockedService.uploadDocument).toHaveBeenCalledWith(
+ expect.any(FormData)
+ )
+ })
+ })
+
+ it('should reject invalid file types', async () => {
+ const user = userEvent.setup()
+
+ render(<DocumentUploader />)
+
+ const file = new File(['content'], 'test.exe', { type: 'application/x-msdownload' })
+ const input = screen.getByLabelText(/upload/i)
+
+ await user.upload(input, file)
+
+ expect(screen.getByText(/unsupported file type/i)).toBeInTheDocument()
+ expect(mockedService.uploadDocument).not.toHaveBeenCalled()
+ })
+
+ it('should show upload progress', async () => {
+ const user = userEvent.setup()
+
+ // Mock upload with progress
+ mockedService.uploadDocument.mockImplementation(() => {
+ return new Promise((resolve) => {
+ setTimeout(() => resolve({ id: 'doc-1' }), 100)
+ })
+ })
+
+ render(<DocumentUploader />)
+
+ const file = new File(['content'], 'test.pdf', { type: 'application/pdf' })
+ await user.upload(screen.getByLabelText(/upload/i), file)
+
+ expect(screen.getByRole('progressbar')).toBeInTheDocument()
+
+ await waitFor(() => {
+ expect(screen.queryByRole('progressbar')).not.toBeInTheDocument()
+ })
+ })
+ })
+
+ describe('Error Handling', () => {
+ it('should handle upload failure', async () => {
+ const user = userEvent.setup()
+ mockedService.uploadDocument.mockRejectedValue(new Error('Upload failed'))
+
+ render(<DocumentUploader />)
+
+ const file = new File(['content'], 'test.pdf', { type: 'application/pdf' })
+ await user.upload(screen.getByLabelText(/upload/i), file)
+
+ await waitFor(() => {
+ expect(screen.getByText(/upload failed/i)).toBeInTheDocument()
+ })
+ })
+
+ it('should allow retry after failure', async () => {
+ const user = userEvent.setup()
+ mockedService.uploadDocument
+ .mockRejectedValueOnce(new Error('Network error'))
+ .mockResolvedValueOnce({ id: 'doc-1' })
+
+ render(<DocumentUploader />)
+
+ const file = new File(['content'], 'test.pdf', { type: 'application/pdf' })
+ await user.upload(screen.getByLabelText(/upload/i), file)
+
+ await waitFor(() => {
+ expect(screen.getByRole('button', { name: /retry/i })).toBeInTheDocument()
+ })
+
+ await user.click(screen.getByRole('button', { name: /retry/i }))
+
+ await waitFor(() => {
+ expect(screen.getByText(/uploaded successfully/i)).toBeInTheDocument()
+ })
+ })
+ })
+})
+```
+
+### Example: Document List with Pagination
+
+```typescript
+describe('DocumentList', () => {
+ describe('Pagination', () => {
+ it('should load first page on mount', async () => {
+ mockedService.getDocuments.mockResolvedValue({
+ data: [{ id: '1', name: 'Doc 1' }],
+ total: 50,
+ page: 1,
+ pageSize: 10,
+ })
+
+ render(<DocumentList datasetId="ds-1" />)
+
+ await waitFor(() => {
+ expect(screen.getByText('Doc 1')).toBeInTheDocument()
+ })
+
+ expect(mockedService.getDocuments).toHaveBeenCalledWith('ds-1', { page: 1 })
+ })
+
+ it('should navigate to next page', async () => {
+ const user = userEvent.setup()
+ mockedService.getDocuments.mockResolvedValue({
+ data: [{ id: '1', name: 'Doc 1' }],
+ total: 50,
+ page: 1,
+ pageSize: 10,
+ })
+
+ render(<DocumentList datasetId="ds-1" />)
+
+ await waitFor(() => {
+ expect(screen.getByText('Doc 1')).toBeInTheDocument()
+ })
+
+ mockedService.getDocuments.mockResolvedValue({
+ data: [{ id: '11', name: 'Doc 11' }],
+ total: 50,
+ page: 2,
+ pageSize: 10,
+ })
+
+ await user.click(screen.getByRole('button', { name: /next/i }))
+
+ await waitFor(() => {
+ expect(screen.getByText('Doc 11')).toBeInTheDocument()
+ })
+ })
+ })
+
+ describe('Search & Filtering', () => {
+ it('should filter by search query', async () => {
+ const user = userEvent.setup()
+ vi.useFakeTimers()
+
+ render(<DocumentList datasetId="ds-1" />)
+
+ await user.type(screen.getByPlaceholderText(/search/i), 'test query')
+
+ // Debounce
+ vi.advanceTimersByTime(300)
+
+ await waitFor(() => {
+ expect(mockedService.getDocuments).toHaveBeenCalledWith(
+ 'ds-1',
+ expect.objectContaining({ search: 'test query' })
+ )
+ })
+
+ vi.useRealTimers()
+ })
+ })
+})
+```
+
+## Configuration Components (`app/configuration/`, `config/`)
+
+Configuration components handle forms, validation, and data persistence.
+
+### Key Test Areas
+
+1. **Form Validation**
+1. **Save/Reset**
+1. **Required vs Optional Fields**
+1. **Configuration Persistence**
+1. **Error Feedback**
+
+### Example: App Configuration Form
+
+```typescript
+import { render, screen, fireEvent, waitFor } from '@testing-library/react'
+import userEvent from '@testing-library/user-event'
+import AppConfigForm from './app-config-form'
+
+vi.mock('@/service/apps', () => ({
+ updateAppConfig: vi.fn(),
+ getAppConfig: vi.fn(),
+}))
+
+import * as appService from '@/service/apps'
+const mockedService = vi.mocked(appService)
+
+describe('AppConfigForm', () => {
+ const defaultConfig = {
+ name: 'My App',
+ description: '',
+ icon: 'default',
+ openingStatement: '',
+ }
+
+ beforeEach(() => {
+ vi.clearAllMocks()
+ mockedService.getAppConfig.mockResolvedValue(defaultConfig)
+ })
+
+ describe('Form Validation', () => {
+ it('should require app name', async () => {
+ const user = userEvent.setup()
+
+ render(<AppConfigForm appId="app-1" />)
+
+ await waitFor(() => {
+ expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
+ })
+
+ // Clear name field
+ await user.clear(screen.getByLabelText(/name/i))
+ await user.click(screen.getByRole('button', { name: /save/i }))
+
+ expect(screen.getByText(/name is required/i)).toBeInTheDocument()
+ expect(mockedService.updateAppConfig).not.toHaveBeenCalled()
+ })
+
+ it('should validate name length', async () => {
+ const user = userEvent.setup()
+
+ render(<AppConfigForm appId="app-1" />)
+
+ await waitFor(() => {
+ expect(screen.getByLabelText(/name/i)).toBeInTheDocument()
+ })
+
+ // Enter very long name
+ await user.clear(screen.getByLabelText(/name/i))
+ await user.type(screen.getByLabelText(/name/i), 'a'.repeat(101))
+
+ expect(screen.getByText(/name must be less than 100 characters/i)).toBeInTheDocument()
+ })
+
+ it('should allow empty optional fields', async () => {
+ const user = userEvent.setup()
+ mockedService.updateAppConfig.mockResolvedValue({ success: true })
+
+ render(<AppConfigForm appId="app-1" />)
+
+ await waitFor(() => {
+ expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
+ })
+
+ // Leave description empty (optional)
+ await user.click(screen.getByRole('button', { name: /save/i }))
+
+ await waitFor(() => {
+ expect(mockedService.updateAppConfig).toHaveBeenCalled()
+ })
+ })
+ })
+
+ describe('Save/Reset Functionality', () => {
+ it('should save configuration', async () => {
+ const user = userEvent.setup()
+ mockedService.updateAppConfig.mockResolvedValue({ success: true })
+
+ render(<AppConfigForm appId="app-1" />)
+
+ await waitFor(() => {
+ expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
+ })
+
+ await user.clear(screen.getByLabelText(/name/i))
+ await user.type(screen.getByLabelText(/name/i), 'Updated App')
+ await user.click(screen.getByRole('button', { name: /save/i }))
+
+ await waitFor(() => {
+ expect(mockedService.updateAppConfig).toHaveBeenCalledWith(
+ 'app-1',
+ expect.objectContaining({ name: 'Updated App' })
+ )
+ })
+
+ expect(screen.getByText(/saved successfully/i)).toBeInTheDocument()
+ })
+
+ it('should reset to default values', async () => {
+ const user = userEvent.setup()
+
+ render(<AppConfigForm appId="app-1" />)
+
+ await waitFor(() => {
+ expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
+ })
+
+ // Make changes
+ await user.clear(screen.getByLabelText(/name/i))
+ await user.type(screen.getByLabelText(/name/i), 'Changed Name')
+
+ // Reset
+ await user.click(screen.getByRole('button', { name: /reset/i }))
+
+ expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
+ })
+
+ it('should show unsaved changes warning', async () => {
+ const user = userEvent.setup()
+
+ render(<AppConfigForm appId="app-1" />)
+
+ await waitFor(() => {
+ expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
+ })
+
+ // Make changes
+ await user.type(screen.getByLabelText(/name/i), ' Updated')
+
+ expect(screen.getByText(/unsaved changes/i)).toBeInTheDocument()
+ })
+ })
+
+ describe('Error Handling', () => {
+ it('should show error on save failure', async () => {
+ const user = userEvent.setup()
+ mockedService.updateAppConfig.mockRejectedValue(new Error('Server error'))
+
+ render(<AppConfigForm appId="app-1" />)
+
+ await waitFor(() => {
+ expect(screen.getByLabelText(/name/i)).toHaveValue('My App')
+ })
+
+ await user.click(screen.getByRole('button', { name: /save/i }))
+
+ await waitFor(() => {
+ expect(screen.getByText(/failed to save/i)).toBeInTheDocument()
+ })
+ })
+ })
+})
+```
diff --git a/.claude/skills/frontend-testing/references/mocking.md b/.claude/skills/frontend-testing/references/mocking.md
new file mode 100644
index 0000000000..c70bcf0ae5
--- /dev/null
+++ b/.claude/skills/frontend-testing/references/mocking.md
@@ -0,0 +1,349 @@
+# Mocking Guide for Dify Frontend Tests
+
+## ⚠️ Important: What NOT to Mock
+
+### DO NOT Mock Base Components
+
+**Never mock components from `@/app/components/base/`** such as:
+
+- `Loading`, `Spinner`
+- `Button`, `Input`, `Select`
+- `Tooltip`, `Modal`, `Dropdown`
+- `Icon`, `Badge`, `Tag`
+
+**Why?**
+
+- Base components will have their own dedicated tests
+- Mocking them creates false positives (tests pass but real integration fails)
+- Using real components tests actual integration behavior
+
+```typescript
+// ❌ WRONG: Don't mock base components
+vi.mock('@/app/components/base/loading', () => () => <div>Loading</div>)
+vi.mock('@/app/components/base/button', () => ({ children }: any) => <button>{children}</button>)
+
+// ✅ CORRECT: Import and use real base components
+import Loading from '@/app/components/base/loading'
+import Button from '@/app/components/base/button'
+// They will render normally in tests
+```
+
+### What TO Mock
+
+Only mock these categories:
+
+1. **API services** (`@/service/*`) - Network calls
+1. **Complex context providers** - When setup is too difficult
+1. **Third-party libraries with side effects** - `next/navigation`, external SDKs
+1. **i18n** - Always mock to return keys
+
+## Mock Placement
+
+| Location | Purpose |
+|----------|---------|
+| `web/vitest.setup.ts` | Global mocks shared by all tests (for example `react-i18next`, `next/image`) |
+| `web/__mocks__/` | Reusable mock factories shared across multiple test files |
+| Test file | Test-specific mocks, inline with `vi.mock()` |
+
+Modules are not mocked automatically. Use `vi.mock` in test files, or add global mocks in `web/vitest.setup.ts`.
+
+## Essential Mocks
+
+### 1. i18n (Auto-loaded via Global Mock)
+
+A global mock is defined in `web/vitest.setup.ts` and is auto-loaded by Vitest setup.
+
+The global mock provides:
+
+- `useTranslation` - returns translation keys with namespace prefix
+- `Trans` component - renders i18nKey and components
+- `useMixedTranslation` (from `@/app/components/plugins/marketplace/hooks`)
+- `useGetLanguage` (from `@/context/i18n`) - returns `'en-US'`
+
+**Default behavior**: Most tests should use the global mock (no local override needed).
+
+**For custom translations**: Use the helper function from `@/test/i18n-mock`:
+
+```typescript
+import { createReactI18nextMock } from '@/test/i18n-mock'
+
+vi.mock('react-i18next', () => createReactI18nextMock({
+ 'my.custom.key': 'Custom translation',
+ 'button.save': 'Save',
+}))
+```
+
+**Avoid**: Manually defining `useTranslation` mocks that just return the key - the global mock already does this.
+
+### 2. Next.js Router
+
+```typescript
+const mockPush = vi.fn()
+const mockReplace = vi.fn()
+
+vi.mock('next/navigation', () => ({
+ useRouter: () => ({
+ push: mockPush,
+ replace: mockReplace,
+ back: vi.fn(),
+ prefetch: vi.fn(),
+ }),
+ usePathname: () => '/current-path',
+ useSearchParams: () => new URLSearchParams('?key=value'),
+}))
+
+describe('Component', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+ })
+
+ it('should navigate on click', () => {
+ render()
+ fireEvent.click(screen.getByRole('button'))
+ expect(mockPush).toHaveBeenCalledWith('/expected-path')
+ })
+})
+```
+
+### 3. Portal Components (with Shared State)
+
+```typescript
+// ⚠️ Important: Use shared state for components that depend on each other
+let mockPortalOpenState = false
+
+vi.mock('@/app/components/base/portal-to-follow-elem', () => ({
+ PortalToFollowElem: ({ children, open, ...props }: any) => {
+ mockPortalOpenState = open || false // Update shared state
+ return <div {...props}>{children}</div>
+ },
+ PortalToFollowElemContent: ({ children }: any) => {
+ // ✅ Matches actual: returns null when portal is closed
+ if (!mockPortalOpenState) return null
+ return <div>{children}</div>
+ },
+ PortalToFollowElemTrigger: ({ children }: any) => (
+ <div>{children}</div>
+ ),
+}))
+
+describe('Component', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+ mockPortalOpenState = false // ✅ Reset shared state
+ })
+})
+```
+
+### 4. API Service Mocks
+
+```typescript
+import * as api from '@/service/api'
+
+vi.mock('@/service/api')
+
+const mockedApi = vi.mocked(api)
+
+describe('Component', () => {
+ beforeEach(() => {
+ vi.clearAllMocks()
+
+ // Setup default mock implementation
+ mockedApi.fetchData.mockResolvedValue({ data: [] })
+ })
+
+ it('should show data on success', async () => {
+ mockedApi.fetchData.mockResolvedValue({ data: [{ id: 1 }] })
+
+ render()
+
+ await waitFor(() => {
+ expect(screen.getByText('1')).toBeInTheDocument()
+ })
+ })
+
+ it('should show error on failure', async () => {
+ mockedApi.fetchData.mockRejectedValue(new Error('Network error'))
+
+ render()
+
+ await waitFor(() => {
+ expect(screen.getByText(/error/i)).toBeInTheDocument()
+ })
+ })
+})
+```
+
+### 5. HTTP Mocking with Nock
+
+```typescript
+import nock from 'nock'
+
+const GITHUB_HOST = 'https://api.github.com'
+const GITHUB_PATH = '/repos/owner/repo'
+
+const mockGithubApi = (status: number, body: Record<string, unknown>, delayMs = 0) => {
+ return nock(GITHUB_HOST)
+ .get(GITHUB_PATH)
+ .delay(delayMs)
+ .reply(status, body)
+}
+
+describe('GithubComponent', () => {
+ afterEach(() => {
+ nock.cleanAll()
+ })
+
+ it('should display repo info', async () => {
+ mockGithubApi(200, { name: 'dify', stars: 1000 })
+
+ render(<GithubComponent />)
+
+ await waitFor(() => {
+ expect(screen.getByText('dify')).toBeInTheDocument()
+ })
+ })
+
+ it('should handle API error', async () => {
+ mockGithubApi(500, { message: 'Server error' })
+
+ render(<GithubComponent />)
+
+ await waitFor(() => {
+ expect(screen.getByText(/error/i)).toBeInTheDocument()
+ })
+ })
+})
+```
+
+### 6. Context Providers
+
+```typescript
+import { ProviderContext } from '@/context/provider-context'
+import { createMockProviderContextValue, createMockPlan } from '@/__mocks__/provider-context'
+
+describe('Component with Context', () => {
+ it('should render for free plan', () => {
+ const mockContext = createMockPlan('sandbox')
+
+ render(
+ <ProviderContext.Provider value={mockContext}>
+ <Component />
+ </ProviderContext.Provider>
+ )
+
+ expect(screen.getByText('Upgrade')).toBeInTheDocument()
+ })
+
+ it('should render for pro plan', () => {
+ const mockContext = createMockPlan('professional')
+
+ render(
+ <ProviderContext.Provider value={mockContext}>
+ <Component />
+ </ProviderContext.Provider>
+ )
+
+ expect(screen.queryByText('Upgrade')).not.toBeInTheDocument()
+ })
+})
+```
+
+### 7. React Query
+
+```typescript
+import { QueryClient, QueryClientProvider } from '@tanstack/react-query'
+
+const createTestQueryClient = () => new QueryClient({
+ defaultOptions: {
+ queries: { retry: false },
+ mutations: { retry: false },
+ },
+})
+
+const renderWithQueryClient = (ui: React.ReactElement) => {
+ const queryClient = createTestQueryClient()
+ return render(
+ <QueryClientProvider client={queryClient}>
+ {ui}
+ </QueryClientProvider>
+ )
+}
+```
+
+## Mock Best Practices
+
+### ✅ DO
+
+1. **Use real base components** - Import from `@/app/components/base/` directly
+1. **Use real project components** - Prefer importing over mocking
+1. **Reset mocks in `beforeEach`**, not `afterEach`
+1. **Match actual component behavior** in mocks (when mocking is necessary)
+1. **Use factory functions** for complex mock data
+1. **Import actual types** for type safety
+1. **Reset shared mock state** in `beforeEach`
+
+### ❌ DON'T
+
+1. **Don't mock base components** (`Loading`, `Button`, `Tooltip`, etc.)
+1. Don't mock components you can import directly
+1. Don't create overly simplified mocks that miss conditional logic
+1. Don't forget to clean up nock after each test
+1. Don't use `any` types in mocks without necessity
+
+### Mock Decision Tree
+
+```
+Need to use a component in test?
+│
+├─ Is it from @/app/components/base/*?
+│ └─ YES → Import real component, DO NOT mock
+│
+├─ Is it a project component?
+│ └─ YES → Prefer importing real component
+│ Only mock if setup is extremely complex
+│
+├─ Is it an API service (@/service/*)?
+│ └─ YES → Mock it
+│
+├─ Is it a third-party lib with side effects?
+│ └─ YES → Mock it (next/navigation, external SDKs)
+│
+└─ Is it i18n?
+ └─ YES → Uses shared mock (auto-loaded). Override only for custom translations
+```
+
+## Factory Function Pattern
+
+```typescript
+// __mocks__/data-factories.ts
+import type { User, Project } from '@/types'
+
+export const createMockUser = (overrides: Partial<User> = {}): User => ({
+ id: 'user-1',
+ name: 'Test User',
+ email: 'test@example.com',
+ role: 'member',
+ createdAt: new Date().toISOString(),
+ ...overrides,
+})
+
+export const createMockProject = (overrides: Partial<Project> = {}): Project => ({
+ id: 'project-1',
+ name: 'Test Project',
+ description: 'A test project',
+ owner: createMockUser(),
+ members: [],
+ createdAt: new Date().toISOString(),
+ ...overrides,
+})
+
+// Usage in tests
+it('should display project owner', () => {
+ const project = createMockProject({
+ owner: createMockUser({ name: 'John Doe' }),
+ })
+
+ render(<ProjectCard project={project} />)
+ expect(screen.getByText('John Doe')).toBeInTheDocument()
+})
+```
diff --git a/.claude/skills/frontend-testing/references/workflow.md b/.claude/skills/frontend-testing/references/workflow.md
new file mode 100644
index 0000000000..009c3e013b
--- /dev/null
+++ b/.claude/skills/frontend-testing/references/workflow.md
@@ -0,0 +1,269 @@
+# Testing Workflow Guide
+
+This guide defines the workflow for generating tests, especially for complex components or directories with multiple files.
+
+## Scope Clarification
+
+This guide addresses **multi-file workflow** (how to process multiple test files). For coverage requirements within a single test file, see `web/testing/testing.md` § Coverage Goals.
+
+| Scope | Rule |
+|-------|------|
+| **Single file** | Complete coverage in one generation (100% function, >95% branch) |
+| **Multi-file directory** | Process one file at a time, verify each before proceeding |
+
+## ⚠️ Critical Rule: Incremental Approach for Multi-File Testing
+
+When testing a **directory with multiple files**, **NEVER generate all test files at once.** Use an incremental, verify-as-you-go approach.
+
+### Why Incremental?
+
+| Batch Approach (❌) | Incremental Approach (✅) |
+|---------------------|---------------------------|
+| Generate 5+ tests at once | Generate 1 test at a time |
+| Run tests only at the end | Run test immediately after each file |
+| Multiple failures compound | Single point of failure, easy to debug |
+| Hard to identify root cause | Clear cause-effect relationship |
+| Mock issues affect many files | Mock issues caught early |
+| Messy git history | Clean, atomic commits possible |
+
+## Single File Workflow
+
+When testing a **single component, hook, or utility**:
+
+```
+1. Read source code completely
+2. Run `pnpm analyze-component <component-path>` (if available)
+3. Check complexity score and features detected
+4. Write the test file
+5. Run test: `pnpm test <component>.spec.tsx`
+6. Fix any failures
+7. Verify coverage meets goals (100% function, >95% branch)
+```
+
+## Directory/Multi-File Workflow (MUST FOLLOW)
+
+When testing a **directory or multiple files**, follow this strict workflow:
+
+### Step 1: Analyze and Plan
+
+1. **List all files** that need tests in the directory
+1. **Categorize by complexity**:
+ - 🟢 **Simple**: Utility functions, simple hooks, presentational components
+ - 🟡 **Medium**: Components with state, effects, or event handlers
+ - 🔴 **Complex**: Components with API calls, routing, or many dependencies
+1. **Order by dependency**: Test dependencies before dependents
+1. **Create a todo list** to track progress
+
+### Step 2: Determine Processing Order
+
+Process files in this recommended order:
+
+```
+1. Utility functions (simplest, no React)
+2. Custom hooks (isolated logic)
+3. Simple presentational components (few/no props)
+4. Medium complexity components (state, effects)
+5. Complex components (API, routing, many deps)
+6. Container/index components (integration tests - last)
+```
+
+**Rationale**:
+
+- Simpler files help establish mock patterns
+- Hooks used by components should be tested first
+- Integration tests (index files) depend on child components working
+
+### Step 3: Process Each File Incrementally
+
+**For EACH file in the ordered list:**
+
+```
+┌─────────────────────────────────────────────┐
+│ 1. Write test file │
+│ 2. Run: pnpm test <file>.spec.tsx │
+│ 3. If FAIL → Fix immediately, re-run │
+│ 4. If PASS → Mark complete in todo list │
+│ 5. ONLY THEN proceed to next file │
+└─────────────────────────────────────────────┘
+```
+
+**DO NOT proceed to the next file until the current one passes.**
+
+### Step 4: Final Verification
+
+After all individual tests pass:
+
+```bash
+# Run all tests in the directory together
+pnpm test path/to/directory/
+
+# Check coverage
+pnpm test:coverage path/to/directory/
+```
+
+## Component Complexity Guidelines
+
+Use `pnpm analyze-component <component-path>` to assess complexity before testing.
+
+### 🔴 Very Complex Components (Complexity > 50)
+
+**Consider refactoring BEFORE testing:**
+
+- Break component into smaller, testable pieces
+- Extract complex logic into custom hooks
+- Separate container and presentational layers
+
+**If testing as-is:**
+
+- Use integration tests for complex workflows
+- Use `test.each()` for data-driven testing
+- Multiple `describe` blocks for organization
+- Consider testing major sections separately
+
+### 🟡 Medium Complexity (Complexity 30-50)
+
+- Group related tests in `describe` blocks
+- Test integration scenarios between internal parts
+- Focus on state transitions and side effects
+- Use helper functions to reduce test complexity
+
+### 🟢 Simple Components (Complexity < 30)
+
+- Standard test structure
+- Focus on props, rendering, and edge cases
+- Usually straightforward to test
+
+### 📏 Large Files (500+ lines)
+
+Regardless of complexity score:
+
+- **Strongly consider refactoring** before testing
+- If testing as-is, test major sections separately
+- Create helper functions for test setup
+- May need multiple test files
+
+## Todo List Format
+
+When testing multiple files, use a todo list like this:
+
+```
+Testing: path/to/directory/
+
+Ordered by complexity (simple → complex):
+
+☐ utils/helper.ts [utility, simple]
+☐ hooks/use-custom-hook.ts [hook, simple]
+☐ empty-state.tsx [component, simple]
+☐ item-card.tsx [component, medium]
+☐ list.tsx [component, complex]
+☐ index.tsx [integration]
+
+Progress: 0/6 complete
+```
+
+Update status as you complete each:
+
+- ☐ → ⏳ (in progress)
+- ⏳ → ✅ (complete and verified)
+- ⏳ → ❌ (blocked, needs attention)
+
+## When to Stop and Verify
+
+**Always run tests after:**
+
+- Completing a test file
+- Making changes to fix a failure
+- Modifying shared mocks
+- Updating test utilities or helpers
+
+**Signs you should pause:**
+
+- More than 2 consecutive test failures
+- Mock-related errors appearing
+- Unclear why a test is failing
+- Test passing but coverage unexpectedly low
+
+## Common Pitfalls to Avoid
+
+### ❌ Don't: Generate Everything First
+
+```
+# BAD: Writing all files then testing
+Write component-a.spec.tsx
+Write component-b.spec.tsx
+Write component-c.spec.tsx
+Write component-d.spec.tsx
+Run pnpm test ← Multiple failures, hard to debug
+```
+
+### ✅ Do: Verify Each Step
+
+```
+# GOOD: Incremental with verification
+Write component-a.spec.tsx
+Run pnpm test component-a.spec.tsx ✅
+Write component-b.spec.tsx
+Run pnpm test component-b.spec.tsx ✅
+...continue...
+```
+
+### ❌ Don't: Skip Verification for "Simple" Components
+
+Even simple components can have:
+
+- Import errors
+- Missing mock setup
+- Incorrect assumptions about props
+
+**Always verify, regardless of perceived simplicity.**
+
+### ❌ Don't: Continue When Tests Fail
+
+Failing tests compound:
+
+- A mock issue in file A affects files B, C, D
+- Fixing A later requires revisiting all dependent tests
+- Time wasted on debugging cascading failures
+
+**Fix failures immediately before proceeding.**
+
+## Integration with Claude's Todo Feature
+
+When using Claude for multi-file testing:
+
+1. **Ask Claude to create a todo list** before starting
+1. **Request one file at a time** or ensure Claude processes incrementally
+1. **Verify each test passes** before asking for the next
+1. **Mark todos complete** as you progress
+
+Example prompt:
+
+```
+Test all components in `path/to/directory/`.
+First, analyze the directory and create a todo list ordered by complexity.
+Then, process ONE file at a time, waiting for my confirmation that tests pass
+before proceeding to the next.
+```
+
+## Summary Checklist
+
+Before starting multi-file testing:
+
+- [ ] Listed all files needing tests
+- [ ] Ordered by complexity (simple → complex)
+- [ ] Created todo list for tracking
+- [ ] Understand dependencies between files
+
+During testing:
+
+- [ ] Processing ONE file at a time
+- [ ] Running tests after EACH file
+- [ ] Fixing failures BEFORE proceeding
+- [ ] Updating todo list progress
+
+After completion:
+
+- [ ] All individual tests pass
+- [ ] Full directory test run passes
+- [ ] Coverage goals met
+- [ ] Todo list shows all complete
diff --git a/.codex/skills b/.codex/skills
new file mode 120000
index 0000000000..454b8427cd
--- /dev/null
+++ b/.codex/skills
@@ -0,0 +1 @@
+../.claude/skills
\ No newline at end of file
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000000..190c0c185b
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,5 @@
+[run]
+omit =
+ api/tests/*
+ api/migrations/*
+ api/core/rag/datasource/vdb/*
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index ddec42e0ee..3998a69c36 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -6,6 +6,9 @@
"context": "..",
"dockerfile": "Dockerfile"
},
+ "mounts": [
+ "source=dify-dev-tmp,target=/tmp,type=volume"
+ ],
"features": {
"ghcr.io/devcontainers/features/node:1": {
"nodeGypDependencies": true,
@@ -34,19 +37,13 @@
},
"postStartCommand": "./.devcontainer/post_start_command.sh",
"postCreateCommand": "./.devcontainer/post_create_command.sh"
-
// Features to add to the dev container. More info: https://containers.dev/features.
// "features": {},
-
// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],
-
// Use 'postCreateCommand' to run commands after the container is created.
// "postCreateCommand": "python --version",
-
// Configure tool-specific properties.
// "customizations": {},
-
// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
- // "remoteUser": "root"
-}
+}
\ No newline at end of file
diff --git a/.devcontainer/post_create_command.sh b/.devcontainer/post_create_command.sh
index a26fd076ed..220f77e5ce 100755
--- a/.devcontainer/post_create_command.sh
+++ b/.devcontainer/post_create_command.sh
@@ -1,12 +1,13 @@
#!/bin/bash
WORKSPACE_ROOT=$(pwd)
+export COREPACK_ENABLE_DOWNLOAD_PROMPT=0
corepack enable
cd web && pnpm install
pipx install uv
echo "alias start-api=\"cd $WORKSPACE_ROOT/api && uv run python -m flask run --host 0.0.0.0 --port=5001 --debug\"" >> ~/.bashrc
-echo "alias start-worker=\"cd $WORKSPACE_ROOT/api && uv run python -m celery -A app.celery worker -P threads -c 1 --loglevel INFO -Q dataset,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor\"" >> ~/.bashrc
+echo "alias start-worker=\"cd $WORKSPACE_ROOT/api && uv run python -m celery -A app.celery worker -P threads -c 1 --loglevel INFO -Q dataset,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor,retention\"" >> ~/.bashrc
echo "alias start-web=\"cd $WORKSPACE_ROOT/web && pnpm dev\"" >> ~/.bashrc
echo "alias start-web-prod=\"cd $WORKSPACE_ROOT/web && pnpm build && pnpm start\"" >> ~/.bashrc
echo "alias start-containers=\"cd $WORKSPACE_ROOT/docker && docker-compose -f docker-compose.middleware.yaml -p dify --env-file middleware.env up -d\"" >> ~/.bashrc
diff --git a/.editorconfig b/.editorconfig
index 374da0b5d2..be14939ddb 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -29,7 +29,7 @@ trim_trailing_whitespace = false
# Matches multiple files with brace expansion notation
# Set default charset
-[*.{js,tsx}]
+[*.{js,jsx,ts,tsx,mjs}]
indent_style = space
indent_size = 2
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000000..106c26bbed
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,249 @@
+# CODEOWNERS
+# This file defines code ownership for the Dify project.
+# Each line is a file pattern followed by one or more owners.
+# Owners can be @username, @org/team-name, or email addresses.
+# For more information, see: https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
+
+* @crazywoola @laipz8200 @Yeuoly
+
+# CODEOWNERS file
+/.github/CODEOWNERS @laipz8200 @crazywoola
+
+# Docs
+/docs/ @crazywoola
+
+# Backend (default owner, more specific rules below will override)
+/api/ @QuantumGhost
+
+# Backend - MCP
+/api/core/mcp/ @Nov1c444
+/api/core/entities/mcp_provider.py @Nov1c444
+/api/services/tools/mcp_tools_manage_service.py @Nov1c444
+/api/controllers/mcp/ @Nov1c444
+/api/controllers/console/app/mcp_server.py @Nov1c444
+/api/tests/**/*mcp* @Nov1c444
+
+# Backend - Workflow - Engine (Core graph execution engine)
+/api/core/workflow/graph_engine/ @laipz8200 @QuantumGhost
+/api/core/workflow/runtime/ @laipz8200 @QuantumGhost
+/api/core/workflow/graph/ @laipz8200 @QuantumGhost
+/api/core/workflow/graph_events/ @laipz8200 @QuantumGhost
+/api/core/workflow/node_events/ @laipz8200 @QuantumGhost
+/api/core/model_runtime/ @laipz8200 @QuantumGhost
+
+# Backend - Workflow - Nodes (Agent, Iteration, Loop, LLM)
+/api/core/workflow/nodes/agent/ @Nov1c444
+/api/core/workflow/nodes/iteration/ @Nov1c444
+/api/core/workflow/nodes/loop/ @Nov1c444
+/api/core/workflow/nodes/llm/ @Nov1c444
+
+# Backend - RAG (Retrieval Augmented Generation)
+/api/core/rag/ @JohnJyong
+/api/services/rag_pipeline/ @JohnJyong
+/api/services/dataset_service.py @JohnJyong
+/api/services/knowledge_service.py @JohnJyong
+/api/services/external_knowledge_service.py @JohnJyong
+/api/services/hit_testing_service.py @JohnJyong
+/api/services/metadata_service.py @JohnJyong
+/api/services/vector_service.py @JohnJyong
+/api/services/entities/knowledge_entities/ @JohnJyong
+/api/services/entities/external_knowledge_entities/ @JohnJyong
+/api/controllers/console/datasets/ @JohnJyong
+/api/controllers/service_api/dataset/ @JohnJyong
+/api/models/dataset.py @JohnJyong
+/api/tasks/rag_pipeline/ @JohnJyong
+/api/tasks/add_document_to_index_task.py @JohnJyong
+/api/tasks/batch_clean_document_task.py @JohnJyong
+/api/tasks/clean_document_task.py @JohnJyong
+/api/tasks/clean_notion_document_task.py @JohnJyong
+/api/tasks/document_indexing_task.py @JohnJyong
+/api/tasks/document_indexing_sync_task.py @JohnJyong
+/api/tasks/document_indexing_update_task.py @JohnJyong
+/api/tasks/duplicate_document_indexing_task.py @JohnJyong
+/api/tasks/recover_document_indexing_task.py @JohnJyong
+/api/tasks/remove_document_from_index_task.py @JohnJyong
+/api/tasks/retry_document_indexing_task.py @JohnJyong
+/api/tasks/sync_website_document_indexing_task.py @JohnJyong
+/api/tasks/batch_create_segment_to_index_task.py @JohnJyong
+/api/tasks/create_segment_to_index_task.py @JohnJyong
+/api/tasks/delete_segment_from_index_task.py @JohnJyong
+/api/tasks/disable_segment_from_index_task.py @JohnJyong
+/api/tasks/disable_segments_from_index_task.py @JohnJyong
+/api/tasks/enable_segment_to_index_task.py @JohnJyong
+/api/tasks/enable_segments_to_index_task.py @JohnJyong
+/api/tasks/clean_dataset_task.py @JohnJyong
+/api/tasks/deal_dataset_index_update_task.py @JohnJyong
+/api/tasks/deal_dataset_vector_index_task.py @JohnJyong
+
+# Backend - Plugins
+/api/core/plugin/ @Mairuis @Yeuoly @Stream29
+/api/services/plugin/ @Mairuis @Yeuoly @Stream29
+/api/controllers/console/workspace/plugin.py @Mairuis @Yeuoly @Stream29
+/api/controllers/inner_api/plugin/ @Mairuis @Yeuoly @Stream29
+/api/tasks/process_tenant_plugin_autoupgrade_check_task.py @Mairuis @Yeuoly @Stream29
+
+# Backend - Trigger/Schedule/Webhook
+/api/controllers/trigger/ @Mairuis @Yeuoly
+/api/controllers/console/app/workflow_trigger.py @Mairuis @Yeuoly
+/api/controllers/console/workspace/trigger_providers.py @Mairuis @Yeuoly
+/api/core/trigger/ @Mairuis @Yeuoly
+/api/core/app/layers/trigger_post_layer.py @Mairuis @Yeuoly
+/api/services/trigger/ @Mairuis @Yeuoly
+/api/models/trigger.py @Mairuis @Yeuoly
+/api/fields/workflow_trigger_fields.py @Mairuis @Yeuoly
+/api/repositories/workflow_trigger_log_repository.py @Mairuis @Yeuoly
+/api/repositories/sqlalchemy_workflow_trigger_log_repository.py @Mairuis @Yeuoly
+/api/libs/schedule_utils.py @Mairuis @Yeuoly
+/api/services/workflow/scheduler.py @Mairuis @Yeuoly
+/api/schedule/trigger_provider_refresh_task.py @Mairuis @Yeuoly
+/api/schedule/workflow_schedule_task.py @Mairuis @Yeuoly
+/api/tasks/trigger_processing_tasks.py @Mairuis @Yeuoly
+/api/tasks/trigger_subscription_refresh_tasks.py @Mairuis @Yeuoly
+/api/tasks/workflow_schedule_tasks.py @Mairuis @Yeuoly
+/api/tasks/workflow_cfs_scheduler/ @Mairuis @Yeuoly
+/api/events/event_handlers/sync_plugin_trigger_when_app_created.py @Mairuis @Yeuoly
+/api/events/event_handlers/update_app_triggers_when_app_published_workflow_updated.py @Mairuis @Yeuoly
+/api/events/event_handlers/sync_workflow_schedule_when_app_published.py @Mairuis @Yeuoly
+/api/events/event_handlers/sync_webhook_when_app_created.py @Mairuis @Yeuoly
+
+# Backend - Async Workflow
+/api/services/async_workflow_service.py @Mairuis @Yeuoly
+/api/tasks/async_workflow_tasks.py @Mairuis @Yeuoly
+
+# Backend - Billing
+/api/services/billing_service.py @hj24 @zyssyz123
+/api/controllers/console/billing/ @hj24 @zyssyz123
+
+# Backend - Enterprise
+/api/configs/enterprise/ @GarfieldDai @GareArc
+/api/services/enterprise/ @GarfieldDai @GareArc
+/api/services/feature_service.py @GarfieldDai @GareArc
+/api/controllers/console/feature.py @GarfieldDai @GareArc
+/api/controllers/web/feature.py @GarfieldDai @GareArc
+
+# Backend - Database Migrations
+/api/migrations/ @snakevash @laipz8200 @MRZHUH
+
+# Backend - Vector DB Middleware
+/api/configs/middleware/vdb/* @JohnJyong
+
+# Frontend
+/web/ @iamjoel
+
+# Frontend - Web Tests
+/.github/workflows/web-tests.yml @iamjoel
+
+# Frontend - App - Orchestration
+/web/app/components/workflow/ @iamjoel @zxhlyh
+/web/app/components/workflow-app/ @iamjoel @zxhlyh
+/web/app/components/app/configuration/ @iamjoel @zxhlyh
+/web/app/components/app/app-publisher/ @iamjoel @zxhlyh
+
+# Frontend - WebApp - Chat
+/web/app/components/base/chat/ @iamjoel @zxhlyh
+
+# Frontend - WebApp - Completion
+/web/app/components/share/text-generation/ @iamjoel @zxhlyh
+
+# Frontend - App - List and Creation
+/web/app/components/apps/ @JzoNgKVO @iamjoel
+/web/app/components/app/create-app-dialog/ @JzoNgKVO @iamjoel
+/web/app/components/app/create-app-modal/ @JzoNgKVO @iamjoel
+/web/app/components/app/create-from-dsl-modal/ @JzoNgKVO @iamjoel
+
+# Frontend - App - API Documentation
+/web/app/components/develop/ @JzoNgKVO @iamjoel
+
+# Frontend - App - Logs and Annotations
+/web/app/components/app/workflow-log/ @JzoNgKVO @iamjoel
+/web/app/components/app/log/ @JzoNgKVO @iamjoel
+/web/app/components/app/log-annotation/ @JzoNgKVO @iamjoel
+/web/app/components/app/annotation/ @JzoNgKVO @iamjoel
+
+# Frontend - App - Monitoring
+/web/app/(commonLayout)/app/(appDetailLayout)/\[appId\]/overview/ @JzoNgKVO @iamjoel
+/web/app/components/app/overview/ @JzoNgKVO @iamjoel
+
+# Frontend - App - Settings
+/web/app/components/app-sidebar/ @JzoNgKVO @iamjoel
+
+# Frontend - RAG - Hit Testing
+/web/app/components/datasets/hit-testing/ @JzoNgKVO @iamjoel
+
+# Frontend - RAG - List and Creation
+/web/app/components/datasets/list/ @iamjoel @WTW0313
+/web/app/components/datasets/create/ @iamjoel @WTW0313
+/web/app/components/datasets/create-from-pipeline/ @iamjoel @WTW0313
+/web/app/components/datasets/external-knowledge-base/ @iamjoel @WTW0313
+
+# Frontend - RAG - Orchestration (general rule first, specific rules below override)
+/web/app/components/rag-pipeline/ @iamjoel @WTW0313
+/web/app/components/rag-pipeline/components/rag-pipeline-main.tsx @iamjoel @zxhlyh
+/web/app/components/rag-pipeline/store/ @iamjoel @zxhlyh
+
+# Frontend - RAG - Documents List
+/web/app/components/datasets/documents/list.tsx @iamjoel @WTW0313
+/web/app/components/datasets/documents/create-from-pipeline/ @iamjoel @WTW0313
+
+# Frontend - RAG - Segments List
+/web/app/components/datasets/documents/detail/ @iamjoel @WTW0313
+
+# Frontend - RAG - Settings
+/web/app/components/datasets/settings/ @iamjoel @WTW0313
+
+# Frontend - Ecosystem - Plugins
+/web/app/components/plugins/ @iamjoel @zhsama
+
+# Frontend - Ecosystem - Tools
+/web/app/components/tools/ @iamjoel @Yessenia-d
+
+# Frontend - Ecosystem - MarketPlace
+/web/app/components/plugins/marketplace/ @iamjoel @Yessenia-d
+
+# Frontend - Login and Registration
+/web/app/signin/ @douxc @iamjoel
+/web/app/signup/ @douxc @iamjoel
+/web/app/reset-password/ @douxc @iamjoel
+/web/app/install/ @douxc @iamjoel
+/web/app/init/ @douxc @iamjoel
+/web/app/forgot-password/ @douxc @iamjoel
+/web/app/account/ @douxc @iamjoel
+
+# Frontend - Service Authentication
+/web/service/base.ts @douxc @iamjoel
+
+# Frontend - WebApp Authentication and Access Control
+/web/app/(shareLayout)/components/ @douxc @iamjoel
+/web/app/(shareLayout)/webapp-signin/ @douxc @iamjoel
+/web/app/(shareLayout)/webapp-reset-password/ @douxc @iamjoel
+/web/app/components/app/app-access-control/ @douxc @iamjoel
+
+# Frontend - Explore Page
+/web/app/components/explore/ @CodingOnStar @iamjoel
+
+# Frontend - Personal Settings
+/web/app/components/header/account-setting/ @CodingOnStar @iamjoel
+/web/app/components/header/account-dropdown/ @CodingOnStar @iamjoel
+
+# Frontend - Analytics
+/web/app/components/base/ga/ @CodingOnStar @iamjoel
+
+# Frontend - Base Components
+/web/app/components/base/ @iamjoel @zxhlyh
+
+# Frontend - Utils and Hooks
+/web/utils/classnames.ts @iamjoel @zxhlyh
+/web/utils/time.ts @iamjoel @zxhlyh
+/web/utils/format.ts @iamjoel @zxhlyh
+/web/utils/clipboard.ts @iamjoel @zxhlyh
+/web/hooks/use-document-title.ts @iamjoel @zxhlyh
+
+# Frontend - Billing and Education
+/web/app/components/billing/ @iamjoel @zxhlyh
+/web/app/education-apply/ @iamjoel @zxhlyh
+
+# Frontend - Workspace
+/web/app/components/header/account-dropdown/workplace-selector/ @iamjoel @zxhlyh
+
+# Docker
+/docker/* @laipz8200
diff --git a/.github/ISSUE_TEMPLATE/refactor.yml b/.github/ISSUE_TEMPLATE/refactor.yml
index cf74dcc546..dbe8cbb602 100644
--- a/.github/ISSUE_TEMPLATE/refactor.yml
+++ b/.github/ISSUE_TEMPLATE/refactor.yml
@@ -1,8 +1,6 @@
-name: "✨ Refactor"
-description: Refactor existing code for improved readability and maintainability.
-title: "[Chore/Refactor] "
-labels:
- - refactor
+name: "✨ Refactor or Chore"
+description: Refactor existing code or perform maintenance chores to improve readability and reliability.
+title: "[Refactor/Chore] "
body:
- type: checkboxes
attributes:
@@ -11,7 +9,7 @@ body:
options:
- label: I have read the [Contributing Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) and [Language Policy](https://github.com/langgenius/dify/issues/1542).
required: true
- - label: This is only for refactoring, if you would like to ask a question, please head to [Discussions](https://github.com/langgenius/dify/discussions/categories/general).
+ - label: This is only for refactors or chores; if you would like to ask a question, please head to [Discussions](https://github.com/langgenius/dify/discussions/categories/general).
required: true
- label: I have searched for existing issues [search for existing issues](https://github.com/langgenius/dify/issues), including closed ones.
required: true
@@ -25,14 +23,14 @@ body:
id: description
attributes:
label: Description
- placeholder: "Describe the refactor you are proposing."
+ placeholder: "Describe the refactor or chore you are proposing."
validations:
required: true
- type: textarea
id: motivation
attributes:
label: Motivation
- placeholder: "Explain why this refactor is necessary."
+ placeholder: "Explain why this refactor or chore is necessary."
validations:
required: false
- type: textarea
diff --git a/.github/ISSUE_TEMPLATE/tracker.yml b/.github/ISSUE_TEMPLATE/tracker.yml
deleted file mode 100644
index 35fedefc75..0000000000
--- a/.github/ISSUE_TEMPLATE/tracker.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: "👾 Tracker"
-description: For inner usages, please do not use this template.
-title: "[Tracker] "
-labels:
- - tracker
-body:
- - type: textarea
- id: content
- attributes:
- label: Blockers
- placeholder: "- [ ] ..."
- validations:
- required: true
diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml
index 37d351627b..152a9caee8 100644
--- a/.github/workflows/api-tests.yml
+++ b/.github/workflows/api-tests.yml
@@ -22,12 +22,12 @@ jobs:
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
persist-credentials: false
- name: Setup UV and Python
- uses: astral-sh/setup-uv@v6
+ uses: astral-sh/setup-uv@v7
with:
enable-cache: true
python-version: ${{ matrix.python-version }}
@@ -57,12 +57,12 @@ jobs:
run: sh .github/workflows/expose_service_ports.sh
- name: Set up Sandbox
- uses: hoverkraft-tech/compose-action@v2.0.2
+ uses: hoverkraft-tech/compose-action@v2
with:
compose-file: |
docker/docker-compose.middleware.yaml
services: |
- db
+ db_postgres
redis
sandbox
ssrf_proxy
@@ -71,18 +71,18 @@ jobs:
run: |
cp api/tests/integration_tests/.env.example api/tests/integration_tests/.env
- - name: Run Workflow
- run: uv run --project api bash dev/pytest/pytest_workflow.sh
-
- - name: Run Tool
- run: uv run --project api bash dev/pytest/pytest_tools.sh
-
- - name: Run TestContainers
- run: uv run --project api bash dev/pytest/pytest_testcontainers.sh
-
- - name: Run Unit tests
+ - name: Run API Tests
+ env:
+ STORAGE_TYPE: opendal
+ OPENDAL_SCHEME: fs
+ OPENDAL_FS_ROOT: /tmp/dify-storage
run: |
- uv run --project api bash dev/pytest/pytest_unit_tests.sh
+ uv run --project api pytest \
+ --timeout "${PYTEST_TIMEOUT:-180}" \
+ api/tests/integration_tests/workflow \
+ api/tests/integration_tests/tools \
+ api/tests/test_containers_integration_tests \
+ api/tests/unit_tests
- name: Coverage Summary
run: |
@@ -93,5 +93,12 @@ jobs:
# Create a detailed coverage summary
echo "### Test Coverage Summary :test_tube:" >> $GITHUB_STEP_SUMMARY
echo "Total Coverage: ${TOTAL_COVERAGE}%" >> $GITHUB_STEP_SUMMARY
- uv run --project api coverage report --format=markdown >> $GITHUB_STEP_SUMMARY
-
+ {
+ echo ""
+ echo "<details>"
+ echo "<summary>File-level coverage (click to expand)</summary>"
+ echo ""
+ echo '```'
+ uv run --project api coverage report -m
+ echo '```'
+ echo "</details>"
+ } >> $GITHUB_STEP_SUMMARY
diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml
index 81392a9734..5413f83c27 100644
--- a/.github/workflows/autofix.yml
+++ b/.github/workflows/autofix.yml
@@ -12,12 +12,29 @@ jobs:
if: github.repository == 'langgenius/dify'
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- # Use uv to ensure we have the same ruff version in CI and locally.
- - uses: astral-sh/setup-uv@v6
+ - name: Check Docker Compose inputs
+ id: docker-compose-changes
+ uses: tj-actions/changed-files@v46
+ with:
+ files: |
+ docker/generate_docker_compose
+ docker/.env.example
+ docker/docker-compose-template.yaml
+ docker/docker-compose.yaml
+ - uses: actions/setup-python@v5
with:
python-version: "3.11"
+
+ - uses: astral-sh/setup-uv@v7
+
+ - name: Generate Docker Compose
+ if: steps.docker-compose-changes.outputs.any_changed == 'true'
+ run: |
+ cd docker
+ ./generate_docker_compose
+
- run: |
cd api
uv sync --dev
@@ -35,10 +52,11 @@ jobs:
- name: ast-grep
run: |
- uvx --from ast-grep-cli sg --pattern 'db.session.query($WHATEVER).filter($HERE)' --rewrite 'db.session.query($WHATEVER).where($HERE)' -l py --update-all
- uvx --from ast-grep-cli sg --pattern 'session.query($WHATEVER).filter($HERE)' --rewrite 'session.query($WHATEVER).where($HERE)' -l py --update-all
- uvx --from ast-grep-cli sg -p '$A = db.Column($$$B)' -r '$A = mapped_column($$$B)' -l py --update-all
- uvx --from ast-grep-cli sg -p '$A : $T = db.Column($$$B)' -r '$A : $T = mapped_column($$$B)' -l py --update-all
+ # ast-grep exits 1 if no matches are found; allow idempotent runs.
+ uvx --from ast-grep-cli ast-grep --pattern 'db.session.query($WHATEVER).filter($HERE)' --rewrite 'db.session.query($WHATEVER).where($HERE)' -l py --update-all || true
+ uvx --from ast-grep-cli ast-grep --pattern 'session.query($WHATEVER).filter($HERE)' --rewrite 'session.query($WHATEVER).where($HERE)' -l py --update-all || true
+ uvx --from ast-grep-cli ast-grep -p '$A = db.Column($$$B)' -r '$A = mapped_column($$$B)' -l py --update-all || true
+ uvx --from ast-grep-cli ast-grep -p '$A : $T = db.Column($$$B)' -r '$A : $T = mapped_column($$$B)' -l py --update-all || true
# Convert Optional[T] to T | None (ignoring quoted types)
cat > /tmp/optional-rule.yml << 'EOF'
id: convert-optional-to-union
@@ -56,35 +74,14 @@ jobs:
pattern: $T
fix: $T | None
EOF
- uvx --from ast-grep-cli sg scan --inline-rules "$(cat /tmp/optional-rule.yml)" --update-all
+ uvx --from ast-grep-cli ast-grep scan . --inline-rules "$(cat /tmp/optional-rule.yml)" --update-all
# Fix forward references that were incorrectly converted (Python doesn't support "Type" | None syntax)
find . -name "*.py" -type f -exec sed -i.bak -E 's/"([^"]+)" \| None/Optional["\1"]/g; s/'"'"'([^'"'"']+)'"'"' \| None/Optional['"'"'\1'"'"']/g' {} \;
find . -name "*.py.bak" -type f -delete
+ # mdformat breaks YAML front matter in markdown files. Add --exclude for directories containing YAML front matter.
- name: mdformat
run: |
- uvx mdformat .
-
- - name: Install pnpm
- uses: pnpm/action-setup@v4
- with:
- package_json_file: web/package.json
- run_install: false
-
- - name: Setup NodeJS
- uses: actions/setup-node@v4
- with:
- node-version: 22
- cache: pnpm
- cache-dependency-path: ./web/package.json
-
- - name: Web dependencies
- working-directory: ./web
- run: pnpm install --frozen-lockfile
-
- - name: oxlint
- working-directory: ./web
- run: |
- pnpx oxlint --fix
+ uvx --python 3.13 mdformat . --exclude ".claude/skills/**/SKILL.md"
- uses: autofix-ci/action@635ffb0c9798bd160680f18fd73371e355b85f27
diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml
index f7f464a601..bbf89236de 100644
--- a/.github/workflows/build-push.yml
+++ b/.github/workflows/build-push.yml
@@ -90,7 +90,7 @@ jobs:
touch "/tmp/digests/${sanitized_digest}"
- name: Upload digest
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v6
with:
name: digests-${{ matrix.context }}-${{ env.PLATFORM_PAIR }}
path: /tmp/digests/*
diff --git a/.github/workflows/db-migration-test.yml b/.github/workflows/db-migration-test.yml
index b9961a4714..e20cf9850b 100644
--- a/.github/workflows/db-migration-test.yml
+++ b/.github/workflows/db-migration-test.yml
@@ -8,18 +8,18 @@ concurrency:
cancel-in-progress: true
jobs:
- db-migration-test:
+ db-migration-test-postgres:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
fetch-depth: 0
persist-credentials: false
- name: Setup UV and Python
- uses: astral-sh/setup-uv@v6
+ uses: astral-sh/setup-uv@v7
with:
enable-cache: true
python-version: "3.12"
@@ -45,7 +45,7 @@ jobs:
compose-file: |
docker/docker-compose.middleware.yaml
services: |
- db
+ db_postgres
redis
- name: Prepare configs
@@ -57,3 +57,60 @@ jobs:
env:
DEBUG: true
run: uv run --directory api flask upgrade-db
+
+ db-migration-test-mysql:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v6
+ with:
+ fetch-depth: 0
+ persist-credentials: false
+
+ - name: Setup UV and Python
+ uses: astral-sh/setup-uv@v7
+ with:
+ enable-cache: true
+ python-version: "3.12"
+ cache-dependency-glob: api/uv.lock
+
+ - name: Install dependencies
+ run: uv sync --project api
+ - name: Ensure Offline migration are supported
+ run: |
+ # upgrade
+ uv run --directory api flask db upgrade 'base:head' --sql
+ # downgrade
+ uv run --directory api flask db downgrade 'head:base' --sql
+
+ - name: Prepare middleware env for MySQL
+ run: |
+ cd docker
+ cp middleware.env.example middleware.env
+ sed -i 's/DB_TYPE=postgresql/DB_TYPE=mysql/' middleware.env
+ sed -i 's/DB_HOST=db_postgres/DB_HOST=db_mysql/' middleware.env
+ sed -i 's/DB_PORT=5432/DB_PORT=3306/' middleware.env
+ sed -i 's/DB_USERNAME=postgres/DB_USERNAME=mysql/' middleware.env
+
+ - name: Set up Middlewares
+ uses: hoverkraft-tech/compose-action@v2.0.2
+ with:
+ compose-file: |
+ docker/docker-compose.middleware.yaml
+ services: |
+ db_mysql
+ redis
+
+ - name: Prepare configs for MySQL
+ run: |
+ cd api
+ cp .env.example .env
+ sed -i 's/DB_TYPE=postgresql/DB_TYPE=mysql/' .env
+ sed -i 's/DB_PORT=5432/DB_PORT=3306/' .env
+ sed -i 's/DB_USERNAME=postgres/DB_USERNAME=root/' .env
+
+ - name: Run DB Migration
+ env:
+ DEBUG: true
+ run: uv run --directory api flask upgrade-db
diff --git a/.github/workflows/main-ci.yml b/.github/workflows/main-ci.yml
index 876ec23a3d..d6653de950 100644
--- a/.github/workflows/main-ci.yml
+++ b/.github/workflows/main-ci.yml
@@ -27,7 +27,7 @@ jobs:
vdb-changed: ${{ steps.changes.outputs.vdb }}
migration-changed: ${{ steps.changes.outputs.migration }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- uses: dorny/paths-filter@v3
id: changes
with:
@@ -38,6 +38,7 @@ jobs:
- '.github/workflows/api-tests.yml'
web:
- 'web/**'
+ - '.github/workflows/web-tests.yml'
vdb:
- 'api/core/rag/datasource/**'
- 'docker/**'
diff --git a/.github/workflows/semantic-pull-request.yml b/.github/workflows/semantic-pull-request.yml
new file mode 100644
index 0000000000..b15c26a096
--- /dev/null
+++ b/.github/workflows/semantic-pull-request.yml
@@ -0,0 +1,21 @@
+name: Semantic Pull Request
+
+on:
+ pull_request:
+ types:
+ - opened
+ - edited
+ - reopened
+ - synchronize
+
+jobs:
+ lint:
+ name: Validate PR title
+ permissions:
+ pull-requests: read
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check title
+ uses: amannn/action-semantic-pull-request@v6.1.1
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml
index e652657705..462ece303e 100644
--- a/.github/workflows/style.yml
+++ b/.github/workflows/style.yml
@@ -19,13 +19,13 @@ jobs:
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
persist-credentials: false
- name: Check changed files
id: changed-files
- uses: tj-actions/changed-files@v46
+ uses: tj-actions/changed-files@v47
with:
files: |
api/**
@@ -33,7 +33,7 @@ jobs:
- name: Setup UV and Python
if: steps.changed-files.outputs.any_changed == 'true'
- uses: astral-sh/setup-uv@v6
+ uses: astral-sh/setup-uv@v7
with:
enable-cache: false
python-version: "3.12"
@@ -68,15 +68,17 @@ jobs:
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
persist-credentials: false
- name: Check changed files
id: changed-files
- uses: tj-actions/changed-files@v46
+ uses: tj-actions/changed-files@v47
with:
- files: web/**
+ files: |
+ web/**
+ .github/workflows/style.yml
- name: Install pnpm
uses: pnpm/action-setup@v4
@@ -85,12 +87,12 @@ jobs:
run_install: false
- name: Setup NodeJS
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v6
if: steps.changed-files.outputs.any_changed == 'true'
with:
node-version: 22
cache: pnpm
- cache-dependency-path: ./web/package.json
+ cache-dependency-path: ./web/pnpm-lock.yaml
- name: Web dependencies
if: steps.changed-files.outputs.any_changed == 'true'
@@ -106,37 +108,17 @@ jobs:
- name: Web type check
if: steps.changed-files.outputs.any_changed == 'true'
working-directory: ./web
- run: pnpm run type-check
+ run: pnpm run type-check:tsgo
- docker-compose-template:
- name: Docker Compose Template
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- persist-credentials: false
-
- - name: Check changed files
- id: changed-files
- uses: tj-actions/changed-files@v46
- with:
- files: |
- docker/generate_docker_compose
- docker/.env.example
- docker/docker-compose-template.yaml
- docker/docker-compose.yaml
-
- - name: Generate Docker Compose
+ - name: Web dead code check
if: steps.changed-files.outputs.any_changed == 'true'
- run: |
- cd docker
- ./generate_docker_compose
+ working-directory: ./web
+ run: pnpm run knip
- - name: Check for changes
+ - name: Web build check
if: steps.changed-files.outputs.any_changed == 'true'
- run: git diff --exit-code
+ working-directory: ./web
+ run: pnpm run build
superlinter:
name: SuperLinter
@@ -144,14 +126,14 @@ jobs:
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
fetch-depth: 0
persist-credentials: false
- name: Check changed files
id: changed-files
- uses: tj-actions/changed-files@v46
+ uses: tj-actions/changed-files@v47
with:
files: |
**.sh
diff --git a/.github/workflows/tool-test-sdks.yaml b/.github/workflows/tool-test-sdks.yaml
index b1ccd7417a..0259ef2232 100644
--- a/.github/workflows/tool-test-sdks.yaml
+++ b/.github/workflows/tool-test-sdks.yaml
@@ -25,12 +25,12 @@ jobs:
working-directory: sdks/nodejs-client
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
with:
persist-credentials: false
- name: Use Node.js ${{ matrix.node-version }}
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v6
with:
node-version: ${{ matrix.node-version }}
cache: ''
diff --git a/.github/workflows/translate-i18n-base-on-english.yml b/.github/workflows/translate-i18n-base-on-english.yml
index 836c3e0b02..16d36361fd 100644
--- a/.github/workflows/translate-i18n-base-on-english.yml
+++ b/.github/workflows/translate-i18n-base-on-english.yml
@@ -1,10 +1,11 @@
-name: Check i18n Files and Create PR
+name: Translate i18n Files Based on English
on:
push:
branches: [main]
paths:
- - 'web/i18n/en-US/*.ts'
+ - 'web/i18n/en-US/*.json'
+ workflow_dispatch:
permissions:
contents: write
@@ -18,29 +19,37 @@ jobs:
run:
working-directory: web
steps:
+      # Keep using the old checkout action version due to https://github.com/peter-evans/create-pull-request/issues/4272
- uses: actions/checkout@v4
with:
- fetch-depth: 2
+ fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
- name: Check for file changes in i18n/en-US
id: check_files
run: |
- recent_commit_sha=$(git rev-parse HEAD)
- second_recent_commit_sha=$(git rev-parse HEAD~1)
- changed_files=$(git diff --name-only $recent_commit_sha $second_recent_commit_sha -- 'i18n/en-US/*.ts')
- echo "Changed files: $changed_files"
- if [ -n "$changed_files" ]; then
+ # Skip check for manual trigger, translate all files
+ if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
echo "FILES_CHANGED=true" >> $GITHUB_ENV
- file_args=""
- for file in $changed_files; do
- filename=$(basename "$file" .ts)
- file_args="$file_args --file=$filename"
- done
- echo "FILE_ARGS=$file_args" >> $GITHUB_ENV
- echo "File arguments: $file_args"
+ echo "FILE_ARGS=" >> $GITHUB_ENV
+ echo "Manual trigger: translating all files"
else
- echo "FILES_CHANGED=false" >> $GITHUB_ENV
+ git fetch origin "${{ github.event.before }}" || true
+ git fetch origin "${{ github.sha }}" || true
+ changed_files=$(git diff --name-only "${{ github.event.before }}" "${{ github.sha }}" -- 'i18n/en-US/*.json')
+ echo "Changed files: $changed_files"
+ if [ -n "$changed_files" ]; then
+ echo "FILES_CHANGED=true" >> $GITHUB_ENV
+ file_args=""
+ for file in $changed_files; do
+ filename=$(basename "$file" .json)
+ file_args="$file_args --file $filename"
+ done
+ echo "FILE_ARGS=$file_args" >> $GITHUB_ENV
+ echo "File arguments: $file_args"
+ else
+ echo "FILES_CHANGED=false" >> $GITHUB_ENV
+ fi
fi
- name: Install pnpm
@@ -51,11 +60,11 @@ jobs:
- name: Set up Node.js
if: env.FILES_CHANGED == 'true'
- uses: actions/setup-node@v4
+ uses: actions/setup-node@v6
with:
node-version: 'lts/*'
cache: pnpm
- cache-dependency-path: ./web/package.json
+ cache-dependency-path: ./web/pnpm-lock.yaml
- name: Install dependencies
if: env.FILES_CHANGED == 'true'
@@ -65,24 +74,21 @@ jobs:
- name: Generate i18n translations
if: env.FILES_CHANGED == 'true'
working-directory: ./web
- run: pnpm run auto-gen-i18n ${{ env.FILE_ARGS }}
-
- - name: Generate i18n type definitions
- if: env.FILES_CHANGED == 'true'
- working-directory: ./web
- run: pnpm run gen:i18n-types
+ run: pnpm run i18n:gen ${{ env.FILE_ARGS }}
- name: Create Pull Request
if: env.FILES_CHANGED == 'true'
uses: peter-evans/create-pull-request@v6
with:
token: ${{ secrets.GITHUB_TOKEN }}
- commit-message: Update i18n files and type definitions based on en-US changes
- title: 'chore: translate i18n files and update type definitions'
+ commit-message: 'chore(i18n): update translations based on en-US changes'
+ title: 'chore(i18n): translate i18n files based on en-US changes'
body: |
- This PR was automatically created to update i18n files and TypeScript type definitions based on changes in en-US locale.
-
+ This PR was automatically created to update i18n translation files based on changes in en-US locale.
+
+ **Triggered by:** ${{ github.sha }}
+
**Changes included:**
- Updated translation files for all locales
- - Regenerated TypeScript type definitions for type safety
- branch: chore/automated-i18n-updates
+ branch: chore/automated-i18n-updates-${{ github.sha }}
+ delete-branch: true
diff --git a/.github/workflows/vdb-tests.yml b/.github/workflows/vdb-tests.yml
index e33fbb209e..7735afdaca 100644
--- a/.github/workflows/vdb-tests.yml
+++ b/.github/workflows/vdb-tests.yml
@@ -1,10 +1,7 @@
name: Run VDB Tests
on:
- push:
- branches: [main]
- paths:
- - 'api/core/rag/*.py'
+ workflow_call:
concurrency:
group: vdb-tests-${{ github.head_ref || github.run_id }}
@@ -22,19 +19,19 @@ jobs:
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
persist-credentials: false
- name: Free Disk Space
- uses: endersonmenezes/free-disk-space@v2
+ uses: endersonmenezes/free-disk-space@v3
with:
remove_dotnet: true
remove_haskell: true
remove_tool_cache: true
- name: Setup UV and Python
- uses: astral-sh/setup-uv@v6
+ uses: astral-sh/setup-uv@v7
with:
enable-cache: true
python-version: ${{ matrix.python-version }}
@@ -54,13 +51,13 @@ jobs:
- name: Expose Service Ports
run: sh .github/workflows/expose_service_ports.sh
- - name: Set up Vector Store (TiDB)
- uses: hoverkraft-tech/compose-action@v2.0.2
- with:
- compose-file: docker/tidb/docker-compose.yaml
- services: |
- tidb
- tiflash
+# - name: Set up Vector Store (TiDB)
+# uses: hoverkraft-tech/compose-action@v2.0.2
+# with:
+# compose-file: docker/tidb/docker-compose.yaml
+# services: |
+# tidb
+# tiflash
- name: Set up Vector Stores (Weaviate, Qdrant, PGVector, Milvus, PgVecto-RS, Chroma, MyScale, ElasticSearch, Couchbase, OceanBase)
uses: hoverkraft-tech/compose-action@v2.0.2
@@ -86,8 +83,8 @@ jobs:
ls -lah .
cp api/tests/integration_tests/.env.example api/tests/integration_tests/.env
- - name: Check VDB Ready (TiDB)
- run: uv run --project api python api/tests/integration_tests/vdb/tidb_vector/check_tiflash_ready.py
+# - name: Check VDB Ready (TiDB)
+# run: uv run --project api python api/tests/integration_tests/vdb/tidb_vector/check_tiflash_ready.py
- name: Test Vector Stores
run: uv run --project api bash dev/pytest/pytest_vdb.sh
diff --git a/.github/workflows/web-tests.yml b/.github/workflows/web-tests.yml
index 3313e58614..0fd1d5d22b 100644
--- a/.github/workflows/web-tests.yml
+++ b/.github/workflows/web-tests.yml
@@ -13,46 +13,356 @@ jobs:
runs-on: ubuntu-latest
defaults:
run:
+ shell: bash
working-directory: ./web
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
persist-credentials: false
- - name: Check changed files
- id: changed-files
- uses: tj-actions/changed-files@v46
- with:
- files: web/**
-
- name: Install pnpm
- if: steps.changed-files.outputs.any_changed == 'true'
uses: pnpm/action-setup@v4
with:
package_json_file: web/package.json
run_install: false
- name: Setup Node.js
- uses: actions/setup-node@v4
- if: steps.changed-files.outputs.any_changed == 'true'
+ uses: actions/setup-node@v6
with:
node-version: 22
cache: pnpm
- cache-dependency-path: ./web/package.json
+ cache-dependency-path: ./web/pnpm-lock.yaml
- name: Install dependencies
- if: steps.changed-files.outputs.any_changed == 'true'
- working-directory: ./web
run: pnpm install --frozen-lockfile
- - name: Check i18n types synchronization
- if: steps.changed-files.outputs.any_changed == 'true'
- working-directory: ./web
- run: pnpm run check:i18n-types
-
- name: Run tests
- if: steps.changed-files.outputs.any_changed == 'true'
- working-directory: ./web
- run: pnpm test
+ run: pnpm test:coverage
+
+ - name: Coverage Summary
+ if: always()
+ id: coverage-summary
+ run: |
+ set -eo pipefail
+
+ COVERAGE_FILE="coverage/coverage-final.json"
+ COVERAGE_SUMMARY_FILE="coverage/coverage-summary.json"
+
+ if [ ! -f "$COVERAGE_FILE" ] && [ ! -f "$COVERAGE_SUMMARY_FILE" ]; then
+ echo "has_coverage=false" >> "$GITHUB_OUTPUT"
+ echo "### 🚨 Test Coverage Report :test_tube:" >> "$GITHUB_STEP_SUMMARY"
+ echo "Coverage data not found. Ensure Vitest runs with coverage enabled." >> "$GITHUB_STEP_SUMMARY"
+ exit 0
+ fi
+
+ echo "has_coverage=true" >> "$GITHUB_OUTPUT"
+
+ node <<'NODE' >> "$GITHUB_STEP_SUMMARY"
+ const fs = require('fs');
+ const path = require('path');
+ let libCoverage = null;
+
+ try {
+ libCoverage = require('istanbul-lib-coverage');
+ } catch (error) {
+ libCoverage = null;
+ }
+
+ const summaryPath = path.join('coverage', 'coverage-summary.json');
+ const finalPath = path.join('coverage', 'coverage-final.json');
+
+ const hasSummary = fs.existsSync(summaryPath);
+ const hasFinal = fs.existsSync(finalPath);
+
+ if (!hasSummary && !hasFinal) {
+ console.log('### Test Coverage Summary :test_tube:');
+ console.log('');
+ console.log('No coverage data found.');
+ process.exit(0);
+ }
+
+ const summary = hasSummary
+ ? JSON.parse(fs.readFileSync(summaryPath, 'utf8'))
+ : null;
+ const coverage = hasFinal
+ ? JSON.parse(fs.readFileSync(finalPath, 'utf8'))
+ : null;
+
+ const getLineCoverageFromStatements = (statementMap, statementHits) => {
+ const lineHits = {};
+
+ if (!statementMap || !statementHits) {
+ return lineHits;
+ }
+
+ Object.entries(statementMap).forEach(([key, statement]) => {
+ const line = statement?.start?.line;
+ if (!line) {
+ return;
+ }
+ const hits = statementHits[key] ?? 0;
+ const previous = lineHits[line];
+ lineHits[line] = previous === undefined ? hits : Math.max(previous, hits);
+ });
+
+ return lineHits;
+ };
+
+ const getFileCoverage = (entry) => (
+ libCoverage ? libCoverage.createFileCoverage(entry) : null
+ );
+
+ const getLineHits = (entry, fileCoverage) => {
+ const lineHits = entry.l ?? {};
+ if (Object.keys(lineHits).length > 0) {
+ return lineHits;
+ }
+ if (fileCoverage) {
+ return fileCoverage.getLineCoverage();
+ }
+ return getLineCoverageFromStatements(entry.statementMap ?? {}, entry.s ?? {});
+ };
+
+ const getUncoveredLines = (entry, fileCoverage, lineHits) => {
+ if (lineHits && Object.keys(lineHits).length > 0) {
+ return Object.entries(lineHits)
+ .filter(([, count]) => count === 0)
+ .map(([line]) => Number(line))
+ .sort((a, b) => a - b);
+ }
+ if (fileCoverage) {
+ return fileCoverage.getUncoveredLines();
+ }
+ return [];
+ };
+
+ const totals = {
+ lines: { covered: 0, total: 0 },
+ statements: { covered: 0, total: 0 },
+ branches: { covered: 0, total: 0 },
+ functions: { covered: 0, total: 0 },
+ };
+ const fileSummaries = [];
+
+ if (summary) {
+ const totalEntry = summary.total ?? {};
+ ['lines', 'statements', 'branches', 'functions'].forEach((key) => {
+ if (totalEntry[key]) {
+ totals[key].covered = totalEntry[key].covered ?? 0;
+ totals[key].total = totalEntry[key].total ?? 0;
+ }
+ });
+
+ Object.entries(summary)
+ .filter(([file]) => file !== 'total')
+ .forEach(([file, data]) => {
+ fileSummaries.push({
+ file,
+ pct: data.lines?.pct ?? data.statements?.pct ?? 0,
+ lines: {
+ covered: data.lines?.covered ?? 0,
+ total: data.lines?.total ?? 0,
+ },
+ });
+ });
+ } else if (coverage) {
+ Object.entries(coverage).forEach(([file, entry]) => {
+ const fileCoverage = getFileCoverage(entry);
+ const lineHits = getLineHits(entry, fileCoverage);
+ const statementHits = entry.s ?? {};
+ const branchHits = entry.b ?? {};
+ const functionHits = entry.f ?? {};
+
+ const lineTotal = Object.keys(lineHits).length;
+ const lineCovered = Object.values(lineHits).filter((n) => n > 0).length;
+
+ const statementTotal = Object.keys(statementHits).length;
+ const statementCovered = Object.values(statementHits).filter((n) => n > 0).length;
+
+ const branchTotal = Object.values(branchHits).reduce((acc, branches) => acc + branches.length, 0);
+ const branchCovered = Object.values(branchHits).reduce(
+ (acc, branches) => acc + branches.filter((n) => n > 0).length,
+ 0,
+ );
+
+ const functionTotal = Object.keys(functionHits).length;
+ const functionCovered = Object.values(functionHits).filter((n) => n > 0).length;
+
+ totals.lines.total += lineTotal;
+ totals.lines.covered += lineCovered;
+ totals.statements.total += statementTotal;
+ totals.statements.covered += statementCovered;
+ totals.branches.total += branchTotal;
+ totals.branches.covered += branchCovered;
+ totals.functions.total += functionTotal;
+ totals.functions.covered += functionCovered;
+
+ const pct = (covered, tot) => (tot > 0 ? (covered / tot) * 100 : 0);
+
+ fileSummaries.push({
+ file,
+ pct: pct(lineCovered || statementCovered, lineTotal || statementTotal),
+ lines: {
+ covered: lineCovered || statementCovered,
+ total: lineTotal || statementTotal,
+ },
+ });
+ });
+ }
+
+ const pct = (covered, tot) => (tot > 0 ? ((covered / tot) * 100).toFixed(2) : '0.00');
+
+ console.log('### Test Coverage Summary :test_tube:');
+ console.log('');
+ console.log('| Metric | Coverage | Covered / Total |');
+ console.log('|--------|----------|-----------------|');
+ console.log(`| Lines | ${pct(totals.lines.covered, totals.lines.total)}% | ${totals.lines.covered} / ${totals.lines.total} |`);
+ console.log(`| Statements | ${pct(totals.statements.covered, totals.statements.total)}% | ${totals.statements.covered} / ${totals.statements.total} |`);
+ console.log(`| Branches | ${pct(totals.branches.covered, totals.branches.total)}% | ${totals.branches.covered} / ${totals.branches.total} |`);
+ console.log(`| Functions | ${pct(totals.functions.covered, totals.functions.total)}% | ${totals.functions.covered} / ${totals.functions.total} |`);
+
+ console.log('');
+          console.log('File coverage (lowest lines first)');
+ console.log('');
+ console.log('```');
+ fileSummaries
+ .sort((a, b) => (a.pct - b.pct) || (b.lines.total - a.lines.total))
+ .slice(0, 25)
+ .forEach(({ file, pct, lines }) => {
+ console.log(`${pct.toFixed(2)}%\t${lines.covered}/${lines.total}\t${file}`);
+ });
+ console.log('```');
+ console.log(' ');
+
+ if (coverage) {
+ const pctValue = (covered, tot) => {
+ if (tot === 0) {
+ return '0';
+ }
+ return ((covered / tot) * 100)
+ .toFixed(2)
+ .replace(/\.?0+$/, '');
+ };
+
+ const formatLineRanges = (lines) => {
+ if (lines.length === 0) {
+ return '';
+ }
+ const ranges = [];
+ let start = lines[0];
+ let end = lines[0];
+
+ for (let i = 1; i < lines.length; i += 1) {
+ const current = lines[i];
+ if (current === end + 1) {
+ end = current;
+ continue;
+ }
+ ranges.push(start === end ? `${start}` : `${start}-${end}`);
+ start = current;
+ end = current;
+ }
+ ranges.push(start === end ? `${start}` : `${start}-${end}`);
+ return ranges.join(',');
+ };
+
+ const tableTotals = {
+ statements: { covered: 0, total: 0 },
+ branches: { covered: 0, total: 0 },
+ functions: { covered: 0, total: 0 },
+ lines: { covered: 0, total: 0 },
+ };
+ const tableRows = Object.entries(coverage)
+ .map(([file, entry]) => {
+ const fileCoverage = getFileCoverage(entry);
+ const lineHits = getLineHits(entry, fileCoverage);
+ const statementHits = entry.s ?? {};
+ const branchHits = entry.b ?? {};
+ const functionHits = entry.f ?? {};
+
+ const lineTotal = Object.keys(lineHits).length;
+ const lineCovered = Object.values(lineHits).filter((n) => n > 0).length;
+ const statementTotal = Object.keys(statementHits).length;
+ const statementCovered = Object.values(statementHits).filter((n) => n > 0).length;
+ const branchTotal = Object.values(branchHits).reduce((acc, branches) => acc + branches.length, 0);
+ const branchCovered = Object.values(branchHits).reduce(
+ (acc, branches) => acc + branches.filter((n) => n > 0).length,
+ 0,
+ );
+ const functionTotal = Object.keys(functionHits).length;
+ const functionCovered = Object.values(functionHits).filter((n) => n > 0).length;
+
+ tableTotals.lines.total += lineTotal;
+ tableTotals.lines.covered += lineCovered;
+ tableTotals.statements.total += statementTotal;
+ tableTotals.statements.covered += statementCovered;
+ tableTotals.branches.total += branchTotal;
+ tableTotals.branches.covered += branchCovered;
+ tableTotals.functions.total += functionTotal;
+ tableTotals.functions.covered += functionCovered;
+
+ const uncoveredLines = getUncoveredLines(entry, fileCoverage, lineHits);
+
+ const filePath = entry.path ?? file;
+ const relativePath = path.isAbsolute(filePath)
+ ? path.relative(process.cwd(), filePath)
+ : filePath;
+
+ return {
+ file: relativePath || file,
+ statements: pctValue(statementCovered, statementTotal),
+ branches: pctValue(branchCovered, branchTotal),
+ functions: pctValue(functionCovered, functionTotal),
+ lines: pctValue(lineCovered, lineTotal),
+ uncovered: formatLineRanges(uncoveredLines),
+ };
+ })
+ .sort((a, b) => a.file.localeCompare(b.file));
+
+ const columns = [
+ { key: 'file', header: 'File', align: 'left' },
+ { key: 'statements', header: '% Stmts', align: 'right' },
+ { key: 'branches', header: '% Branch', align: 'right' },
+ { key: 'functions', header: '% Funcs', align: 'right' },
+ { key: 'lines', header: '% Lines', align: 'right' },
+ { key: 'uncovered', header: 'Uncovered Line #s', align: 'left' },
+ ];
+
+ const allFilesRow = {
+ file: 'All files',
+ statements: pctValue(tableTotals.statements.covered, tableTotals.statements.total),
+ branches: pctValue(tableTotals.branches.covered, tableTotals.branches.total),
+ functions: pctValue(tableTotals.functions.covered, tableTotals.functions.total),
+ lines: pctValue(tableTotals.lines.covered, tableTotals.lines.total),
+ uncovered: '',
+ };
+
+ const rowsForOutput = [allFilesRow, ...tableRows];
+ const formatRow = (row) => `| ${columns
+ .map(({ key }) => String(row[key] ?? ''))
+ .join(' | ')} |`;
+ const headerRow = `| ${columns.map(({ header }) => header).join(' | ')} |`;
+ const dividerRow = `| ${columns
+ .map(({ align }) => (align === 'right' ? '---:' : ':---'))
+ .join(' | ')} |`;
+
+ console.log('');
+          console.log('Vitest coverage table');
+ console.log('');
+ console.log(headerRow);
+ console.log(dividerRow);
+ rowsForOutput.forEach((row) => console.log(formatRow(row)));
+ console.log(' ');
+ }
+ NODE
+
+ - name: Upload Coverage Artifact
+ if: steps.coverage-summary.outputs.has_coverage == 'true'
+ uses: actions/upload-artifact@v6
+ with:
+ name: web-coverage-report
+ path: web/coverage
+ retention-days: 30
+ if-no-files-found: error
diff --git a/.gitignore b/.gitignore
index c6067e96cd..7bd919f095 100644
--- a/.gitignore
+++ b/.gitignore
@@ -139,7 +139,6 @@ pyrightconfig.json
.idea/'
.DS_Store
-web/.vscode/settings.json
# Intellij IDEA Files
.idea/*
@@ -186,13 +185,17 @@ docker/volumes/couchbase/*
docker/volumes/oceanbase/*
docker/volumes/plugin_daemon/*
docker/volumes/matrixone/*
+docker/volumes/mysql/*
+docker/volumes/seekdb/*
!docker/volumes/oceanbase/init.d
+docker/volumes/iris/*
docker/nginx/conf.d/default.conf
docker/nginx/ssl/*
!docker/nginx/ssl/.gitkeep
docker/middleware.env
docker/docker-compose.override.yaml
+docker/env-backup/*
sdks/python-client/build
sdks/python-client/dist
@@ -202,7 +205,6 @@ sdks/python-client/dify_client.egg-info
!.vscode/launch.json.template
!.vscode/README.md
api/.vscode
-web/.vscode
# vscode Code History Extension
.history
@@ -217,15 +219,6 @@ plugins.jsonl
# mise
mise.toml
-# Next.js build output
-.next/
-
-# PWA generated files
-web/public/sw.js
-web/public/sw.js.map
-web/public/workbox-*.js
-web/public/workbox-*.js.map
-web/public/fallback-*.js
# AI Assistant
.roo/
@@ -242,3 +235,4 @@ scripts/stress-test/reports/
# settings
*.local.json
+*.local.md
diff --git a/.mcp.json b/.mcp.json
deleted file mode 100644
index 8eceaf9ead..0000000000
--- a/.mcp.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{
- "mcpServers": {
- "context7": {
- "type": "http",
- "url": "https://mcp.context7.com/mcp"
- },
- "sequential-thinking": {
- "type": "stdio",
- "command": "npx",
- "args": ["-y", "@modelcontextprotocol/server-sequential-thinking"],
- "env": {}
- },
- "github": {
- "type": "stdio",
- "command": "npx",
- "args": ["-y", "@modelcontextprotocol/server-github"],
- "env": {
- "GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_PERSONAL_ACCESS_TOKEN}"
- }
- },
- "fetch": {
- "type": "stdio",
- "command": "uvx",
- "args": ["mcp-server-fetch"],
- "env": {}
- },
- "playwright": {
- "type": "stdio",
- "command": "npx",
- "args": ["-y", "@playwright/mcp@latest"],
- "env": {}
- }
- }
- }
\ No newline at end of file
diff --git a/.nvmrc b/.nvmrc
new file mode 100644
index 0000000000..7af24b7ddb
--- /dev/null
+++ b/.nvmrc
@@ -0,0 +1 @@
+22.11.0
diff --git a/.vscode/launch.json.template b/.vscode/launch.json.template
index bd5a787d4c..bdded1e73e 100644
--- a/.vscode/launch.json.template
+++ b/.vscode/launch.json.template
@@ -37,7 +37,7 @@
"-c",
"1",
"-Q",
- "dataset,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,priority_pipeline,pipeline",
+ "dataset,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor,retention",
"--loglevel",
"INFO"
],
diff --git a/AGENTS.md b/AGENTS.md
index 2ef7931efc..782861ad36 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -24,8 +24,8 @@ The codebase is split into:
```bash
cd web
-pnpm lint
pnpm lint:fix
+pnpm type-check:tsgo
pnpm test
```
@@ -39,7 +39,7 @@ pnpm test
## Language Style
- **Python**: Keep type hints on functions and attributes, and implement relevant special methods (e.g., `__repr__`, `__str__`).
-- **TypeScript**: Use the strict config, lean on ESLint + Prettier workflows, and avoid `any` types.
+- **TypeScript**: Use the strict config, rely on ESLint (`pnpm lint:fix` preferred) plus `pnpm type-check:tsgo`, and avoid `any` types.
## General Practices
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index fdc414b047..20a7d6c6f6 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -77,6 +77,8 @@ How we prioritize:
For setting up the frontend service, please refer to our comprehensive [guide](https://github.com/langgenius/dify/blob/main/web/README.md) in the `web/README.md` file. This document provides detailed instructions to help you set up the frontend environment properly.
+**Testing**: All React components must have comprehensive test coverage. See [web/testing/testing.md](https://github.com/langgenius/dify/blob/main/web/testing/testing.md) for the canonical frontend testing guidelines and follow every requirement described there.
+
#### Backend
For setting up the backend service, kindly refer to our detailed [instructions](https://github.com/langgenius/dify/blob/main/api/README.md) in the `api/README.md` file. This document contains step-by-step guidance to help you get the backend up and running smoothly.
diff --git a/Makefile b/Makefile
index 19c398ec82..07afd8187e 100644
--- a/Makefile
+++ b/Makefile
@@ -70,6 +70,11 @@ type-check:
@uv run --directory api --dev basedpyright
@echo "✅ Type check complete"
+test:
+ @echo "🧪 Running backend unit tests..."
+ @uv run --project api --dev dev/pytest/pytest_unit_tests.sh
+ @echo "✅ Tests complete"
+
# Build Docker images
build-web:
@echo "Building web Docker image: $(WEB_IMAGE):$(VERSION)..."
@@ -119,6 +124,7 @@ help:
@echo " make check - Check code with ruff"
@echo " make lint - Format and fix code with ruff"
@echo " make type-check - Run type checking with basedpyright"
+ @echo " make test - Run backend unit tests"
@echo ""
@echo "Docker Build Targets:"
@echo " make build-web - Build web Docker image"
@@ -128,4 +134,4 @@ help:
@echo " make build-push-all - Build and push all Docker images"
# Phony targets
-.PHONY: build-web build-api push-web push-api build-all push-all build-push-all dev-setup prepare-docker prepare-web prepare-api dev-clean help format check lint type-check
+.PHONY: build-web build-api push-web push-api build-all push-all build-push-all dev-setup prepare-docker prepare-web prepare-api dev-clean help format check lint type-check test
diff --git a/README.md b/README.md
index e5cc05fbc0..b71764a214 100644
--- a/README.md
+++ b/README.md
@@ -36,6 +36,12 @@
+
+
+
+
+
+
@@ -133,6 +139,19 @@ Star Dify on GitHub and be instantly notified of new releases.
If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker-compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
+#### Customizing Suggested Questions
+
+You can now customize the "Suggested Questions After Answer" feature to better fit your use case. For example, to generate longer, more technical questions:
+
+```bash
+# In your .env file
+SUGGESTED_QUESTIONS_PROMPT='Please help me predict the five most likely technical follow-up questions a developer would ask. Focus on implementation details, best practices, and architecture considerations. Keep each question between 40-60 characters. Output must be JSON array: ["question1","question2","question3","question4","question5"]'
+SUGGESTED_QUESTIONS_MAX_TOKENS=512
+SUGGESTED_QUESTIONS_TEMPERATURE=0.3
+```
+
+See the [Suggested Questions Configuration Guide](docs/suggested-questions-configuration.md) for detailed examples and usage instructions.
+
### Metrics Monitoring with Grafana
Import the dashboard to Grafana, using Dify's PostgreSQL database as data source, to monitor metrics in granularity of apps, tenants, messages, and more.
diff --git a/api/.env.example b/api/.env.example
index 5713095374..88611e016e 100644
--- a/api/.env.example
+++ b/api/.env.example
@@ -72,12 +72,15 @@ REDIS_CLUSTERS_PASSWORD=
# celery configuration
CELERY_BROKER_URL=redis://:difyai123456@localhost:${REDIS_PORT}/1
CELERY_BACKEND=redis
-# PostgreSQL database configuration
+
+# Database configuration
+DB_TYPE=postgresql
DB_USERNAME=postgres
DB_PASSWORD=difyai123456
DB_HOST=localhost
DB_PORT=5432
DB_DATABASE=dify
+
SQLALCHEMY_POOL_PRE_PING=true
SQLALCHEMY_POOL_TIMEOUT=30
@@ -98,6 +101,15 @@ S3_ACCESS_KEY=your-access-key
S3_SECRET_KEY=your-secret-key
S3_REGION=your-region
+# Workflow run and Conversation archive storage (S3-compatible)
+ARCHIVE_STORAGE_ENABLED=false
+ARCHIVE_STORAGE_ENDPOINT=
+ARCHIVE_STORAGE_ARCHIVE_BUCKET=
+ARCHIVE_STORAGE_EXPORT_BUCKET=
+ARCHIVE_STORAGE_ACCESS_KEY=
+ARCHIVE_STORAGE_SECRET_KEY=
+ARCHIVE_STORAGE_REGION=auto
+
# Azure Blob Storage configuration
AZURE_BLOB_ACCOUNT_NAME=your-account-name
AZURE_BLOB_ACCOUNT_KEY=your-account-key
@@ -113,6 +125,7 @@ ALIYUN_OSS_AUTH_VERSION=v1
ALIYUN_OSS_REGION=your-region
# Don't start with '/'. OSS doesn't support leading slash in object names.
ALIYUN_OSS_PATH=your-path
+ALIYUN_CLOUDBOX_ID=your-cloudbox-id
# Google Storage configuration
GOOGLE_STORAGE_BUCKET_NAME=your-bucket-name
@@ -124,12 +137,14 @@ TENCENT_COS_SECRET_KEY=your-secret-key
TENCENT_COS_SECRET_ID=your-secret-id
TENCENT_COS_REGION=your-region
TENCENT_COS_SCHEME=your-scheme
+TENCENT_COS_CUSTOM_DOMAIN=your-custom-domain
# Huawei OBS Storage Configuration
HUAWEI_OBS_BUCKET_NAME=your-bucket-name
HUAWEI_OBS_SECRET_KEY=your-secret-key
HUAWEI_OBS_ACCESS_KEY=your-access-key
HUAWEI_OBS_SERVER=your-server-url
+HUAWEI_OBS_PATH_STYLE=false
# Baidu OBS Storage Configuration
BAIDU_OBS_BUCKET_NAME=your-bucket-name
@@ -163,7 +178,7 @@ CONSOLE_CORS_ALLOW_ORIGINS=http://localhost:3000,*
COOKIE_DOMAIN=
# Vector database configuration
-# Supported values are `weaviate`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `oceanbase`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`.
+# Supported values are `weaviate`, `oceanbase`, `qdrant`, `milvus`, `myscale`, `relyt`, `pgvector`, `pgvecto-rs`, `chroma`, `opensearch`, `oracle`, `tencent`, `elasticsearch`, `elasticsearch-ja`, `analyticdb`, `couchbase`, `vikingdb`, `opengauss`, `tablestore`,`vastbase`,`tidb`,`tidb_on_qdrant`,`baidu`,`lindorm`,`huawei_cloud`,`upstash`, `matrixone`.
VECTOR_STORE=weaviate
# Prefix used to create collection name in vector database
VECTOR_INDEX_NAME_PREFIX=Vector_index
@@ -173,6 +188,18 @@ WEAVIATE_ENDPOINT=http://localhost:8080
WEAVIATE_API_KEY=WVF5YThaHlkYwhGUSmCRgsX3tD5ngdN8pkih
WEAVIATE_GRPC_ENABLED=false
WEAVIATE_BATCH_SIZE=100
+WEAVIATE_TOKENIZATION=word
+
+# OceanBase Vector configuration
+OCEANBASE_VECTOR_HOST=127.0.0.1
+OCEANBASE_VECTOR_PORT=2881
+OCEANBASE_VECTOR_USER=root@test
+OCEANBASE_VECTOR_PASSWORD=difyai123456
+OCEANBASE_VECTOR_DATABASE=test
+OCEANBASE_MEMORY_LIMIT=6G
+OCEANBASE_ENABLE_HYBRID_SEARCH=false
+OCEANBASE_FULLTEXT_PARSER=ik
+SEEKDB_MEMORY_LIMIT=2G
# Qdrant configuration, use `http://localhost:6333` for local mode or `https://your-qdrant-cluster-url.qdrant.io` for remote mode
QDRANT_URL=http://localhost:6333
@@ -339,15 +366,6 @@ LINDORM_PASSWORD=admin
LINDORM_USING_UGC=True
LINDORM_QUERY_TIMEOUT=1
-# OceanBase Vector configuration
-OCEANBASE_VECTOR_HOST=127.0.0.1
-OCEANBASE_VECTOR_PORT=2881
-OCEANBASE_VECTOR_USER=root@test
-OCEANBASE_VECTOR_PASSWORD=difyai123456
-OCEANBASE_VECTOR_DATABASE=test
-OCEANBASE_MEMORY_LIMIT=6G
-OCEANBASE_ENABLE_HYBRID_SEARCH=false
-
# AlibabaCloud MySQL Vector configuration
ALIBABACLOUD_MYSQL_HOST=127.0.0.1
ALIBABACLOUD_MYSQL_PORT=3306
@@ -484,6 +502,8 @@ LOG_FILE_BACKUP_COUNT=5
LOG_DATEFORMAT=%Y-%m-%d %H:%M:%S
# Log Timezone
LOG_TZ=UTC
+# Log output format: text or json
+LOG_OUTPUT_FORMAT=text
# Log format
LOG_FORMAT=%(asctime)s,%(msecs)d %(levelname)-2s [%(filename)s:%(lineno)d] %(req_id)s %(message)s
@@ -534,8 +554,28 @@ WORKFLOW_LOG_CLEANUP_BATCH_SIZE=100
# App configuration
APP_MAX_EXECUTION_TIME=1200
+APP_DEFAULT_ACTIVE_REQUESTS=0
APP_MAX_ACTIVE_REQUESTS=0
+# Aliyun SLS Logstore Configuration
+# Aliyun Access Key ID
+ALIYUN_SLS_ACCESS_KEY_ID=
+# Aliyun Access Key Secret
+ALIYUN_SLS_ACCESS_KEY_SECRET=
+# Aliyun SLS Endpoint (e.g., cn-hangzhou.log.aliyuncs.com)
+ALIYUN_SLS_ENDPOINT=
+# Aliyun SLS Region (e.g., cn-hangzhou)
+ALIYUN_SLS_REGION=
+# Aliyun SLS Project Name
+ALIYUN_SLS_PROJECT_NAME=
+# Number of days to retain workflow run logs (default: 365 days, 3650 for permanent storage)
+ALIYUN_SLS_LOGSTORE_TTL=365
+# Enable dual-write to both SLS LogStore and SQL database (default: false)
+LOGSTORE_DUAL_WRITE_ENABLED=false
+# Enable dual-read fallback to SQL database when LogStore returns no results (default: true)
+# Useful for migration scenarios where historical data exists only in SQL database
+LOGSTORE_DUAL_READ_ENABLED=true
+
# Celery beat configuration
CELERY_BEAT_SCHEDULER_TIME=1
@@ -626,8 +666,45 @@ SWAGGER_UI_PATH=/swagger-ui.html
# Set to false to export dataset IDs as plain text for easier cross-environment import
DSL_EXPORT_ENCRYPT_DATASET_ID=true
+# Suggested Questions After Answer Configuration
+# These environment variables allow customization of the suggested questions feature
+#
+# Custom prompt for generating suggested questions (optional)
+# If not set, uses the default prompt that generates 3 questions under 20 characters each
+# Example: "Please help me predict the five most likely technical follow-up questions a developer would ask. Focus on implementation details, best practices, and architecture considerations. Keep each question between 40-60 characters. Output must be JSON array: [\"question1\",\"question2\",\"question3\",\"question4\",\"question5\"]"
+# SUGGESTED_QUESTIONS_PROMPT=
+
+# Maximum number of tokens for suggested questions generation (default: 256)
+# Adjust this value for longer questions or more questions
+# SUGGESTED_QUESTIONS_MAX_TOKENS=256
+
+# Temperature for suggested questions generation (default: 0.0)
+# Higher values (0.5-1.0) produce more creative questions, lower values (0.0-0.3) produce more focused questions
+# SUGGESTED_QUESTIONS_TEMPERATURE=0
+
# Tenant isolated task queue configuration
TENANT_ISOLATED_TASK_CONCURRENCY=1
# Maximum number of segments for dataset segments API (0 for unlimited)
DATASET_MAX_SEGMENTS_PER_REQUEST=0
+
+# Multimodal knowledgebase limit
+SINGLE_CHUNK_ATTACHMENT_LIMIT=10
+ATTACHMENT_IMAGE_FILE_SIZE_LIMIT=2
+ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT=60
+IMAGE_FILE_BATCH_LIMIT=10
+
+# Maximum allowed CSV file size for annotation import in megabytes
+ANNOTATION_IMPORT_FILE_SIZE_LIMIT=2
+# Maximum number of annotation records allowed in a single import
+ANNOTATION_IMPORT_MAX_RECORDS=10000
+# Minimum number of annotation records required in a single import
+ANNOTATION_IMPORT_MIN_RECORDS=1
+ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE=5
+ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR=20
+# Maximum number of concurrent annotation import tasks per tenant
+ANNOTATION_IMPORT_MAX_CONCURRENT=5
+# Sandbox expired records cleanup configuration
+SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD=21
+SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE=1000
+SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS=30
diff --git a/api/.importlinter b/api/.importlinter
index 98fe5f50bb..24ece72b30 100644
--- a/api/.importlinter
+++ b/api/.importlinter
@@ -16,6 +16,7 @@ layers =
graph
nodes
node_events
+ runtime
entities
containers =
core.workflow
diff --git a/api/.ruff.toml b/api/.ruff.toml
index 5a29e1d8fa..8db0cbcb21 100644
--- a/api/.ruff.toml
+++ b/api/.ruff.toml
@@ -1,4 +1,8 @@
-exclude = ["migrations/*"]
+exclude = [
+ "migrations/*",
+ ".git",
+ ".git/**",
+]
line-length = 120
[format]
@@ -36,17 +40,20 @@ select = [
"UP", # pyupgrade rules
"W191", # tab-indentation
"W605", # invalid-escape-sequence
+ "G001", # don't use str format to logging messages
+ "G003", # don't use + in logging messages
+ "G004", # don't use f-strings to format logging messages
+ "UP042", # use StrEnum,
+ "S110", # disallow the try-except-pass pattern.
+
# security related linting rules
# RCE proctection (sort of)
"S102", # exec-builtin, disallow use of `exec`
"S307", # suspicious-eval-usage, disallow use of `eval` and `ast.literal_eval`
"S301", # suspicious-pickle-usage, disallow use of `pickle` and its wrappers.
"S302", # suspicious-marshal-usage, disallow use of `marshal` module
- "S311", # suspicious-non-cryptographic-random-usage
- "G001", # don't use str format to logging messages
- "G003", # don't use + in logging messages
- "G004", # don't use f-strings to format logging messages
- "UP042", # use StrEnum
+ "S311", # suspicious-non-cryptographic-random-usage,
+
]
ignore = [
@@ -91,18 +98,16 @@ ignore = [
"configs/*" = [
"N802", # invalid-function-name
]
-"core/model_runtime/callbacks/base_callback.py" = [
- "T201",
-]
-"core/workflow/callbacks/workflow_logging_callback.py" = [
- "T201",
-]
+"core/model_runtime/callbacks/base_callback.py" = ["T201"]
+"core/workflow/callbacks/workflow_logging_callback.py" = ["T201"]
"libs/gmpy2_pkcs10aep_cipher.py" = [
"N803", # invalid-argument-name
]
"tests/*" = [
"F811", # redefined-while-unused
- "T201", # allow print in tests
+ "T201", # allow print in tests,
+ "S110", # allow ignoring exceptions in tests code (currently)
+
]
[lint.pyflakes]
diff --git a/api/Dockerfile b/api/Dockerfile
index ed61923a40..02df91bfc1 100644
--- a/api/Dockerfile
+++ b/api/Dockerfile
@@ -48,6 +48,12 @@ ENV PYTHONIOENCODING=utf-8
WORKDIR /app/api
+# Create non-root user
+ARG dify_uid=1001
+RUN groupadd -r -g ${dify_uid} dify && \
+ useradd -r -u ${dify_uid} -g ${dify_uid} -s /bin/bash dify && \
+ chown -R dify:dify /app
+
RUN \
apt-get update \
# Install dependencies
@@ -57,7 +63,7 @@ RUN \
# for gmpy2 \
libgmp-dev libmpfr-dev libmpc-dev \
# For Security
- expat libldap-2.5-0 perl libsqlite3-0 zlib1g \
+ expat libldap-2.5-0=2.5.13+dfsg-5 perl libsqlite3-0=3.40.1-2+deb12u2 zlib1g=1:1.2.13.dfsg-1 \
# install fonts to support the use of tools like pypdfium2
fonts-noto-cjk \
# install a package to improve the accuracy of guessing mime type and file extension
@@ -69,24 +75,29 @@ RUN \
# Copy Python environment and packages
ENV VIRTUAL_ENV=/app/api/.venv
-COPY --from=packages ${VIRTUAL_ENV} ${VIRTUAL_ENV}
+COPY --from=packages --chown=dify:dify ${VIRTUAL_ENV} ${VIRTUAL_ENV}
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
# Download nltk data
-RUN python -c "import nltk; nltk.download('punkt'); nltk.download('averaged_perceptron_tagger')"
+RUN mkdir -p /usr/local/share/nltk_data && NLTK_DATA=/usr/local/share/nltk_data python -c "import nltk; nltk.download('punkt'); nltk.download('averaged_perceptron_tagger'); nltk.download('stopwords')" \
+ && chmod -R 755 /usr/local/share/nltk_data
ENV TIKTOKEN_CACHE_DIR=/app/api/.tiktoken_cache
-RUN python -c "import tiktoken; tiktoken.encoding_for_model('gpt2')"
+RUN python -c "import tiktoken; tiktoken.encoding_for_model('gpt2')" \
+ && chown -R dify:dify ${TIKTOKEN_CACHE_DIR}
# Copy source code
-COPY . /app/api/
+COPY --chown=dify:dify . /app/api/
+
+# Prepare entrypoint script
+COPY --chown=dify:dify --chmod=755 docker/entrypoint.sh /entrypoint.sh
-# Copy entrypoint
-COPY docker/entrypoint.sh /entrypoint.sh
-RUN chmod +x /entrypoint.sh
ARG COMMIT_SHA
ENV COMMIT_SHA=${COMMIT_SHA}
+ENV NLTK_DATA=/usr/local/share/nltk_data
+
+USER dify
ENTRYPOINT ["/bin/bash", "/entrypoint.sh"]
diff --git a/api/README.md b/api/README.md
index 7809ea8a3d..794b05d3af 100644
--- a/api/README.md
+++ b/api/README.md
@@ -15,8 +15,8 @@
```bash
cd ../docker
cp middleware.env.example middleware.env
- # change the profile to other vector database if you are not using weaviate
- docker compose -f docker-compose.middleware.yaml --profile weaviate -p dify up -d
+ # change the profile to mysql if you are not using postgres; change the profile to other vector database if you are not using weaviate
+ docker compose -f docker-compose.middleware.yaml --profile postgresql --profile weaviate -p dify up -d
cd ../api
```
@@ -84,7 +84,7 @@
1. If you need to handle and debug the async tasks (e.g. dataset importing and documents indexing), please start the worker service.
```bash
-uv run celery -A app.celery worker -P threads -c 2 --loglevel INFO -Q dataset,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,priority_pipeline,pipeline
+uv run celery -A app.celery worker -P threads -c 2 --loglevel INFO -Q dataset,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor,retention
```
Additionally, if you want to debug the celery scheduled tasks, you can run the following command in another terminal to start the beat service:
diff --git a/api/app_factory.py b/api/app_factory.py
index 17c376de77..f827842d68 100644
--- a/api/app_factory.py
+++ b/api/app_factory.py
@@ -1,8 +1,12 @@
import logging
import time
+from opentelemetry.trace import get_current_span
+from opentelemetry.trace.span import INVALID_SPAN_ID, INVALID_TRACE_ID
+
from configs import dify_config
from contexts.wrapper import RecyclableContextVar
+from core.logging.context import init_request_context
from dify_app import DifyApp
logger = logging.getLogger(__name__)
@@ -18,15 +22,40 @@ def create_flask_app_with_configs() -> DifyApp:
"""
dify_app = DifyApp(__name__)
dify_app.config.from_mapping(dify_config.model_dump())
+ dify_app.config["RESTX_INCLUDE_ALL_MODELS"] = True
# add before request hook
@dify_app.before_request
def before_request():
- # add an unique identifier to each request
+ # Initialize logging context for this request
+ init_request_context()
RecyclableContextVar.increment_thread_recycles()
+ # add after request hook for injecting trace headers from OpenTelemetry span context
+ # Only adds headers when OTEL is enabled and has valid context
+ @dify_app.after_request
+ def add_trace_headers(response):
+ try:
+ span = get_current_span()
+ ctx = span.get_span_context() if span else None
+
+ if not ctx or not ctx.is_valid:
+ return response
+
+ # Inject trace headers from OTEL context
+ if ctx.trace_id != INVALID_TRACE_ID and "X-Trace-Id" not in response.headers:
+ response.headers["X-Trace-Id"] = format(ctx.trace_id, "032x")
+ if ctx.span_id != INVALID_SPAN_ID and "X-Span-Id" not in response.headers:
+ response.headers["X-Span-Id"] = format(ctx.span_id, "016x")
+
+ except Exception:
+ # Never break the response due to tracing header injection
+ logger.warning("Failed to add trace headers to response", exc_info=True)
+ return response
+
# Capture the decorator's return value to avoid pyright reportUnusedFunction
_ = before_request
+ _ = add_trace_headers
return dify_app
@@ -50,10 +79,12 @@ def initialize_extensions(app: DifyApp):
ext_commands,
ext_compress,
ext_database,
+ ext_forward_refs,
ext_hosting_provider,
ext_import_modules,
ext_logging,
ext_login,
+ ext_logstore,
ext_mail,
ext_migrate,
ext_orjson,
@@ -62,6 +93,7 @@ def initialize_extensions(app: DifyApp):
ext_redis,
ext_request_logging,
ext_sentry,
+ ext_session_factory,
ext_set_secretkey,
ext_storage,
ext_timezone,
@@ -74,6 +106,7 @@ def initialize_extensions(app: DifyApp):
ext_warnings,
ext_import_modules,
ext_orjson,
+ ext_forward_refs,
ext_set_secretkey,
ext_compress,
ext_code_based_extension,
@@ -82,6 +115,7 @@ def initialize_extensions(app: DifyApp):
ext_migrate,
ext_redis,
ext_storage,
+ ext_logstore, # Initialize logstore after storage, before celery
ext_celery,
ext_login,
ext_mail,
@@ -92,6 +126,7 @@ def initialize_extensions(app: DifyApp):
ext_commands,
ext_otel,
ext_request_logging,
+ ext_session_factory,
]
for ext in extensions:
short_name = ext.__name__.split(".")[-1]
diff --git a/api/commands.py b/api/commands.py
index e15c996a34..a8d89ac200 100644
--- a/api/commands.py
+++ b/api/commands.py
@@ -1139,6 +1139,7 @@ def remove_orphaned_files_on_storage(force: bool):
click.echo(click.style(f"Found {len(all_files_in_tables)} files in tables.", fg="white"))
except Exception as e:
click.echo(click.style(f"Error fetching keys: {str(e)}", fg="red"))
+ return
all_files_on_storage = []
for storage_path in storage_paths:
diff --git a/api/configs/extra/__init__.py b/api/configs/extra/__init__.py
index 4543b5389d..de97adfc0e 100644
--- a/api/configs/extra/__init__.py
+++ b/api/configs/extra/__init__.py
@@ -1,9 +1,11 @@
+from configs.extra.archive_config import ArchiveStorageConfig
from configs.extra.notion_config import NotionConfig
from configs.extra.sentry_config import SentryConfig
class ExtraServiceConfig(
# place the configs in alphabet order
+ ArchiveStorageConfig,
NotionConfig,
SentryConfig,
):
diff --git a/api/configs/extra/archive_config.py b/api/configs/extra/archive_config.py
new file mode 100644
index 0000000000..a85628fa61
--- /dev/null
+++ b/api/configs/extra/archive_config.py
@@ -0,0 +1,43 @@
+from pydantic import Field
+from pydantic_settings import BaseSettings
+
+
+class ArchiveStorageConfig(BaseSettings):
+ """
+ Configuration settings for workflow run logs archiving storage.
+ """
+
+ ARCHIVE_STORAGE_ENABLED: bool = Field(
+ description="Enable workflow run logs archiving to S3-compatible storage",
+ default=False,
+ )
+
+ ARCHIVE_STORAGE_ENDPOINT: str | None = Field(
+ description="URL of the S3-compatible storage endpoint (e.g., 'https://storage.example.com')",
+ default=None,
+ )
+
+ ARCHIVE_STORAGE_ARCHIVE_BUCKET: str | None = Field(
+ description="Name of the bucket to store archived workflow logs",
+ default=None,
+ )
+
+ ARCHIVE_STORAGE_EXPORT_BUCKET: str | None = Field(
+ description="Name of the bucket to store exported workflow runs",
+ default=None,
+ )
+
+ ARCHIVE_STORAGE_ACCESS_KEY: str | None = Field(
+ description="Access key ID for authenticating with storage",
+ default=None,
+ )
+
+ ARCHIVE_STORAGE_SECRET_KEY: str | None = Field(
+ description="Secret access key for authenticating with storage",
+ default=None,
+ )
+
+ ARCHIVE_STORAGE_REGION: str = Field(
+ description="Region for storage (use 'auto' if the provider supports it)",
+ default="auto",
+ )
diff --git a/api/configs/feature/__init__.py b/api/configs/feature/__init__.py
index ff1f983f94..6a04171d2d 100644
--- a/api/configs/feature/__init__.py
+++ b/api/configs/feature/__init__.py
@@ -73,14 +73,14 @@ class AppExecutionConfig(BaseSettings):
description="Maximum allowed execution time for the application in seconds",
default=1200,
)
+ APP_DEFAULT_ACTIVE_REQUESTS: NonNegativeInt = Field(
+ description="Default number of concurrent active requests per app (0 for unlimited)",
+ default=0,
+ )
APP_MAX_ACTIVE_REQUESTS: NonNegativeInt = Field(
description="Maximum number of concurrent active requests per app (0 for unlimited)",
default=0,
)
- APP_DAILY_RATE_LIMIT: NonNegativeInt = Field(
- description="Maximum number of requests per app per day",
- default=5000,
- )
class CodeExecutionSandboxConfig(BaseSettings):
@@ -218,7 +218,7 @@ class PluginConfig(BaseSettings):
PLUGIN_DAEMON_TIMEOUT: PositiveFloat | None = Field(
description="Timeout in seconds for requests to the plugin daemon (set to None to disable)",
- default=300.0,
+ default=600.0,
)
INNER_API_KEY_FOR_PLUGIN: str = Field(description="Inner api key for plugin", default="inner-api-key")
@@ -360,6 +360,57 @@ class FileUploadConfig(BaseSettings):
default=10,
)
+ IMAGE_FILE_BATCH_LIMIT: PositiveInt = Field(
+ description="Maximum number of files allowed in a image batch upload operation",
+ default=10,
+ )
+
+ SINGLE_CHUNK_ATTACHMENT_LIMIT: PositiveInt = Field(
+ description="Maximum number of files allowed in a single chunk attachment",
+ default=10,
+ )
+
+ ATTACHMENT_IMAGE_FILE_SIZE_LIMIT: NonNegativeInt = Field(
+ description="Maximum allowed image file size for attachments in megabytes",
+ default=2,
+ )
+
+ ATTACHMENT_IMAGE_DOWNLOAD_TIMEOUT: NonNegativeInt = Field(
+ description="Timeout for downloading image attachments in seconds",
+ default=60,
+ )
+
+ # Annotation Import Security Configurations
+ ANNOTATION_IMPORT_FILE_SIZE_LIMIT: NonNegativeInt = Field(
+ description="Maximum allowed CSV file size for annotation import in megabytes",
+ default=2,
+ )
+
+ ANNOTATION_IMPORT_MAX_RECORDS: PositiveInt = Field(
+ description="Maximum number of annotation records allowed in a single import",
+ default=10000,
+ )
+
+ ANNOTATION_IMPORT_MIN_RECORDS: PositiveInt = Field(
+ description="Minimum number of annotation records required in a single import",
+ default=1,
+ )
+
+ ANNOTATION_IMPORT_RATE_LIMIT_PER_MINUTE: PositiveInt = Field(
+ description="Maximum number of annotation import requests per minute per tenant",
+ default=5,
+ )
+
+ ANNOTATION_IMPORT_RATE_LIMIT_PER_HOUR: PositiveInt = Field(
+ description="Maximum number of annotation import requests per hour per tenant",
+ default=20,
+ )
+
+ ANNOTATION_IMPORT_MAX_CONCURRENT: PositiveInt = Field(
+ description="Maximum number of concurrent annotation import tasks per tenant",
+ default=2,
+ )
+
inner_UPLOAD_FILE_EXTENSION_BLACKLIST: str = Field(
description=(
"Comma-separated list of file extensions that are blocked from upload. "
@@ -536,6 +587,11 @@ class LoggingConfig(BaseSettings):
default="INFO",
)
+ LOG_OUTPUT_FORMAT: Literal["text", "json"] = Field(
+ description="Log output format: 'text' for human-readable, 'json' for structured JSON logs.",
+ default="text",
+ )
+
LOG_FILE: str | None = Field(
description="File path for log output.",
default=None,
@@ -553,7 +609,10 @@ class LoggingConfig(BaseSettings):
LOG_FORMAT: str = Field(
description="Format string for log messages",
- default="%(asctime)s.%(msecs)03d %(levelname)s [%(threadName)s] [%(filename)s:%(lineno)d] - %(message)s",
+ default=(
+ "%(asctime)s.%(msecs)03d %(levelname)s [%(threadName)s] "
+ "[%(filename)s:%(lineno)d] %(trace_id)s - %(message)s"
+ ),
)
LOG_DATEFORMAT: str | None = Field(
@@ -1086,7 +1145,7 @@ class CeleryScheduleTasksConfig(BaseSettings):
)
TRIGGER_PROVIDER_CREDENTIAL_THRESHOLD_SECONDS: int = Field(
description="Proactive credential refresh threshold in seconds",
- default=180,
+ default=60 * 60,
)
TRIGGER_PROVIDER_SUBSCRIPTION_THRESHOLD_SECONDS: int = Field(
description="Proactive subscription refresh threshold in seconds",
@@ -1216,6 +1275,21 @@ class TenantIsolatedTaskQueueConfig(BaseSettings):
)
+class SandboxExpiredRecordsCleanConfig(BaseSettings):
+ SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD: NonNegativeInt = Field(
+ description="Graceful period in days for sandbox records clean after subscription expiration",
+ default=21,
+ )
+ SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE: PositiveInt = Field(
+ description="Maximum number of records to process in each batch",
+ default=1000,
+ )
+ SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS: PositiveInt = Field(
+ description="Retention days for sandbox expired workflow_run records and message records",
+ default=30,
+ )
+
+
class FeatureConfig(
# place the configs in alphabet order
AppExecutionConfig,
@@ -1241,6 +1315,7 @@ class FeatureConfig(
PositionConfig,
RagEtlConfig,
RepositoryConfig,
+ SandboxExpiredRecordsCleanConfig,
SecurityConfig,
TenantIsolatedTaskQueueConfig,
ToolConfig,
diff --git a/api/configs/middleware/__init__.py b/api/configs/middleware/__init__.py
index 816d0e442f..63f75924bf 100644
--- a/api/configs/middleware/__init__.py
+++ b/api/configs/middleware/__init__.py
@@ -26,6 +26,7 @@ from .vdb.clickzetta_config import ClickzettaConfig
from .vdb.couchbase_config import CouchbaseConfig
from .vdb.elasticsearch_config import ElasticsearchConfig
from .vdb.huawei_cloud_config import HuaweiCloudConfig
+from .vdb.iris_config import IrisVectorConfig
from .vdb.lindorm_config import LindormConfig
from .vdb.matrixone_config import MatrixoneConfig
from .vdb.milvus_config import MilvusConfig
@@ -105,6 +106,12 @@ class KeywordStoreConfig(BaseSettings):
class DatabaseConfig(BaseSettings):
+ # Database type selector
+ DB_TYPE: Literal["postgresql", "mysql", "oceanbase", "seekdb"] = Field(
+ description="Database type to use. OceanBase is MySQL-compatible.",
+ default="postgresql",
+ )
+
DB_HOST: str = Field(
description="Hostname or IP address of the database server.",
default="localhost",
@@ -140,10 +147,10 @@ class DatabaseConfig(BaseSettings):
default="",
)
- SQLALCHEMY_DATABASE_URI_SCHEME: str = Field(
- description="Database URI scheme for SQLAlchemy connection.",
- default="postgresql",
- )
+ @computed_field # type: ignore[prop-decorator]
+ @property
+ def SQLALCHEMY_DATABASE_URI_SCHEME(self) -> str:
+ return "postgresql" if self.DB_TYPE == "postgresql" else "mysql+pymysql"
@computed_field # type: ignore[prop-decorator]
@property
@@ -204,15 +211,15 @@ class DatabaseConfig(BaseSettings):
# Parse DB_EXTRAS for 'options'
db_extras_dict = dict(parse_qsl(self.DB_EXTRAS))
options = db_extras_dict.get("options", "")
- # Always include timezone
- timezone_opt = "-c timezone=UTC"
- if options:
- # Merge user options and timezone
- merged_options = f"{options} {timezone_opt}"
- else:
- merged_options = timezone_opt
-
- connect_args = {"options": merged_options}
+ connect_args = {}
+ # Use the dynamic SQLALCHEMY_DATABASE_URI_SCHEME property
+ if self.SQLALCHEMY_DATABASE_URI_SCHEME.startswith("postgresql"):
+ timezone_opt = "-c timezone=UTC"
+ if options:
+ merged_options = f"{options} {timezone_opt}"
+ else:
+ merged_options = timezone_opt
+ connect_args = {"options": merged_options}
return {
"pool_size": self.SQLALCHEMY_POOL_SIZE,
@@ -330,6 +337,7 @@ class MiddlewareConfig(
ChromaConfig,
ClickzettaConfig,
HuaweiCloudConfig,
+ IrisVectorConfig,
MilvusConfig,
AlibabaCloudMySQLConfig,
MyScaleConfig,
diff --git a/api/configs/middleware/storage/aliyun_oss_storage_config.py b/api/configs/middleware/storage/aliyun_oss_storage_config.py
index 331c486d54..6df14175ae 100644
--- a/api/configs/middleware/storage/aliyun_oss_storage_config.py
+++ b/api/configs/middleware/storage/aliyun_oss_storage_config.py
@@ -41,3 +41,8 @@ class AliyunOSSStorageConfig(BaseSettings):
description="Base path within the bucket to store objects (e.g., 'my-app-data/')",
default=None,
)
+
+ ALIYUN_CLOUDBOX_ID: str | None = Field(
+ description="Cloudbox id for aliyun cloudbox service",
+ default=None,
+ )
diff --git a/api/configs/middleware/storage/huawei_obs_storage_config.py b/api/configs/middleware/storage/huawei_obs_storage_config.py
index 5b5cd2f750..46b6f2e68d 100644
--- a/api/configs/middleware/storage/huawei_obs_storage_config.py
+++ b/api/configs/middleware/storage/huawei_obs_storage_config.py
@@ -26,3 +26,8 @@ class HuaweiCloudOBSStorageConfig(BaseSettings):
description="Endpoint URL for Huawei Cloud OBS (e.g., 'https://obs.cn-north-4.myhuaweicloud.com')",
default=None,
)
+
+ HUAWEI_OBS_PATH_STYLE: bool = Field(
+ description="Flag to indicate whether to use path-style URLs for OBS requests",
+ default=False,
+ )
diff --git a/api/configs/middleware/storage/tencent_cos_storage_config.py b/api/configs/middleware/storage/tencent_cos_storage_config.py
index e297e748e9..cdd10740f8 100644
--- a/api/configs/middleware/storage/tencent_cos_storage_config.py
+++ b/api/configs/middleware/storage/tencent_cos_storage_config.py
@@ -31,3 +31,8 @@ class TencentCloudCOSStorageConfig(BaseSettings):
description="Protocol scheme for COS requests: 'https' (recommended) or 'http'",
default=None,
)
+
+ TENCENT_COS_CUSTOM_DOMAIN: str | None = Field(
+ description="Tencent Cloud COS custom domain setting",
+ default=None,
+ )
diff --git a/api/configs/middleware/vdb/iris_config.py b/api/configs/middleware/vdb/iris_config.py
new file mode 100644
index 0000000000..c532d191c3
--- /dev/null
+++ b/api/configs/middleware/vdb/iris_config.py
@@ -0,0 +1,91 @@
+"""Configuration for InterSystems IRIS vector database."""
+
+from pydantic import Field, PositiveInt, model_validator
+from pydantic_settings import BaseSettings
+
+
+class IrisVectorConfig(BaseSettings):
+ """Configuration settings for IRIS vector database connection and pooling."""
+
+ IRIS_HOST: str | None = Field(
+ description="Hostname or IP address of the IRIS server.",
+ default="localhost",
+ )
+
+ IRIS_SUPER_SERVER_PORT: PositiveInt | None = Field(
+ description="Port number for IRIS connection.",
+ default=1972,
+ )
+
+ IRIS_USER: str | None = Field(
+ description="Username for IRIS authentication.",
+ default="_SYSTEM",
+ )
+
+ IRIS_PASSWORD: str | None = Field(
+ description="Password for IRIS authentication.",
+ default="Dify@1234",
+ )
+
+ IRIS_SCHEMA: str | None = Field(
+ description="Schema name for IRIS tables.",
+ default="dify",
+ )
+
+ IRIS_DATABASE: str | None = Field(
+ description="Database namespace for IRIS connection.",
+ default="USER",
+ )
+
+ IRIS_CONNECTION_URL: str | None = Field(
+ description="Full connection URL for IRIS (overrides individual fields if provided).",
+ default=None,
+ )
+
+ IRIS_MIN_CONNECTION: PositiveInt = Field(
+ description="Minimum number of connections in the pool.",
+ default=1,
+ )
+
+ IRIS_MAX_CONNECTION: PositiveInt = Field(
+ description="Maximum number of connections in the pool.",
+ default=3,
+ )
+
+ IRIS_TEXT_INDEX: bool = Field(
+ description="Enable full-text search index using %iFind.Index.Basic.",
+ default=True,
+ )
+
+ IRIS_TEXT_INDEX_LANGUAGE: str = Field(
+ description="Language for full-text search index (e.g., 'en', 'ja', 'zh', 'de').",
+ default="en",
+ )
+
+ @model_validator(mode="before")
+ @classmethod
+ def validate_config(cls, values: dict) -> dict:
+ """Validate IRIS configuration values.
+
+ Args:
+ values: Configuration dictionary
+
+ Returns:
+ Validated configuration dictionary
+
+ Raises:
+ ValueError: If required fields are missing or pool settings are invalid
+ """
+ # Only validate required fields if IRIS is being used as the vector store
+ # This allows the config to be loaded even when IRIS is not in use
+
+ # vector_store = os.environ.get("VECTOR_STORE", "")
+ # We rely on Pydantic defaults for required fields if they are missing from env.
+ # Strict existence check is removed to allow defaults to work.
+
+ min_conn = values.get("IRIS_MIN_CONNECTION", 1)
+ max_conn = values.get("IRIS_MAX_CONNECTION", 3)
+ if min_conn > max_conn:
+ raise ValueError("IRIS_MIN_CONNECTION must be less than or equal to IRIS_MAX_CONNECTION")
+
+ return values
diff --git a/api/configs/middleware/vdb/weaviate_config.py b/api/configs/middleware/vdb/weaviate_config.py
index aa81c870f6..6f4fccaa7f 100644
--- a/api/configs/middleware/vdb/weaviate_config.py
+++ b/api/configs/middleware/vdb/weaviate_config.py
@@ -31,3 +31,8 @@ class WeaviateConfig(BaseSettings):
description="Number of objects to be processed in a single batch operation (default is 100)",
default=100,
)
+
+ WEAVIATE_TOKENIZATION: str | None = Field(
+ description="Tokenization for Weaviate (default is word)",
+ default="word",
+ )
diff --git a/api/constants/languages.py b/api/constants/languages.py
index 0312a558c9..8c1ce368ac 100644
--- a/api/constants/languages.py
+++ b/api/constants/languages.py
@@ -20,6 +20,7 @@ language_timezone_mapping = {
"sl-SI": "Europe/Ljubljana",
"th-TH": "Asia/Bangkok",
"id-ID": "Asia/Jakarta",
+ "ar-TN": "Africa/Tunis",
}
languages = list(language_timezone_mapping.keys())
diff --git a/api/controllers/common/fields.py b/api/controllers/common/fields.py
index df9de825de..c16a23fac8 100644
--- a/api/controllers/common/fields.py
+++ b/api/controllers/common/fields.py
@@ -1,62 +1,59 @@
-from flask_restx import Api, Namespace, fields
+from __future__ import annotations
-from libs.helper import AppIconUrlField
+from typing import Any, TypeAlias
-parameters__system_parameters = {
- "image_file_size_limit": fields.Integer,
- "video_file_size_limit": fields.Integer,
- "audio_file_size_limit": fields.Integer,
- "file_size_limit": fields.Integer,
- "workflow_file_upload_limit": fields.Integer,
-}
+from pydantic import BaseModel, ConfigDict, computed_field
+
+from core.file import helpers as file_helpers
+from models.model import IconType
+
+JSONValue: TypeAlias = str | int | float | bool | None | dict[str, Any] | list[Any]
+JSONObject: TypeAlias = dict[str, Any]
-def build_system_parameters_model(api_or_ns: Api | Namespace):
- """Build the system parameters model for the API or Namespace."""
- return api_or_ns.model("SystemParameters", parameters__system_parameters)
+class SystemParameters(BaseModel):
+ image_file_size_limit: int
+ video_file_size_limit: int
+ audio_file_size_limit: int
+ file_size_limit: int
+ workflow_file_upload_limit: int
-parameters_fields = {
- "opening_statement": fields.String,
- "suggested_questions": fields.Raw,
- "suggested_questions_after_answer": fields.Raw,
- "speech_to_text": fields.Raw,
- "text_to_speech": fields.Raw,
- "retriever_resource": fields.Raw,
- "annotation_reply": fields.Raw,
- "more_like_this": fields.Raw,
- "user_input_form": fields.Raw,
- "sensitive_word_avoidance": fields.Raw,
- "file_upload": fields.Raw,
- "system_parameters": fields.Nested(parameters__system_parameters),
-}
+class Parameters(BaseModel):
+ opening_statement: str | None = None
+ suggested_questions: list[str]
+ suggested_questions_after_answer: JSONObject
+ speech_to_text: JSONObject
+ text_to_speech: JSONObject
+ retriever_resource: JSONObject
+ annotation_reply: JSONObject
+ more_like_this: JSONObject
+ user_input_form: list[JSONObject]
+ sensitive_word_avoidance: JSONObject
+ file_upload: JSONObject
+ system_parameters: SystemParameters
-def build_parameters_model(api_or_ns: Api | Namespace):
- """Build the parameters model for the API or Namespace."""
- copied_fields = parameters_fields.copy()
- copied_fields["system_parameters"] = fields.Nested(build_system_parameters_model(api_or_ns))
- return api_or_ns.model("Parameters", copied_fields)
+class Site(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+ title: str
+ chat_color_theme: str | None = None
+ chat_color_theme_inverted: bool
+ icon_type: str | None = None
+ icon: str | None = None
+ icon_background: str | None = None
+ description: str | None = None
+ copyright: str | None = None
+ privacy_policy: str | None = None
+ custom_disclaimer: str | None = None
+ default_language: str
+ show_workflow_steps: bool
+ use_icon_as_answer_icon: bool
-site_fields = {
- "title": fields.String,
- "chat_color_theme": fields.String,
- "chat_color_theme_inverted": fields.Boolean,
- "icon_type": fields.String,
- "icon": fields.String,
- "icon_background": fields.String,
- "icon_url": AppIconUrlField,
- "description": fields.String,
- "copyright": fields.String,
- "privacy_policy": fields.String,
- "custom_disclaimer": fields.String,
- "default_language": fields.String,
- "show_workflow_steps": fields.Boolean,
- "use_icon_as_answer_icon": fields.Boolean,
-}
-
-
-def build_site_model(api_or_ns: Api | Namespace):
- """Build the site model for the API or Namespace."""
- return api_or_ns.model("Site", site_fields)
+ @computed_field(return_type=str | None) # type: ignore
+ @property
+ def icon_url(self) -> str | None:
+ if self.icon and self.icon_type == IconType.IMAGE:
+ return file_helpers.get_signed_file_url(self.icon)
+ return None
diff --git a/api/controllers/common/file_response.py b/api/controllers/common/file_response.py
new file mode 100644
index 0000000000..ca8ea3d52e
--- /dev/null
+++ b/api/controllers/common/file_response.py
@@ -0,0 +1,57 @@
+import os
+from email.message import Message
+from urllib.parse import quote
+
+from flask import Response
+
+HTML_MIME_TYPES = frozenset({"text/html", "application/xhtml+xml"})
+HTML_EXTENSIONS = frozenset({"html", "htm"})
+
+
+def _normalize_mime_type(mime_type: str | None) -> str:
+ if not mime_type:
+ return ""
+ message = Message()
+ message["Content-Type"] = mime_type
+ return message.get_content_type().strip().lower()
+
+
+def _is_html_extension(extension: str | None) -> bool:
+ if not extension:
+ return False
+ return extension.lstrip(".").lower() in HTML_EXTENSIONS
+
+
+def is_html_content(mime_type: str | None, filename: str | None, extension: str | None = None) -> bool:
+ normalized_mime_type = _normalize_mime_type(mime_type)
+ if normalized_mime_type in HTML_MIME_TYPES:
+ return True
+
+ if _is_html_extension(extension):
+ return True
+
+ if filename:
+ return _is_html_extension(os.path.splitext(filename)[1])
+
+ return False
+
+
+def enforce_download_for_html(
+ response: Response,
+ *,
+ mime_type: str | None,
+ filename: str | None,
+ extension: str | None = None,
+) -> bool:
+ if not is_html_content(mime_type, filename, extension):
+ return False
+
+ if filename:
+ encoded_filename = quote(filename)
+ response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{encoded_filename}"
+ else:
+ response.headers["Content-Disposition"] = "attachment"
+
+ response.headers["Content-Type"] = "application/octet-stream"
+ response.headers["X-Content-Type-Options"] = "nosniff"
+ return True
diff --git a/api/controllers/common/schema.py b/api/controllers/common/schema.py
new file mode 100644
index 0000000000..e0896a8dc2
--- /dev/null
+++ b/api/controllers/common/schema.py
@@ -0,0 +1,26 @@
+"""Helpers for registering Pydantic models with Flask-RESTX namespaces."""
+
+from flask_restx import Namespace
+from pydantic import BaseModel
+
+DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}"
+
+
+def register_schema_model(namespace: Namespace, model: type[BaseModel]) -> None:
+ """Register a single BaseModel with a namespace for Swagger documentation."""
+
+ namespace.schema_model(model.__name__, model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0))
+
+
+def register_schema_models(namespace: Namespace, *models: type[BaseModel]) -> None:
+ """Register multiple BaseModels with a namespace."""
+
+ for model in models:
+ register_schema_model(namespace, model)
+
+
+__all__ = [
+ "DEFAULT_REF_TEMPLATE_SWAGGER_2_0",
+ "register_schema_model",
+ "register_schema_models",
+]
diff --git a/api/controllers/console/admin.py b/api/controllers/console/admin.py
index 2c4d8709eb..a25ca5ef51 100644
--- a/api/controllers/console/admin.py
+++ b/api/controllers/console/admin.py
@@ -3,21 +3,47 @@ from functools import wraps
from typing import ParamSpec, TypeVar
from flask import request
-from flask_restx import Resource, fields, reqparse
+from flask_restx import Resource
+from pydantic import BaseModel, Field, field_validator
from sqlalchemy import select
-from sqlalchemy.orm import Session
from werkzeug.exceptions import NotFound, Unauthorized
-P = ParamSpec("P")
-R = TypeVar("R")
from configs import dify_config
from constants.languages import supported_language
-from controllers.console import api, console_ns
+from controllers.console import console_ns
from controllers.console.wraps import only_edition_cloud
+from core.db.session_factory import session_factory
from extensions.ext_database import db
from libs.token import extract_access_token
from models.model import App, InstalledApp, RecommendedApp
+P = ParamSpec("P")
+R = TypeVar("R")
+
+DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}"
+
+
+class InsertExploreAppPayload(BaseModel):
+ app_id: str = Field(...)
+ desc: str | None = None
+ copyright: str | None = None
+ privacy_policy: str | None = None
+ custom_disclaimer: str | None = None
+ language: str = Field(...)
+ category: str = Field(...)
+ position: int = Field(...)
+
+ @field_validator("language")
+ @classmethod
+ def validate_language(cls, value: str) -> str:
+ return supported_language(value)
+
+
+console_ns.schema_model(
+ InsertExploreAppPayload.__name__,
+ InsertExploreAppPayload.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0),
+)
+
def admin_required(view: Callable[P, R]):
@wraps(view)
@@ -38,61 +64,36 @@ def admin_required(view: Callable[P, R]):
@console_ns.route("/admin/insert-explore-apps")
class InsertExploreAppListApi(Resource):
- @api.doc("insert_explore_app")
- @api.doc(description="Insert or update an app in the explore list")
- @api.expect(
- api.model(
- "InsertExploreAppRequest",
- {
- "app_id": fields.String(required=True, description="Application ID"),
- "desc": fields.String(description="App description"),
- "copyright": fields.String(description="Copyright information"),
- "privacy_policy": fields.String(description="Privacy policy"),
- "custom_disclaimer": fields.String(description="Custom disclaimer"),
- "language": fields.String(required=True, description="Language code"),
- "category": fields.String(required=True, description="App category"),
- "position": fields.Integer(required=True, description="Display position"),
- },
- )
- )
- @api.response(200, "App updated successfully")
- @api.response(201, "App inserted successfully")
- @api.response(404, "App not found")
+ @console_ns.doc("insert_explore_app")
+ @console_ns.doc(description="Insert or update an app in the explore list")
+ @console_ns.expect(console_ns.models[InsertExploreAppPayload.__name__])
+ @console_ns.response(200, "App updated successfully")
+ @console_ns.response(201, "App inserted successfully")
+ @console_ns.response(404, "App not found")
@only_edition_cloud
@admin_required
def post(self):
- parser = (
- reqparse.RequestParser()
- .add_argument("app_id", type=str, required=True, nullable=False, location="json")
- .add_argument("desc", type=str, location="json")
- .add_argument("copyright", type=str, location="json")
- .add_argument("privacy_policy", type=str, location="json")
- .add_argument("custom_disclaimer", type=str, location="json")
- .add_argument("language", type=supported_language, required=True, nullable=False, location="json")
- .add_argument("category", type=str, required=True, nullable=False, location="json")
- .add_argument("position", type=int, required=True, nullable=False, location="json")
- )
- args = parser.parse_args()
+ payload = InsertExploreAppPayload.model_validate(console_ns.payload)
- app = db.session.execute(select(App).where(App.id == args["app_id"])).scalar_one_or_none()
+ app = db.session.execute(select(App).where(App.id == payload.app_id)).scalar_one_or_none()
if not app:
- raise NotFound(f"App '{args['app_id']}' is not found")
+ raise NotFound(f"App '{payload.app_id}' is not found")
site = app.site
if not site:
- desc = args["desc"] or ""
- copy_right = args["copyright"] or ""
- privacy_policy = args["privacy_policy"] or ""
- custom_disclaimer = args["custom_disclaimer"] or ""
+ desc = payload.desc or ""
+ copy_right = payload.copyright or ""
+ privacy_policy = payload.privacy_policy or ""
+ custom_disclaimer = payload.custom_disclaimer or ""
else:
- desc = site.description or args["desc"] or ""
- copy_right = site.copyright or args["copyright"] or ""
- privacy_policy = site.privacy_policy or args["privacy_policy"] or ""
- custom_disclaimer = site.custom_disclaimer or args["custom_disclaimer"] or ""
+ desc = site.description or payload.desc or ""
+ copy_right = site.copyright or payload.copyright or ""
+ privacy_policy = site.privacy_policy or payload.privacy_policy or ""
+ custom_disclaimer = site.custom_disclaimer or payload.custom_disclaimer or ""
- with Session(db.engine) as session:
+ with session_factory.create_session() as session:
recommended_app = session.execute(
- select(RecommendedApp).where(RecommendedApp.app_id == args["app_id"])
+ select(RecommendedApp).where(RecommendedApp.app_id == payload.app_id)
).scalar_one_or_none()
if not recommended_app:
@@ -102,9 +103,9 @@ class InsertExploreAppListApi(Resource):
copyright=copy_right,
privacy_policy=privacy_policy,
custom_disclaimer=custom_disclaimer,
- language=args["language"],
- category=args["category"],
- position=args["position"],
+ language=payload.language,
+ category=payload.category,
+ position=payload.position,
)
db.session.add(recommended_app)
@@ -118,9 +119,9 @@ class InsertExploreAppListApi(Resource):
recommended_app.copyright = copy_right
recommended_app.privacy_policy = privacy_policy
recommended_app.custom_disclaimer = custom_disclaimer
- recommended_app.language = args["language"]
- recommended_app.category = args["category"]
- recommended_app.position = args["position"]
+ recommended_app.language = payload.language
+ recommended_app.category = payload.category
+ recommended_app.position = payload.position
app.is_public = True
@@ -131,14 +132,14 @@ class InsertExploreAppListApi(Resource):
@console_ns.route("/admin/insert-explore-apps/")
class InsertExploreAppApi(Resource):
- @api.doc("delete_explore_app")
- @api.doc(description="Remove an app from the explore list")
- @api.doc(params={"app_id": "Application ID to remove"})
- @api.response(204, "App removed successfully")
+ @console_ns.doc("delete_explore_app")
+ @console_ns.doc(description="Remove an app from the explore list")
+ @console_ns.doc(params={"app_id": "Application ID to remove"})
+ @console_ns.response(204, "App removed successfully")
@only_edition_cloud
@admin_required
def delete(self, app_id):
- with Session(db.engine) as session:
+ with session_factory.create_session() as session:
recommended_app = session.execute(
select(RecommendedApp).where(RecommendedApp.app_id == str(app_id))
).scalar_one_or_none()
@@ -146,13 +147,13 @@ class InsertExploreAppApi(Resource):
if not recommended_app:
return {"result": "success"}, 204
- with Session(db.engine) as session:
+ with session_factory.create_session() as session:
app = session.execute(select(App).where(App.id == recommended_app.app_id)).scalar_one_or_none()
if app:
app.is_public = False
- with Session(db.engine) as session:
+ with session_factory.create_session() as session:
installed_apps = (
session.execute(
select(InstalledApp).where(
diff --git a/api/controllers/console/apikey.py b/api/controllers/console/apikey.py
index 4f04af7932..9b0d4b1a78 100644
--- a/api/controllers/console/apikey.py
+++ b/api/controllers/console/apikey.py
@@ -11,7 +11,7 @@ from libs.login import current_account_with_tenant, login_required
from models.dataset import Dataset
from models.model import ApiToken, App
-from . import api, console_ns
+from . import console_ns
from .wraps import account_initialization_required, edit_permission_required, setup_required
api_key_fields = {
@@ -24,6 +24,12 @@ api_key_fields = {
api_key_list = {"data": fields.List(fields.Nested(api_key_fields), attribute="items")}
+api_key_item_model = console_ns.model("ApiKeyItem", api_key_fields)
+
+api_key_list_model = console_ns.model(
+ "ApiKeyList", {"data": fields.List(fields.Nested(api_key_item_model), attribute="items")}
+)
+
def _get_resource(resource_id, tenant_id, resource_model):
if resource_model == App:
@@ -52,7 +58,7 @@ class BaseApiKeyListResource(Resource):
token_prefix: str | None = None
max_keys = 10
- @marshal_with(api_key_list)
+ @marshal_with(api_key_list_model)
def get(self, resource_id):
assert self.resource_id_field is not None, "resource_id_field must be set"
resource_id = str(resource_id)
@@ -66,7 +72,7 @@ class BaseApiKeyListResource(Resource):
).all()
return {"items": keys}
- @marshal_with(api_key_fields)
+ @marshal_with(api_key_item_model)
@edit_permission_required
def post(self, resource_id):
assert self.resource_id_field is not None, "resource_id_field must be set"
@@ -104,14 +110,11 @@ class BaseApiKeyResource(Resource):
resource_model: type | None = None
resource_id_field: str | None = None
- def delete(self, resource_id, api_key_id):
+ def delete(self, resource_id: str, api_key_id: str):
assert self.resource_id_field is not None, "resource_id_field must be set"
- resource_id = str(resource_id)
- api_key_id = str(api_key_id)
current_user, current_tenant_id = current_account_with_tenant()
_get_resource(resource_id, current_tenant_id, self.resource_model)
- # The role of the current user in the ta table must be admin or owner
if not current_user.is_admin_or_owner:
raise Forbidden()
@@ -136,20 +139,20 @@ class BaseApiKeyResource(Resource):
@console_ns.route("/apps//api-keys")
class AppApiKeyListResource(BaseApiKeyListResource):
- @api.doc("get_app_api_keys")
- @api.doc(description="Get all API keys for an app")
- @api.doc(params={"resource_id": "App ID"})
- @api.response(200, "Success", api_key_list)
- def get(self, resource_id):
+ @console_ns.doc("get_app_api_keys")
+ @console_ns.doc(description="Get all API keys for an app")
+ @console_ns.doc(params={"resource_id": "App ID"})
+ @console_ns.response(200, "Success", api_key_list_model)
+ def get(self, resource_id): # type: ignore
"""Get all API keys for an app"""
return super().get(resource_id)
- @api.doc("create_app_api_key")
- @api.doc(description="Create a new API key for an app")
- @api.doc(params={"resource_id": "App ID"})
- @api.response(201, "API key created successfully", api_key_fields)
- @api.response(400, "Maximum keys exceeded")
- def post(self, resource_id):
+ @console_ns.doc("create_app_api_key")
+ @console_ns.doc(description="Create a new API key for an app")
+ @console_ns.doc(params={"resource_id": "App ID"})
+ @console_ns.response(201, "API key created successfully", api_key_item_model)
+ @console_ns.response(400, "Maximum keys exceeded")
+ def post(self, resource_id): # type: ignore
"""Create a new API key for an app"""
return super().post(resource_id)
@@ -161,10 +164,10 @@ class AppApiKeyListResource(BaseApiKeyListResource):
@console_ns.route("/apps//api-keys/")
class AppApiKeyResource(BaseApiKeyResource):
- @api.doc("delete_app_api_key")
- @api.doc(description="Delete an API key for an app")
- @api.doc(params={"resource_id": "App ID", "api_key_id": "API key ID"})
- @api.response(204, "API key deleted successfully")
+ @console_ns.doc("delete_app_api_key")
+ @console_ns.doc(description="Delete an API key for an app")
+ @console_ns.doc(params={"resource_id": "App ID", "api_key_id": "API key ID"})
+ @console_ns.response(204, "API key deleted successfully")
def delete(self, resource_id, api_key_id):
"""Delete an API key for an app"""
return super().delete(resource_id, api_key_id)
@@ -176,20 +179,20 @@ class AppApiKeyResource(BaseApiKeyResource):
@console_ns.route("/datasets//api-keys")
class DatasetApiKeyListResource(BaseApiKeyListResource):
- @api.doc("get_dataset_api_keys")
- @api.doc(description="Get all API keys for a dataset")
- @api.doc(params={"resource_id": "Dataset ID"})
- @api.response(200, "Success", api_key_list)
- def get(self, resource_id):
+ @console_ns.doc("get_dataset_api_keys")
+ @console_ns.doc(description="Get all API keys for a dataset")
+ @console_ns.doc(params={"resource_id": "Dataset ID"})
+ @console_ns.response(200, "Success", api_key_list_model)
+ def get(self, resource_id): # type: ignore
"""Get all API keys for a dataset"""
return super().get(resource_id)
- @api.doc("create_dataset_api_key")
- @api.doc(description="Create a new API key for a dataset")
- @api.doc(params={"resource_id": "Dataset ID"})
- @api.response(201, "API key created successfully", api_key_fields)
- @api.response(400, "Maximum keys exceeded")
- def post(self, resource_id):
+ @console_ns.doc("create_dataset_api_key")
+ @console_ns.doc(description="Create a new API key for a dataset")
+ @console_ns.doc(params={"resource_id": "Dataset ID"})
+ @console_ns.response(201, "API key created successfully", api_key_item_model)
+ @console_ns.response(400, "Maximum keys exceeded")
+ def post(self, resource_id): # type: ignore
"""Create a new API key for a dataset"""
return super().post(resource_id)
@@ -201,10 +204,10 @@ class DatasetApiKeyListResource(BaseApiKeyListResource):
@console_ns.route("/datasets//api-keys/")
class DatasetApiKeyResource(BaseApiKeyResource):
- @api.doc("delete_dataset_api_key")
- @api.doc(description="Delete an API key for a dataset")
- @api.doc(params={"resource_id": "Dataset ID", "api_key_id": "API key ID"})
- @api.response(204, "API key deleted successfully")
+ @console_ns.doc("delete_dataset_api_key")
+ @console_ns.doc(description="Delete an API key for a dataset")
+ @console_ns.doc(params={"resource_id": "Dataset ID", "api_key_id": "API key ID"})
+ @console_ns.response(204, "API key deleted successfully")
def delete(self, resource_id, api_key_id):
"""Delete an API key for a dataset"""
return super().delete(resource_id, api_key_id)
diff --git a/api/controllers/console/app/advanced_prompt_template.py b/api/controllers/console/app/advanced_prompt_template.py
index 075345d860..3bd61feb44 100644
--- a/api/controllers/console/app/advanced_prompt_template.py
+++ b/api/controllers/console/app/advanced_prompt_template.py
@@ -1,32 +1,39 @@
-from flask_restx import Resource, fields, reqparse
+from flask import request
+from flask_restx import Resource, fields
+from pydantic import BaseModel, Field
-from controllers.console import api, console_ns
+from controllers.console import console_ns
from controllers.console.wraps import account_initialization_required, setup_required
from libs.login import login_required
from services.advanced_prompt_template_service import AdvancedPromptTemplateService
-parser = (
- reqparse.RequestParser()
- .add_argument("app_mode", type=str, required=True, location="args", help="Application mode")
- .add_argument("model_mode", type=str, required=True, location="args", help="Model mode")
- .add_argument("has_context", type=str, required=False, default="true", location="args", help="Whether has context")
- .add_argument("model_name", type=str, required=True, location="args", help="Model name")
+
+class AdvancedPromptTemplateQuery(BaseModel):
+ app_mode: str = Field(..., description="Application mode")
+ model_mode: str = Field(..., description="Model mode")
+ has_context: str = Field(default="true", description="Whether has context")
+ model_name: str = Field(..., description="Model name")
+
+
+console_ns.schema_model(
+ AdvancedPromptTemplateQuery.__name__,
+ AdvancedPromptTemplateQuery.model_json_schema(ref_template="#/definitions/{model}"),
)
@console_ns.route("/app/prompt-templates")
class AdvancedPromptTemplateList(Resource):
- @api.doc("get_advanced_prompt_templates")
- @api.doc(description="Get advanced prompt templates based on app mode and model configuration")
- @api.expect(parser)
- @api.response(
+ @console_ns.doc("get_advanced_prompt_templates")
+ @console_ns.doc(description="Get advanced prompt templates based on app mode and model configuration")
+ @console_ns.expect(console_ns.models[AdvancedPromptTemplateQuery.__name__])
+ @console_ns.response(
200, "Prompt templates retrieved successfully", fields.List(fields.Raw(description="Prompt template data"))
)
- @api.response(400, "Invalid request parameters")
+ @console_ns.response(400, "Invalid request parameters")
@setup_required
@login_required
@account_initialization_required
def get(self):
- args = parser.parse_args()
+ args = AdvancedPromptTemplateQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore
- return AdvancedPromptTemplateService.get_prompt(args)
+ return AdvancedPromptTemplateService.get_prompt(args.model_dump())
diff --git a/api/controllers/console/app/agent.py b/api/controllers/console/app/agent.py
index fde28fdb98..cfdb9cf417 100644
--- a/api/controllers/console/app/agent.py
+++ b/api/controllers/console/app/agent.py
@@ -1,6 +1,8 @@
-from flask_restx import Resource, fields, reqparse
+from flask import request
+from flask_restx import Resource, fields
+from pydantic import BaseModel, Field, field_validator
-from controllers.console import api, console_ns
+from controllers.console import console_ns
from controllers.console.app.wraps import get_app_model
from controllers.console.wraps import account_initialization_required, setup_required
from libs.helper import uuid_value
@@ -8,27 +10,40 @@ from libs.login import login_required
from models.model import AppMode
from services.agent_service import AgentService
-parser = (
- reqparse.RequestParser()
- .add_argument("message_id", type=uuid_value, required=True, location="args", help="Message UUID")
- .add_argument("conversation_id", type=uuid_value, required=True, location="args", help="Conversation UUID")
+DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}"
+
+
+class AgentLogQuery(BaseModel):
+ message_id: str = Field(..., description="Message UUID")
+ conversation_id: str = Field(..., description="Conversation UUID")
+
+ @field_validator("message_id", "conversation_id")
+ @classmethod
+ def validate_uuid(cls, value: str) -> str:
+ return uuid_value(value)
+
+
+console_ns.schema_model(
+ AgentLogQuery.__name__, AgentLogQuery.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)
)
@console_ns.route("/apps//agent/logs")
class AgentLogApi(Resource):
- @api.doc("get_agent_logs")
- @api.doc(description="Get agent execution logs for an application")
- @api.doc(params={"app_id": "Application ID"})
- @api.expect(parser)
- @api.response(200, "Agent logs retrieved successfully", fields.List(fields.Raw(description="Agent log entries")))
- @api.response(400, "Invalid request parameters")
+ @console_ns.doc("get_agent_logs")
+ @console_ns.doc(description="Get agent execution logs for an application")
+ @console_ns.doc(params={"app_id": "Application ID"})
+ @console_ns.expect(console_ns.models[AgentLogQuery.__name__])
+ @console_ns.response(
+ 200, "Agent logs retrieved successfully", fields.List(fields.Raw(description="Agent log entries"))
+ )
+ @console_ns.response(400, "Invalid request parameters")
@setup_required
@login_required
@account_initialization_required
@get_app_model(mode=[AppMode.AGENT_CHAT])
def get(self, app_model):
"""Get agent logs"""
- args = parser.parse_args()
+ args = AgentLogQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore
- return AgentService.get_agent_logs(app_model, args["conversation_id"], args["message_id"])
+ return AgentService.get_agent_logs(app_model, args.conversation_id, args.message_id)
diff --git a/api/controllers/console/app/annotation.py b/api/controllers/console/app/annotation.py
index bc4113b5c7..6a4c1528b0 100644
--- a/api/controllers/console/app/annotation.py
+++ b/api/controllers/console/app/annotation.py
@@ -1,12 +1,15 @@
-from typing import Literal
+from typing import Any, Literal
-from flask import request
-from flask_restx import Resource, fields, marshal, marshal_with, reqparse
+from flask import abort, make_response, request
+from flask_restx import Resource, fields, marshal, marshal_with
+from pydantic import BaseModel, Field, field_validator
from controllers.common.errors import NoFileUploadedError, TooManyFilesError
-from controllers.console import api, console_ns
+from controllers.console import console_ns
from controllers.console.wraps import (
account_initialization_required,
+ annotation_import_concurrency_limit,
+ annotation_import_rate_limit,
cloud_edition_billing_resource_check,
edit_permission_required,
setup_required,
@@ -15,29 +18,87 @@ from extensions.ext_redis import redis_client
from fields.annotation_fields import (
annotation_fields,
annotation_hit_history_fields,
+ build_annotation_model,
)
from libs.helper import uuid_value
from libs.login import login_required
from services.annotation_service import AppAnnotationService
+DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}"
+
+
+class AnnotationReplyPayload(BaseModel):
+ score_threshold: float = Field(..., description="Score threshold for annotation matching")
+ embedding_provider_name: str = Field(..., description="Embedding provider name")
+ embedding_model_name: str = Field(..., description="Embedding model name")
+
+
+class AnnotationSettingUpdatePayload(BaseModel):
+ score_threshold: float = Field(..., description="Score threshold")
+
+
+class AnnotationListQuery(BaseModel):
+ page: int = Field(default=1, ge=1, description="Page number")
+ limit: int = Field(default=20, ge=1, description="Page size")
+ keyword: str = Field(default="", description="Search keyword")
+
+
+class CreateAnnotationPayload(BaseModel):
+ message_id: str | None = Field(default=None, description="Message ID")
+ question: str | None = Field(default=None, description="Question text")
+ answer: str | None = Field(default=None, description="Answer text")
+ content: str | None = Field(default=None, description="Content text")
+ annotation_reply: dict[str, Any] | None = Field(default=None, description="Annotation reply data")
+
+ @field_validator("message_id")
+ @classmethod
+ def validate_message_id(cls, value: str | None) -> str | None:
+ if value is None:
+ return value
+ return uuid_value(value)
+
+
+class UpdateAnnotationPayload(BaseModel):
+ question: str | None = None
+ answer: str | None = None
+ content: str | None = None
+ annotation_reply: dict[str, Any] | None = None
+
+
+class AnnotationReplyStatusQuery(BaseModel):
+ action: Literal["enable", "disable"]
+
+
+class AnnotationFilePayload(BaseModel):
+ message_id: str = Field(..., description="Message ID")
+
+ @field_validator("message_id")
+ @classmethod
+ def validate_message_id(cls, value: str) -> str:
+ return uuid_value(value)
+
+
+def reg(model: type[BaseModel]) -> None:
+ console_ns.schema_model(model.__name__, model.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0))
+
+
+reg(AnnotationReplyPayload)
+reg(AnnotationSettingUpdatePayload)
+reg(AnnotationListQuery)
+reg(CreateAnnotationPayload)
+reg(UpdateAnnotationPayload)
+reg(AnnotationReplyStatusQuery)
+reg(AnnotationFilePayload)
+
@console_ns.route("/apps//annotation-reply/")
class AnnotationReplyActionApi(Resource):
- @api.doc("annotation_reply_action")
- @api.doc(description="Enable or disable annotation reply for an app")
- @api.doc(params={"app_id": "Application ID", "action": "Action to perform (enable/disable)"})
- @api.expect(
- api.model(
- "AnnotationReplyActionRequest",
- {
- "score_threshold": fields.Float(required=True, description="Score threshold for annotation matching"),
- "embedding_provider_name": fields.String(required=True, description="Embedding provider name"),
- "embedding_model_name": fields.String(required=True, description="Embedding model name"),
- },
- )
- )
- @api.response(200, "Action completed successfully")
- @api.response(403, "Insufficient permissions")
+ @console_ns.doc("annotation_reply_action")
+ @console_ns.doc(description="Enable or disable annotation reply for an app")
+ @console_ns.doc(params={"app_id": "Application ID", "action": "Action to perform (enable/disable)"})
+ @console_ns.expect(console_ns.models[AnnotationReplyPayload.__name__])
+ @console_ns.response(200, "Action completed successfully")
+ @console_ns.response(403, "Insufficient permissions")
@setup_required
@login_required
@account_initialization_required
@@ -45,15 +106,9 @@ class AnnotationReplyActionApi(Resource):
@edit_permission_required
def post(self, app_id, action: Literal["enable", "disable"]):
app_id = str(app_id)
- parser = (
- reqparse.RequestParser()
- .add_argument("score_threshold", required=True, type=float, location="json")
- .add_argument("embedding_provider_name", required=True, type=str, location="json")
- .add_argument("embedding_model_name", required=True, type=str, location="json")
- )
- args = parser.parse_args()
+ args = AnnotationReplyPayload.model_validate(console_ns.payload)
if action == "enable":
- result = AppAnnotationService.enable_app_annotation(args, app_id)
+ result = AppAnnotationService.enable_app_annotation(args.model_dump(), app_id)
elif action == "disable":
result = AppAnnotationService.disable_app_annotation(app_id)
return result, 200
@@ -61,11 +116,11 @@ class AnnotationReplyActionApi(Resource):
@console_ns.route("/apps//annotation-setting")
class AppAnnotationSettingDetailApi(Resource):
- @api.doc("get_annotation_setting")
- @api.doc(description="Get annotation settings for an app")
- @api.doc(params={"app_id": "Application ID"})
- @api.response(200, "Annotation settings retrieved successfully")
- @api.response(403, "Insufficient permissions")
+ @console_ns.doc("get_annotation_setting")
+ @console_ns.doc(description="Get annotation settings for an app")
+ @console_ns.doc(params={"app_id": "Application ID"})
+ @console_ns.response(200, "Annotation settings retrieved successfully")
+ @console_ns.response(403, "Insufficient permissions")
@setup_required
@login_required
@account_initialization_required
@@ -78,21 +133,12 @@ class AppAnnotationSettingDetailApi(Resource):
@console_ns.route("/apps//annotation-settings/")
class AppAnnotationSettingUpdateApi(Resource):
- @api.doc("update_annotation_setting")
- @api.doc(description="Update annotation settings for an app")
- @api.doc(params={"app_id": "Application ID", "annotation_setting_id": "Annotation setting ID"})
- @api.expect(
- api.model(
- "AnnotationSettingUpdateRequest",
- {
- "score_threshold": fields.Float(required=True, description="Score threshold"),
- "embedding_provider_name": fields.String(required=True, description="Embedding provider"),
- "embedding_model_name": fields.String(required=True, description="Embedding model"),
- },
- )
- )
- @api.response(200, "Settings updated successfully")
- @api.response(403, "Insufficient permissions")
+ @console_ns.doc("update_annotation_setting")
+ @console_ns.doc(description="Update annotation settings for an app")
+ @console_ns.doc(params={"app_id": "Application ID", "annotation_setting_id": "Annotation setting ID"})
+ @console_ns.expect(console_ns.models[AnnotationSettingUpdatePayload.__name__])
+ @console_ns.response(200, "Settings updated successfully")
+ @console_ns.response(403, "Insufficient permissions")
@setup_required
@login_required
@account_initialization_required
@@ -101,20 +147,19 @@ class AppAnnotationSettingUpdateApi(Resource):
app_id = str(app_id)
annotation_setting_id = str(annotation_setting_id)
- parser = reqparse.RequestParser().add_argument("score_threshold", required=True, type=float, location="json")
- args = parser.parse_args()
+ args = AnnotationSettingUpdatePayload.model_validate(console_ns.payload)
- result = AppAnnotationService.update_app_annotation_setting(app_id, annotation_setting_id, args)
+ result = AppAnnotationService.update_app_annotation_setting(app_id, annotation_setting_id, args.model_dump())
return result, 200
@console_ns.route("/apps//annotation-reply//status/")
class AnnotationReplyActionStatusApi(Resource):
- @api.doc("get_annotation_reply_action_status")
- @api.doc(description="Get status of annotation reply action job")
- @api.doc(params={"app_id": "Application ID", "job_id": "Job ID", "action": "Action type"})
- @api.response(200, "Job status retrieved successfully")
- @api.response(403, "Insufficient permissions")
+ @console_ns.doc("get_annotation_reply_action_status")
+ @console_ns.doc(description="Get status of annotation reply action job")
+ @console_ns.doc(params={"app_id": "Application ID", "job_id": "Job ID", "action": "Action type"})
+ @console_ns.response(200, "Job status retrieved successfully")
+ @console_ns.response(403, "Insufficient permissions")
@setup_required
@login_required
@account_initialization_required
@@ -138,25 +183,21 @@ class AnnotationReplyActionStatusApi(Resource):
@console_ns.route("/apps//annotations")
class AnnotationApi(Resource):
- @api.doc("list_annotations")
- @api.doc(description="Get annotations for an app with pagination")
- @api.doc(params={"app_id": "Application ID"})
- @api.expect(
- api.parser()
- .add_argument("page", type=int, location="args", default=1, help="Page number")
- .add_argument("limit", type=int, location="args", default=20, help="Page size")
- .add_argument("keyword", type=str, location="args", default="", help="Search keyword")
- )
- @api.response(200, "Annotations retrieved successfully")
- @api.response(403, "Insufficient permissions")
+ @console_ns.doc("list_annotations")
+ @console_ns.doc(description="Get annotations for an app with pagination")
+ @console_ns.doc(params={"app_id": "Application ID"})
+ @console_ns.expect(console_ns.models[AnnotationListQuery.__name__])
+ @console_ns.response(200, "Annotations retrieved successfully")
+ @console_ns.response(403, "Insufficient permissions")
@setup_required
@login_required
@account_initialization_required
@edit_permission_required
def get(self, app_id):
- page = request.args.get("page", default=1, type=int)
- limit = request.args.get("limit", default=20, type=int)
- keyword = request.args.get("keyword", default="", type=str)
+ args = AnnotationListQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore
+ page = args.page
+ limit = args.limit
+ keyword = args.keyword
app_id = str(app_id)
annotation_list, total = AppAnnotationService.get_annotation_list_by_app_id(app_id, page, limit, keyword)
@@ -169,23 +210,12 @@ class AnnotationApi(Resource):
}
return response, 200
- @api.doc("create_annotation")
- @api.doc(description="Create a new annotation for an app")
- @api.doc(params={"app_id": "Application ID"})
- @api.expect(
- api.model(
- "CreateAnnotationRequest",
- {
- "message_id": fields.String(description="Message ID (optional)"),
- "question": fields.String(description="Question text (required when message_id not provided)"),
- "answer": fields.String(description="Answer text (use 'answer' or 'content')"),
- "content": fields.String(description="Content text (use 'answer' or 'content')"),
- "annotation_reply": fields.Raw(description="Annotation reply data"),
- },
- )
- )
- @api.response(201, "Annotation created successfully", annotation_fields)
- @api.response(403, "Insufficient permissions")
+ @console_ns.doc("create_annotation")
+ @console_ns.doc(description="Create a new annotation for an app")
+ @console_ns.doc(params={"app_id": "Application ID"})
+ @console_ns.expect(console_ns.models[CreateAnnotationPayload.__name__])
+ @console_ns.response(201, "Annotation created successfully", build_annotation_model(console_ns))
+ @console_ns.response(403, "Insufficient permissions")
@setup_required
@login_required
@account_initialization_required
@@ -194,16 +224,9 @@ class AnnotationApi(Resource):
@edit_permission_required
def post(self, app_id):
app_id = str(app_id)
- parser = (
- reqparse.RequestParser()
- .add_argument("message_id", required=False, type=uuid_value, location="json")
- .add_argument("question", required=False, type=str, location="json")
- .add_argument("answer", required=False, type=str, location="json")
- .add_argument("content", required=False, type=str, location="json")
- .add_argument("annotation_reply", required=False, type=dict, location="json")
- )
- args = parser.parse_args()
- annotation = AppAnnotationService.up_insert_app_annotation_from_message(args, app_id)
+ args = CreateAnnotationPayload.model_validate(console_ns.payload)
+ data = args.model_dump(exclude_none=True)
+ annotation = AppAnnotationService.up_insert_app_annotation_from_message(data, app_id)
return annotation
@setup_required
@@ -235,11 +258,15 @@ class AnnotationApi(Resource):
@console_ns.route("/apps/<uuid:app_id>/annotations/export")
class AnnotationExportApi(Resource):
- @api.doc("export_annotations")
- @api.doc(description="Export all annotations for an app")
- @api.doc(params={"app_id": "Application ID"})
- @api.response(200, "Annotations exported successfully", fields.List(fields.Nested(annotation_fields)))
- @api.response(403, "Insufficient permissions")
+ @console_ns.doc("export_annotations")
+ @console_ns.doc(description="Export all annotations for an app with CSV injection protection")
+ @console_ns.doc(params={"app_id": "Application ID"})
+ @console_ns.response(
+ 200,
+ "Annotations exported successfully",
+ console_ns.model("AnnotationList", {"data": fields.List(fields.Nested(build_annotation_model(console_ns)))}),
+ )
+ @console_ns.response(403, "Insufficient permissions")
@setup_required
@login_required
@account_initialization_required
@@ -247,26 +274,25 @@ class AnnotationExportApi(Resource):
def get(self, app_id):
app_id = str(app_id)
annotation_list = AppAnnotationService.export_annotation_list_by_app_id(app_id)
- response = {"data": marshal(annotation_list, annotation_fields)}
- return response, 200
+ response_data = {"data": marshal(annotation_list, annotation_fields)}
+ # Create response with secure headers for CSV export
+ response = make_response(response_data, 200)
+ response.headers["Content-Type"] = "application/json; charset=utf-8"
+ response.headers["X-Content-Type-Options"] = "nosniff"
-parser = (
- reqparse.RequestParser()
- .add_argument("question", required=True, type=str, location="json")
- .add_argument("answer", required=True, type=str, location="json")
-)
+ return response
@console_ns.route("/apps/<uuid:app_id>/annotations/<uuid:annotation_id>")
class AnnotationUpdateDeleteApi(Resource):
- @api.doc("update_delete_annotation")
- @api.doc(description="Update or delete an annotation")
- @api.doc(params={"app_id": "Application ID", "annotation_id": "Annotation ID"})
- @api.response(200, "Annotation updated successfully", annotation_fields)
- @api.response(204, "Annotation deleted successfully")
- @api.response(403, "Insufficient permissions")
- @api.expect(parser)
+ @console_ns.doc("update_delete_annotation")
+ @console_ns.doc(description="Update or delete an annotation")
+ @console_ns.doc(params={"app_id": "Application ID", "annotation_id": "Annotation ID"})
+ @console_ns.response(200, "Annotation updated successfully", build_annotation_model(console_ns))
+ @console_ns.response(204, "Annotation deleted successfully")
+ @console_ns.response(403, "Insufficient permissions")
+ @console_ns.expect(console_ns.models[UpdateAnnotationPayload.__name__])
@setup_required
@login_required
@account_initialization_required
@@ -276,8 +302,10 @@ class AnnotationUpdateDeleteApi(Resource):
def post(self, app_id, annotation_id):
app_id = str(app_id)
annotation_id = str(annotation_id)
- args = parser.parse_args()
- annotation = AppAnnotationService.update_app_annotation_directly(args, app_id, annotation_id)
+ args = UpdateAnnotationPayload.model_validate(console_ns.payload)
+ annotation = AppAnnotationService.update_app_annotation_directly(
+ args.model_dump(exclude_none=True), app_id, annotation_id
+ )
return annotation
@setup_required
@@ -293,19 +321,26 @@ class AnnotationUpdateDeleteApi(Resource):
@console_ns.route("/apps/<uuid:app_id>/annotations/batch-import")
class AnnotationBatchImportApi(Resource):
- @api.doc("batch_import_annotations")
- @api.doc(description="Batch import annotations from CSV file")
- @api.doc(params={"app_id": "Application ID"})
- @api.response(200, "Batch import started successfully")
- @api.response(403, "Insufficient permissions")
- @api.response(400, "No file uploaded or too many files")
+ @console_ns.doc("batch_import_annotations")
+ @console_ns.doc(description="Batch import annotations from CSV file with rate limiting and security checks")
+ @console_ns.doc(params={"app_id": "Application ID"})
+ @console_ns.response(200, "Batch import started successfully")
+ @console_ns.response(403, "Insufficient permissions")
+ @console_ns.response(400, "No file uploaded or too many files")
+ @console_ns.response(413, "File too large")
+ @console_ns.response(429, "Too many requests or concurrent imports")
@setup_required
@login_required
@account_initialization_required
@cloud_edition_billing_resource_check("annotation")
+ @annotation_import_rate_limit
+ @annotation_import_concurrency_limit
@edit_permission_required
def post(self, app_id):
+ from configs import dify_config
+
app_id = str(app_id)
+
# check file
if "file" not in request.files:
raise NoFileUploadedError()
@@ -315,19 +350,37 @@ class AnnotationBatchImportApi(Resource):
# get file from request
file = request.files["file"]
+
# check file type
if not file.filename or not file.filename.lower().endswith(".csv"):
raise ValueError("Invalid file type. Only CSV files are allowed")
+
+ # Check file size before processing
+ file.seek(0, 2) # Seek to end of file
+ file_size = file.tell()
+ file.seek(0) # Reset to beginning
+
+ max_size_bytes = dify_config.ANNOTATION_IMPORT_FILE_SIZE_LIMIT * 1024 * 1024
+ if file_size > max_size_bytes:
+ abort(
+ 413,
+ f"File size exceeds maximum limit of {dify_config.ANNOTATION_IMPORT_FILE_SIZE_LIMIT}MB. "
+ f"Please reduce the file size and try again.",
+ )
+
+ if file_size == 0:
+ raise ValueError("The uploaded file is empty")
+
return AppAnnotationService.batch_import_app_annotations(app_id, file)
@console_ns.route("/apps/<uuid:app_id>/annotations/batch-import-status/<uuid:job_id>")
class AnnotationBatchImportStatusApi(Resource):
- @api.doc("get_batch_import_status")
- @api.doc(description="Get status of batch import job")
- @api.doc(params={"app_id": "Application ID", "job_id": "Job ID"})
- @api.response(200, "Job status retrieved successfully")
- @api.response(403, "Insufficient permissions")
+ @console_ns.doc("get_batch_import_status")
+ @console_ns.doc(description="Get status of batch import job")
+ @console_ns.doc(params={"app_id": "Application ID", "job_id": "Job ID"})
+ @console_ns.response(200, "Job status retrieved successfully")
+ @console_ns.response(403, "Insufficient permissions")
@setup_required
@login_required
@account_initialization_required
@@ -350,18 +403,27 @@ class AnnotationBatchImportStatusApi(Resource):
@console_ns.route("/apps/<uuid:app_id>/annotations/<uuid:annotation_id>/hit-histories")
class AnnotationHitHistoryListApi(Resource):
- @api.doc("list_annotation_hit_histories")
- @api.doc(description="Get hit histories for an annotation")
- @api.doc(params={"app_id": "Application ID", "annotation_id": "Annotation ID"})
- @api.expect(
- api.parser()
+ @console_ns.doc("list_annotation_hit_histories")
+ @console_ns.doc(description="Get hit histories for an annotation")
+ @console_ns.doc(params={"app_id": "Application ID", "annotation_id": "Annotation ID"})
+ @console_ns.expect(
+ console_ns.parser()
.add_argument("page", type=int, location="args", default=1, help="Page number")
.add_argument("limit", type=int, location="args", default=20, help="Page size")
)
- @api.response(
- 200, "Hit histories retrieved successfully", fields.List(fields.Nested(annotation_hit_history_fields))
+ @console_ns.response(
+ 200,
+ "Hit histories retrieved successfully",
+ console_ns.model(
+ "AnnotationHitHistoryList",
+ {
+ "data": fields.List(
+ fields.Nested(console_ns.model("AnnotationHitHistoryItem", annotation_hit_history_fields))
+ )
+ },
+ ),
)
- @api.response(403, "Insufficient permissions")
+ @console_ns.response(403, "Insufficient permissions")
@setup_required
@login_required
@account_initialization_required
diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py
index 0724a6355d..44cf89d6a9 100644
--- a/api/controllers/console/app/app.py
+++ b/api/controllers/console/app/app.py
@@ -1,25 +1,37 @@
+import re
import uuid
+from typing import Literal
-from flask_restx import Resource, fields, inputs, marshal, marshal_with, reqparse
+from flask import request
+from flask_restx import Resource, fields, marshal, marshal_with
+from pydantic import BaseModel, Field, field_validator
from sqlalchemy import select
from sqlalchemy.orm import Session
-from werkzeug.exceptions import BadRequest, Forbidden, abort
+from werkzeug.exceptions import BadRequest
-from controllers.console import api, console_ns
+from controllers.console import console_ns
from controllers.console.app.wraps import get_app_model
from controllers.console.wraps import (
account_initialization_required,
cloud_edition_billing_resource_check,
edit_permission_required,
enterprise_license_required,
+ is_admin_or_owner_required,
setup_required,
)
from core.ops.ops_trace_manager import OpsTraceManager
from core.workflow.enums import NodeType
from extensions.ext_database import db
-from fields.app_fields import app_detail_fields, app_detail_fields_with_site, app_pagination_fields
+from fields.app_fields import (
+ deleted_tool_fields,
+ model_config_fields,
+ model_config_partial_fields,
+ site_fields,
+ tag_fields,
+)
+from fields.workflow_fields import workflow_partial_fields as _workflow_partial_fields_dict
+from libs.helper import AppIconUrlField, TimestampField
from libs.login import current_account_with_tenant, login_required
-from libs.validators import validate_description_length
from models import App, Workflow
from services.app_dsl_service import AppDslService, ImportMode
from services.app_service import AppService
@@ -27,29 +39,286 @@ from services.enterprise.enterprise_service import EnterpriseService
from services.feature_service import FeatureService
ALLOW_CREATE_APP_MODES = ["chat", "agent-chat", "advanced-chat", "workflow", "completion"]
+DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}"
+
+
+class AppListQuery(BaseModel):
+ page: int = Field(default=1, ge=1, le=99999, description="Page number (1-99999)")
+ limit: int = Field(default=20, ge=1, le=100, description="Page size (1-100)")
+ mode: Literal["completion", "chat", "advanced-chat", "workflow", "agent-chat", "channel", "all"] = Field(
+ default="all", description="App mode filter"
+ )
+ name: str | None = Field(default=None, description="Filter by app name")
+ tag_ids: list[str] | None = Field(default=None, description="Comma-separated tag IDs")
+ is_created_by_me: bool | None = Field(default=None, description="Filter by creator")
+
+ @field_validator("tag_ids", mode="before")
+ @classmethod
+ def validate_tag_ids(cls, value: str | list[str] | None) -> list[str] | None:
+ if not value:
+ return None
+
+ if isinstance(value, str):
+ items = [item.strip() for item in value.split(",") if item.strip()]
+ elif isinstance(value, list):
+ items = [str(item).strip() for item in value if item and str(item).strip()]
+ else:
+ raise TypeError("Unsupported tag_ids type.")
+
+ if not items:
+ return None
+
+ try:
+ return [str(uuid.UUID(item)) for item in items]
+ except ValueError as exc:
+ raise ValueError("Invalid UUID format in tag_ids.") from exc
+
+
+# XSS prevention: patterns that could lead to XSS attacks
+# Includes: script tags, iframe tags, javascript: protocol, SVG with onload, etc.
+_XSS_PATTERNS = [
+ r"<script[^>]*>.*?</script>", # Script tags
+ r"<iframe[^>]*(?:/>|>.*?</iframe>)", # Iframe tags (including self-closing)
+ r"javascript:", # JavaScript protocol
+ r"