diff --git a/.claude/settings.json b/.claude/settings.json
new file mode 100644
index 0000000000..7d42234cae
--- /dev/null
+++ b/.claude/settings.json
@@ -0,0 +1,8 @@
+{
+  "enabledPlugins": {
+    "feature-dev@claude-plugins-official": true,
+    "context7@claude-plugins-official": true,
+    "typescript-lsp@claude-plugins-official": true,
+    "pyright-lsp@claude-plugins-official": true
+  }
+}
diff --git a/.claude/settings.json.example b/.claude/settings.json.example
deleted file mode 100644
index 1149895340..0000000000
--- a/.claude/settings.json.example
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "permissions": {
-    "allow": [],
-    "deny": []
-  },
-  "env": {
-    "__comment": "Environment variables for MCP servers. Override in .claude/settings.local.json with actual values.",
-    "GITHUB_PERSONAL_ACCESS_TOKEN": "ghp_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
-  },
-  "enabledMcpjsonServers": [
-    "context7",
-    "sequential-thinking",
-    "github",
-    "fetch",
-    "playwright",
-    "ide"
-  ],
-  "enableAllProjectMcpServers": true
-}
\ No newline at end of file
diff --git a/.claude/skills/component-refactoring/SKILL.md b/.claude/skills/component-refactoring/SKILL.md
new file mode 100644
index 0000000000..ea695ea442
--- /dev/null
+++ b/.claude/skills/component-refactoring/SKILL.md
@@ -0,0 +1,483 @@
+---
+name: component-refactoring
+description: Refactor high-complexity React components in Dify frontend. Use when `pnpm analyze-component --json` shows complexity > 50 or lineCount > 300, when the user asks for code splitting, hook extraction, or complexity reduction, or when `pnpm analyze-component` warns to refactor before testing; avoid for simple/well-structured components, third-party wrappers, or when the user explicitly wants testing without refactoring.
+---
+
+# Dify Component Refactoring Skill
+
+Refactor high-complexity React components in the Dify frontend codebase with the patterns and workflow below.
+
+> **Complexity Threshold**: Components with complexity > 50 (measured by `pnpm analyze-component`) should be refactored before testing.
+
+## Quick Reference
+
+### Commands (run from `web/`)
+
+Use paths relative to `web/` (e.g., `app/components/...`).
+Use `refactor-component` for refactoring prompts and `analyze-component` for testing prompts and metrics.
+
+```bash
+cd web
+
+# Generate refactoring prompt
+pnpm refactor-component <component-path>
+
+# Output refactoring analysis as JSON
+pnpm refactor-component <component-path> --json
+
+# Generate testing prompt (after refactoring)
+pnpm analyze-component <component-path>
+
+# Output testing analysis as JSON
+pnpm analyze-component <component-path> --json
+```
+
+### Complexity Analysis
+
+```bash
+# Analyze component complexity
+pnpm analyze-component <component-path> --json
+
+# Key metrics to check:
+# - complexity: normalized score 0-100 (target < 50)
+# - maxComplexity: highest single function complexity
+# - lineCount: total lines (target < 300)
+```

+### Complexity Score Interpretation
+
+| Score | Level | Action |
+|-------|-------|--------|
+| 0-25 | 🟢 Simple | Ready for testing |
+| 26-50 | 🟔 Medium | Consider minor refactoring |
+| 51-75 | 🟠 Complex | **Refactor before testing** |
+| 76-100 | šŸ”“ Very Complex | **Must refactor** |
+
+## Core Refactoring Patterns
+
+### Pattern 1: Extract Custom Hooks
+
+**When**: Component has complex state management, multiple `useState`/`useEffect`, or business logic mixed with UI.
+
+**Dify Convention**: Place hooks in a `hooks/` subdirectory or alongside the component as `use-<feature>.ts`.
+
+```typescript
+// āŒ Before: Complex state logic in component
+const Configuration: FC = () => {
+  const [modelConfig, setModelConfig] = useState(...)
+  const [datasetConfigs, setDatasetConfigs] = useState(...)
+  const [completionParams, setCompletionParams] = useState({})
+
+  // 50+ lines of state management logic...
+
+  return <div>...</div>
+}
+
+// āœ… After: Extract to custom hook
+// hooks/use-model-config.ts
+export const useModelConfig = (appId: string) => {
+  const [modelConfig, setModelConfig] = useState(...)
+  const [completionParams, setCompletionParams] = useState({})
+
+  // Related state management logic here
+
+  return { modelConfig, setModelConfig, completionParams, setCompletionParams }
+}
+
+// Component becomes cleaner
+const Configuration: FC = () => {
+  const { modelConfig, setModelConfig } = useModelConfig(appId)
+  return <div>...</div>
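+  // NOTE: the component is now UI-only; all model-config state lives in the hook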
+}
+```
+
+**Dify Examples**:
+- `web/app/components/app/configuration/hooks/use-advanced-prompt-config.ts`
+- `web/app/components/app/configuration/debug/hooks.tsx`
+- `web/app/components/workflow/hooks/use-workflow.ts`
+
+### Pattern 2: Extract Sub-Components
+
+**When**: Single component has multiple UI sections, conditional rendering blocks, or repeated patterns.
+
+**Dify Convention**: Place sub-components in subdirectories or as separate files in the same directory.
+
+```typescript
+// āŒ Before: Monolithic JSX with multiple sections
+const AppInfo = () => {
+  return (
+    <div>
+      {/* 100 lines of header UI */}
+      {/* 100 lines of operations UI */}
+      {/* 100 lines of modals */}
+    </div>
+  )
+}
+
+// āœ… After: Split into focused components
+// app-info/
+// ā”œā”€ā”€ index.tsx (orchestration only)
+// ā”œā”€ā”€ app-header.tsx (header UI)
+// ā”œā”€ā”€ app-operations.tsx (operations UI)
+// └── app-modals.tsx (modal management)
+
+const AppInfo = () => {
+  const { showModal, setShowModal } = useAppInfoModals()
+
+  return (
+    <div>
+      <AppHeader />
+      <AppOperations />
+      <AppModals activeModal={showModal} onClose={() => setShowModal(null)} />
+    </div>
+  )
+}
+```
+
+**Dify Examples**:
+- `web/app/components/app/configuration/` directory structure
+- `web/app/components/workflow/nodes/` per-node organization
+
+### Pattern 3: Simplify Conditional Logic
+
+**When**: Deep nesting (> 3 levels), complex ternaries, or multiple `if/else` chains.
+
+```typescript
+// āŒ Before: Deeply nested conditionals
+const Template = useMemo(() => {
+  if (appDetail?.mode === AppModeEnum.CHAT) {
+    switch (locale) {
+      case LanguagesSupported[1]:
+        return <TemplateChatZh />
+      case LanguagesSupported[7]:
+        return <TemplateChatJa />
+      default:
+        return <TemplateChatEn />
+    }
+  }
+  if (appDetail?.mode === AppModeEnum.ADVANCED_CHAT) {
+    // Another 15 lines...
+  }
+  // More conditions...
+}, [appDetail, locale])
+
+// āœ… After: Use lookup tables + early returns
+const TEMPLATE_MAP = {
+  [AppModeEnum.CHAT]: {
+    [LanguagesSupported[1]]: TemplateChatZh,
+    [LanguagesSupported[7]]: TemplateChatJa,
+    default: TemplateChatEn,
+  },
+  [AppModeEnum.ADVANCED_CHAT]: {
+    [LanguagesSupported[1]]: TemplateAdvancedChatZh,
+    // ...
+  },
+}
+
+const Template = useMemo(() => {
+  const modeTemplates = TEMPLATE_MAP[appDetail?.mode]
+  if (!modeTemplates) return null
+
+  const TemplateComponent = modeTemplates[locale] || modeTemplates.default
+  return <TemplateComponent />
+}, [appDetail, locale])
+```
+
+### Pattern 4: Extract API/Data Logic
+
+**When**: Component directly handles API calls, data transformation, or complex async operations.
+
+**Dify Convention**: Use `@tanstack/react-query` hooks from `web/service/use-*.ts` or create custom data hooks. The project is migrating from SWR to React Query.
+
+```typescript
+// āŒ Before: API logic in component
+const MCPServiceCard = () => {
+  const [basicAppConfig, setBasicAppConfig] = useState({})
+
+  useEffect(() => {
+    if (isBasicApp && appId) {
+      (async () => {
+        const res = await fetchAppDetail({ url: '/apps', id: appId })
+        setBasicAppConfig(res?.model_config || {})
+      })()
+    }
+  }, [appId, isBasicApp])
+
+  // More API-related logic...
+}
+
+// āœ… After: Extract to data hook using React Query
+// use-app-config.ts
+import { useQuery } from '@tanstack/react-query'
+import { get } from '@/service/base'
+
+const NAME_SPACE = 'appConfig'
+
+export const useAppConfig = (appId: string, isBasicApp: boolean) => {
+  return useQuery({
+    enabled: isBasicApp && !!appId,
+    queryKey: [NAME_SPACE, 'detail', appId],
+    queryFn: () => get(`/apps/${appId}`),
+    select: data => data?.model_config || {},
+  })
+}
+
+// Component becomes cleaner
+const MCPServiceCard = () => {
+  const { data: config, isLoading } = useAppConfig(appId, isBasicApp)
+  // UI only
+}
+```
+
+**React Query Best Practices in Dify**:
+- Define `NAME_SPACE` for query key organization
+- Use `enabled` option for conditional fetching
+- Use `select` for data transformation
+- Export invalidation hooks: `useInvalidXxx`
+
+**Dify Examples**:
+- `web/service/use-workflow.ts`
+- `web/service/use-common.ts`
+- `web/service/knowledge/use-dataset.ts`
+- `web/service/knowledge/use-document.ts`
+
+### Pattern 5: Extract Modal/Dialog Management
+
+**When**: Component manages multiple modals with complex open/close states.
+
+**Dify Convention**: Modals should be extracted with their state management.
+
+```typescript
+// āŒ Before: Multiple modal states in component
+const AppInfo = () => {
+  const [showEditModal, setShowEditModal] = useState(false)
+  const [showDuplicateModal, setShowDuplicateModal] = useState(false)
+  const [showConfirmDelete, setShowConfirmDelete] = useState(false)
+  const [showSwitchModal, setShowSwitchModal] = useState(false)
+  const [showImportDSLModal, setShowImportDSLModal] = useState(false)
+  // 5+ more modal states...
+}
+
+// āœ… After: Extract to modal management hook
+type ModalType = 'edit' | 'duplicate' | 'delete' | 'switch' | 'import' | null
+
+const useAppInfoModals = () => {
+  const [activeModal, setActiveModal] = useState<ModalType>(null)
+
+  const openModal = useCallback((type: ModalType) => setActiveModal(type), [])
+  const closeModal = useCallback(() => setActiveModal(null), [])
+
+  return {
+    activeModal,
+    openModal,
+    closeModal,
+    isOpen: (type: ModalType) => activeModal === type,
+  }
+}
+```
+
+### Pattern 6: Extract Form Logic
+
+**When**: Complex form validation, submission handling, or field transformation.
+
+**Dify Convention**: Use `@tanstack/react-form` patterns from `web/app/components/base/form/`.
+
+```typescript
+// āœ… Use existing form infrastructure
+import { useAppForm } from '@/app/components/base/form'
+
+const ConfigForm = () => {
+  const form = useAppForm({
+    defaultValues: { name: '', description: '' },
+    onSubmit: handleSubmit,
+  })
+
+  return <form>...</form>
+}
+```
+
+## Dify-Specific Refactoring Guidelines
+
+### 1. Context Provider Extraction
+
+**When**: Component provides complex context values with multiple states.
+
+```typescript
+// āŒ Before: Large context value object
+const value = {
+  appId, isAPIKeySet, isTrailFinished, mode, modelModeType,
+  promptMode, isAdvancedMode, isAgent, isOpenAI, isFunctionCall,
+  // 50+ more properties...
+}
+return <ConfigContext.Provider value={value}>...</ConfigContext.Provider>
+
+// āœ… After: Split into domain-specific contexts
+<AppContext.Provider value={appValue}>
+  <ModelContext.Provider value={modelValue}>
+    <PromptContext.Provider value={promptValue}>
+      {children}
+    </PromptContext.Provider>
+  </ModelContext.Provider>
+</AppContext.Provider>
+```
+
+**Dify Reference**: `web/context/` directory structure
+
+### 2. Workflow Node Components
+
+**When**: Refactoring workflow node components (`web/app/components/workflow/nodes/`).
+
+**Conventions**:
+- Keep node logic in `use-interactions.ts`
+- Extract panel UI to separate files
+- Use `_base` components for common patterns
+
+```
+nodes/<node-type>/
+  ā”œā”€ā”€ index.tsx            # Node registration
+  ā”œā”€ā”€ node.tsx             # Node visual component
+  ā”œā”€ā”€ panel.tsx            # Configuration panel
+  ā”œā”€ā”€ use-interactions.ts  # Node-specific hooks
+  └── types.ts             # Type definitions
+```
+
+### 3. Configuration Components
+
+**When**: Refactoring app configuration components.
+
+**Conventions**:
+- Separate config sections into subdirectories
+- Use existing patterns from `web/app/components/app/configuration/`
+- Keep feature toggles in dedicated components
+
+### 4. Tool/Plugin Components
+
+**When**: Refactoring tool-related components (`web/app/components/tools/`).
+
+**Conventions**:
+- Follow existing modal patterns
+- Use service hooks from `web/service/use-tools.ts`
+- Keep provider-specific logic isolated
+
+## Refactoring Workflow
+
+### Step 1: Generate Refactoring Prompt
+
+```bash
+pnpm refactor-component <component-path>
+```
+
+This command will:
+- Analyze component complexity and features
+- Identify specific refactoring actions needed
+- Generate a prompt for the AI assistant (auto-copied to clipboard on macOS)
+- Provide detailed requirements based on detected patterns
+
+### Step 2: Analyze Details
+
+```bash
+pnpm analyze-component <component-path> --json
+```
+
+Identify:
+- Total complexity score
+- Max function complexity
+- Line count
+- Features detected (state, effects, API, etc.)
+
+### Step 3: Plan
+
+Create a refactoring plan based on detected features:
+
+| Detected Feature | Refactoring Action |
+|------------------|-------------------|
+| `hasState: true` + `hasEffects: true` | Extract custom hook |
+| `hasAPI: true` | Extract data/service hook |
+| `hasEvents: true` (many) | Extract event handlers |
+| `lineCount > 300` | Split into sub-components |
+| `maxComplexity > 50` | Simplify conditional logic |
+
+### Step 4: Execute Incrementally
+
+1. **Extract one piece at a time**
+2. **Run lint, type-check, and tests after each extraction**
+3. **Verify functionality before next step**
+
+```
+For each extraction:
+  ā”Œā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”
+  │ 1. Extract code                      │
+  │ 2. Run: pnpm lint:fix                │
+  │ 3. Run: pnpm type-check:tsgo         │
+  │ 4. Run: pnpm test                    │
+  │ 5. Test functionality manually       │
+  │ 6. PASS? → Next extraction           │
+  │    FAIL? → Fix before continuing     │
+  ā””ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”˜
+```
+
+### Step 5: Verify
+
+After refactoring:
+
+```bash
+# Re-run refactor command to verify improvements
+pnpm refactor-component <component-path>
+
+# If complexity < 25 and lines < 200, you'll see:
+# āœ… COMPONENT IS WELL-STRUCTURED
+
+# For detailed metrics:
+pnpm analyze-component <component-path> --json
+
+# Target metrics:
+# - complexity < 50
+# - lineCount < 300
+# - maxComplexity < 30
+```
+
+## Common Mistakes to Avoid
+
+### āŒ Over-Engineering
+
+```typescript
+// āŒ Too many tiny hooks
+const useButtonText = () => useState('Click')
+const useButtonDisabled = () => useState(false)
+const useButtonLoading = () => useState(false)
+
+// āœ… Cohesive hook with related state
+const useButtonState = () => {
+  const [text, setText] = useState('Click')
+  const [disabled, setDisabled] = useState(false)
+  const [loading, setLoading] = useState(false)
+  return { text, setText, disabled, setDisabled, loading, setLoading }
+}
+```
+
+### āŒ Breaking Existing Patterns
+
+- Follow existing directory structures
+- Maintain naming conventions
+- Preserve export patterns for compatibility
+
+### āŒ Premature Abstraction
+
+- Only extract when there's clear complexity benefit
+- Don't create abstractions for single-use code
+- Keep refactored code in the same domain area
+
+## References
+
+### Dify Codebase Examples
+
+- **Hook extraction**: `web/app/components/app/configuration/hooks/`
+- **Component splitting**: `web/app/components/app/configuration/`
+- **Service hooks**: `web/service/use-*.ts`
+- **Workflow patterns**: `web/app/components/workflow/hooks/`
+- **Form patterns**: `web/app/components/base/form/`
+
+### Related Skills
+
+- `frontend-testing` - For testing refactored components
+-
 `web/testing/testing.md` - Testing specification
diff --git a/.claude/skills/component-refactoring/references/complexity-patterns.md b/.claude/skills/component-refactoring/references/complexity-patterns.md
new file mode 100644
index 0000000000..5a0a268f38
--- /dev/null
+++ b/.claude/skills/component-refactoring/references/complexity-patterns.md
@@ -0,0 +1,493 @@
+# Complexity Reduction Patterns
+
+This document provides patterns for reducing cognitive complexity in Dify React components.
+
+## Understanding Complexity
+
+### SonarJS Cognitive Complexity
+
+The `pnpm analyze-component` tool uses SonarJS cognitive complexity metrics:
+
+- **Total Complexity**: Sum of all functions' complexity in the file
+- **Max Complexity**: Highest single function complexity
+
+### What Increases Complexity
+
+| Pattern | Complexity Impact |
+|---------|-------------------|
+| `if/else` | +1 per branch |
+| Nested conditions | +1 per nesting level |
+| `switch/case` | +1 per case |
+| `for/while/do` | +1 per loop |
+| `&&`/`||` chains | +1 per operator |
+| Nested callbacks | +1 per nesting level |
+| `try/catch` | +1 per catch |
+| Ternary expressions | +1 per nesting |
+
+## Pattern 1: Replace Conditionals with Lookup Tables
+
+**Before** (complexity: ~15):
+
+```typescript
+const Template = useMemo(() => {
+  if (appDetail?.mode === AppModeEnum.CHAT) {
+    switch (locale) {
+      case LanguagesSupported[1]:
+        return <TemplateChatZh />
+      case LanguagesSupported[7]:
+        return <TemplateChatJa />
+      default:
+        return <TemplateChatEn />
+    }
+  }
+  if (appDetail?.mode === AppModeEnum.ADVANCED_CHAT) {
+    switch (locale) {
+      case LanguagesSupported[1]:
+        return <TemplateAdvancedChatZh />
+      case LanguagesSupported[7]:
+        return <TemplateAdvancedChatJa />
+      default:
+        return <TemplateAdvancedChatEn />
+    }
+  }
+  if (appDetail?.mode === AppModeEnum.WORKFLOW) {
+    // Similar pattern...
+  }
+  return null
+}, [appDetail, locale])
+```
+
+**After** (complexity: ~3):
+
+```typescript
+// Define lookup table outside component
+const TEMPLATE_MAP: Record<AppModeEnum, Record<string, FC>> = {
+  [AppModeEnum.CHAT]: {
+    [LanguagesSupported[1]]: TemplateChatZh,
+    [LanguagesSupported[7]]: TemplateChatJa,
+    default: TemplateChatEn,
+  },
+  [AppModeEnum.ADVANCED_CHAT]: {
+    [LanguagesSupported[1]]: TemplateAdvancedChatZh,
+    [LanguagesSupported[7]]: TemplateAdvancedChatJa,
+    default: TemplateAdvancedChatEn,
+  },
+  [AppModeEnum.WORKFLOW]: {
+    [LanguagesSupported[1]]: TemplateWorkflowZh,
+    [LanguagesSupported[7]]: TemplateWorkflowJa,
+    default: TemplateWorkflowEn,
+  },
+  // ...
+}
+
+// Clean component logic
+const Template = useMemo(() => {
+  if (!appDetail?.mode) return null
+
+  const templates = TEMPLATE_MAP[appDetail.mode]
+  if (!templates) return null
+
+  const TemplateComponent = templates[locale] ?? templates.default
+  return <TemplateComponent />
+}, [appDetail, locale])
+```
+
+## Pattern 2: Use Early Returns
+
+**Before** (complexity: ~10):
+
+```typescript
+const handleSubmit = () => {
+  if (isValid) {
+    if (hasChanges) {
+      if (isConnected) {
+        submitData()
+      } else {
+        showConnectionError()
+      }
+    } else {
+      showNoChangesMessage()
+    }
+  } else {
+    showValidationError()
+  }
+}
+```
+
+**After** (complexity: ~4):
+
+```typescript
+const handleSubmit = () => {
+  if (!isValid) {
+    showValidationError()
+    return
+  }
+
+  if (!hasChanges) {
+    showNoChangesMessage()
+    return
+  }
+
+  if (!isConnected) {
+    showConnectionError()
+    return
+  }
+
+  submitData()
+}
+```
+
+## Pattern 3: Extract Complex Conditions
+
+**Before** (complexity: high):
+
+```typescript
+const canPublish = (() => {
+  if (mode !== AppModeEnum.COMPLETION) {
+    if (!isAdvancedMode)
+      return true
+
+    if (modelModeType === ModelModeType.completion) {
+      if (!hasSetBlockStatus.history || !hasSetBlockStatus.query)
+        return false
+      return true
+    }
+    return true
+  }
+  return !promptEmpty
+})()
+```
+
+**After** (complexity: lower):
+
+```typescript
+// Extract to named functions
+const canPublishInCompletionMode = () => !promptEmpty
+
+const canPublishInChatMode = () => {
+  if (!isAdvancedMode) return true
+  if (modelModeType !== ModelModeType.completion) return true
+  return hasSetBlockStatus.history && hasSetBlockStatus.query
+}
+
+// Clean main logic
+const canPublish = mode === AppModeEnum.COMPLETION
+  ? canPublishInCompletionMode()
+  : canPublishInChatMode()
+```
+
+## Pattern 4: Replace Chained Ternaries
+
+**Before** (complexity: ~5):
+
+```typescript
+const statusText = serverActivated
+  ? t('status.running')
+  : serverPublished
+    ? t('status.inactive')
+    : appUnpublished
+      ? t('status.unpublished')
+      : t('status.notConfigured')
+```
+
+**After** (complexity: ~2):
+
+```typescript
+const getStatusText = () => {
+  if (serverActivated) return t('status.running')
+  if (serverPublished) return t('status.inactive')
+  if (appUnpublished) return t('status.unpublished')
+  return t('status.notConfigured')
+}
+
+const statusText = getStatusText()
+```
+
+Or use lookup:
+
+```typescript
+const STATUS_TEXT_MAP = {
+  running: 'status.running',
+  inactive: 'status.inactive',
+  unpublished: 'status.unpublished',
+  notConfigured: 'status.notConfigured',
+} as const
+
+const getStatusKey = (): keyof typeof STATUS_TEXT_MAP => {
+  if (serverActivated) return 'running'
+  if (serverPublished) return 'inactive'
+  if (appUnpublished) return 'unpublished'
+  return 'notConfigured'
+}
+
+const statusText = t(STATUS_TEXT_MAP[getStatusKey()])
+```
+
+## Pattern 5: Flatten Nested Loops
+
+**Before** (complexity: high):
+
+```typescript
+const processData = (items: Item[]) => {
+  const results: ProcessedItem[] = []
+
+  for (const item of items) {
+    if (item.isValid) {
+      for (const child of item.children) {
+        if (child.isActive) {
+          for (const prop of child.properties) {
+            if (prop.value !== null) {
+              results.push({
+                itemId: item.id,
+                childId: child.id,
+                propValue: prop.value,
+              })
+            }
+          }
+        }
+      }
+    }
+  }
+
+  return results
+}
+```
+
+**After** (complexity: lower):
+
+```typescript
+// Use functional approach
+const processData = (items: Item[]) => {
+  return items
+    .filter(item => item.isValid)
+    .flatMap(item =>
+      item.children
+        .filter(child => child.isActive)
+        .flatMap(child =>
+          child.properties
+            .filter(prop => prop.value !== null)
+            .map(prop => ({
+              itemId: item.id,
+              childId: child.id,
+              propValue: prop.value,
+            }))
+        )
+    )
+}
+```
+
+## Pattern 6: Extract
Event Handler Logic + +**Before** (complexity: high in component): + +```typescript +const Component = () => { + const handleSelect = (data: DataSet[]) => { + if (isEqual(data.map(item => item.id), dataSets.map(item => item.id))) { + hideSelectDataSet() + return + } + + formattingChangedDispatcher() + let newDatasets = data + if (data.find(item => !item.name)) { + const newSelected = produce(data, (draft) => { + data.forEach((item, index) => { + if (!item.name) { + const newItem = dataSets.find(i => i.id === item.id) + if (newItem) + draft[index] = newItem + } + }) + }) + setDataSets(newSelected) + newDatasets = newSelected + } + else { + setDataSets(data) + } + hideSelectDataSet() + + // 40 more lines of logic... + } + + return
<div>...</div>
+} +``` + +**After** (complexity: lower): + +```typescript +// Extract to hook or utility +const useDatasetSelection = (dataSets: DataSet[], setDataSets: SetState) => { + const normalizeSelection = (data: DataSet[]) => { + const hasUnloadedItem = data.some(item => !item.name) + if (!hasUnloadedItem) return data + + return produce(data, (draft) => { + data.forEach((item, index) => { + if (!item.name) { + const existing = dataSets.find(i => i.id === item.id) + if (existing) draft[index] = existing + } + }) + }) + } + + const hasSelectionChanged = (newData: DataSet[]) => { + return !isEqual( + newData.map(item => item.id), + dataSets.map(item => item.id) + ) + } + + return { normalizeSelection, hasSelectionChanged } +} + +// Component becomes cleaner +const Component = () => { + const { normalizeSelection, hasSelectionChanged } = useDatasetSelection(dataSets, setDataSets) + + const handleSelect = (data: DataSet[]) => { + if (!hasSelectionChanged(data)) { + hideSelectDataSet() + return + } + + formattingChangedDispatcher() + const normalized = normalizeSelection(data) + setDataSets(normalized) + hideSelectDataSet() + } + + return
<div>...</div>
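+  // NOTE: the selection normalization and change detection now live in useDatasetSelection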
+} +``` + +## Pattern 7: Reduce Boolean Logic Complexity + +**Before** (complexity: ~8): + +```typescript +const toggleDisabled = hasInsufficientPermissions + || appUnpublished + || missingStartNode + || triggerModeDisabled + || (isAdvancedApp && !currentWorkflow?.graph) + || (isBasicApp && !basicAppConfig.updated_at) +``` + +**After** (complexity: ~3): + +```typescript +// Extract meaningful boolean functions +const isAppReady = () => { + if (isAdvancedApp) return !!currentWorkflow?.graph + return !!basicAppConfig.updated_at +} + +const hasRequiredPermissions = () => { + return isCurrentWorkspaceEditor && !hasInsufficientPermissions +} + +const canToggle = () => { + if (!hasRequiredPermissions()) return false + if (!isAppReady()) return false + if (missingStartNode) return false + if (triggerModeDisabled) return false + return true +} + +const toggleDisabled = !canToggle() +``` + +## Pattern 8: Simplify useMemo/useCallback Dependencies + +**Before** (complexity: multiple recalculations): + +```typescript +const payload = useMemo(() => { + let parameters: Parameter[] = [] + let outputParameters: OutputParameter[] = [] + + if (!published) { + parameters = (inputs || []).map((item) => ({ + name: item.variable, + description: '', + form: 'llm', + required: item.required, + type: item.type, + })) + outputParameters = (outputs || []).map((item) => ({ + name: item.variable, + description: '', + type: item.value_type, + })) + } + else if (detail && detail.tool) { + parameters = (inputs || []).map((item) => ({ + // Complex transformation... + })) + outputParameters = (outputs || []).map((item) => ({ + // Complex transformation... + })) + } + + return { + icon: detail?.icon || icon, + label: detail?.label || name, + // ...more fields + } +}, [detail, published, workflowAppId, icon, name, description, inputs, outputs]) +``` + +**After** (complexity: separated concerns): + +```typescript +// Separate transformations +const useParameterTransform = (inputs: InputVar[], detail?: ToolDetail, published?: boolean) => { + return useMemo(() => { + if (!published) { + return inputs.map(item => ({ + name: item.variable, + description: '', + form: 'llm', + required: item.required, + type: item.type, + })) + } + + if (!detail?.tool) return [] + + return inputs.map(item => ({ + name: item.variable, + required: item.required, + type: item.type === 'paragraph' ? 'string' : item.type, + description: detail.tool.parameters.find(p => p.name === item.variable)?.llm_description || '', + form: detail.tool.parameters.find(p => p.name === item.variable)?.form || 'llm', + })) + }, [inputs, detail, published]) +} + +// Component uses hook +const parameters = useParameterTransform(inputs, detail, published) +const outputParameters = useOutputTransform(outputs, detail, published) + +const payload = useMemo(() => ({ + icon: detail?.icon || icon, + label: detail?.label || name, + parameters, + outputParameters, + // ... 
+}), [detail, icon, name, parameters, outputParameters]) +``` + +## Target Metrics After Refactoring + +| Metric | Target | +|--------|--------| +| Total Complexity | < 50 | +| Max Function Complexity | < 30 | +| Function Length | < 30 lines | +| Nesting Depth | ≤ 3 levels | +| Conditional Chains | ≤ 3 conditions | diff --git a/.claude/skills/component-refactoring/references/component-splitting.md b/.claude/skills/component-refactoring/references/component-splitting.md new file mode 100644 index 0000000000..78a3389100 --- /dev/null +++ b/.claude/skills/component-refactoring/references/component-splitting.md @@ -0,0 +1,477 @@ +# Component Splitting Patterns + +This document provides detailed guidance on splitting large components into smaller, focused components in Dify. + +## When to Split Components + +Split a component when you identify: + +1. **Multiple UI sections** - Distinct visual areas with minimal coupling that can be composed independently +1. **Conditional rendering blocks** - Large `{condition && }` blocks +1. **Repeated patterns** - Similar UI structures used multiple times +1. **300+ lines** - Component exceeds manageable size +1. **Modal clusters** - Multiple modals rendered in one component + +## Splitting Strategies + +### Strategy 1: Section-Based Splitting + +Identify visual sections and extract each as a component. + +```typescript +// āŒ Before: Monolithic component (500+ lines) +const ConfigurationPage = () => { + return ( +
+      {/* Header Section - 50 lines */}
+      <div className='header'>
+        <div className='title'>
+          {t('configuration.title')}
+        </div>
+        <div className='actions'>
+          {isAdvancedMode && <Badge>Advanced</Badge>}
+          <Button>...</Button>
+          <Button>...</Button>
+        </div>
+      </div>
+
+      {/* Config Section - 200 lines */}
+      <div className='config'>
+        <Config />
+      </div>
+
+      {/* Debug Section - 150 lines */}
+      <div className='debug'>
+        <Debug />
+      </div>
+
+      {/* Modals Section - 100 lines */}
+      {showSelectDataSet && <SelectDataSet />}
+      {showHistoryModal && <HistoryModal />}
+      {showUseGPT4Confirm && <UseGPT4Confirm />}
+    </div>
+ ) +} + +// āœ… After: Split into focused components +// configuration/ +// ā”œā”€ā”€ index.tsx (orchestration) +// ā”œā”€ā”€ configuration-header.tsx +// ā”œā”€ā”€ configuration-content.tsx +// ā”œā”€ā”€ configuration-debug.tsx +// └── configuration-modals.tsx + +// configuration-header.tsx +interface ConfigurationHeaderProps { + isAdvancedMode: boolean + onPublish: () => void +} + +const ConfigurationHeader: FC = ({ + isAdvancedMode, + onPublish, +}) => { + const { t } = useTranslation() + + return ( +
+    <div className='header'>
+      <div className='title'>
+        {t('configuration.title')}
+      </div>
+      <div className='actions'>
+        {isAdvancedMode && <Badge>Advanced</Badge>}
+        <Button>...</Button>
+        <Button onClick={onPublish}>...</Button>
+      </div>
+    </div>
+ ) +} + +// index.tsx (orchestration only) +const ConfigurationPage = () => { + const { modelConfig, setModelConfig } = useModelConfig() + const { activeModal, openModal, closeModal } = useModalState() + + return ( +
+      <ConfigurationHeader
+        isAdvancedMode={isAdvancedMode}
+        onPublish={handlePublish}
+      />
+      <ConfigurationContent
+        modelConfig={modelConfig}
+        onChange={setModelConfig}
+      />
+      {!isMobile && (
+        <ConfigurationDebug />
+      )}
+      <ConfigurationModals
+        activeModal={activeModal}
+        onClose={closeModal}
+      />
+    </div>
+ ) +} +``` + +### Strategy 2: Conditional Block Extraction + +Extract large conditional rendering blocks. + +```typescript +// āŒ Before: Large conditional blocks +const AppInfo = () => { + return ( +
+      {expand ? (
+        <div className='expanded'>
+          {/* 100 lines of expanded view */}
+        </div>
+      ) : (
+        <div className='collapsed'>
+          {/* 50 lines of collapsed view */}
+        </div>
+      )}
+    </div>
+  )
+}
+
+// āœ… After: Separate view components
+const AppInfoExpanded: FC<AppInfoViewProps> = ({ appDetail, onAction }) => {
+  return (
+    <div>
+      {/* Clean, focused expanded view */}
+    </div>
+  )
+}
+
+const AppInfoCollapsed: FC<AppInfoViewProps> = ({ appDetail, onAction }) => {
+  return (
+    <div>
+      {/* Clean, focused collapsed view */}
+    </div>
+  )
+}
+
+const AppInfo = () => {
+  return (
+    <div>
+      {expand
+        ? <AppInfoExpanded appDetail={appDetail} onAction={handleAction} />
+        : <AppInfoCollapsed appDetail={appDetail} onAction={handleAction} />
+      }
+    </div>
+ ) +} +``` + +### Strategy 3: Modal Extraction + +Extract modals with their trigger logic. + +```typescript +// āŒ Before: Multiple modals in one component +const AppInfo = () => { + const [showEdit, setShowEdit] = useState(false) + const [showDuplicate, setShowDuplicate] = useState(false) + const [showDelete, setShowDelete] = useState(false) + const [showSwitch, setShowSwitch] = useState(false) + + const onEdit = async (data) => { /* 20 lines */ } + const onDuplicate = async (data) => { /* 20 lines */ } + const onDelete = async () => { /* 15 lines */ } + + return ( +
+      {/* Main content */}
+
+      {showEdit && <EditModal onClose={() => setShowEdit(false)} />}
+      {showDuplicate && <DuplicateModal onClose={() => setShowDuplicate(false)} />}
+      {showDelete && <DeleteConfirm onClose={() => setShowDelete(false)} />}
+      {showSwitch && <SwitchModal />}
+    </div>
+  )
+}
+
+// āœ… After: Modal manager component
+// app-info-modals.tsx
+type ModalType = 'edit' | 'duplicate' | 'delete' | 'switch' | null
+
+interface AppInfoModalsProps {
+  appDetail: AppDetail
+  activeModal: ModalType
+  onClose: () => void
+  onSuccess: () => void
+}
+
+const AppInfoModals: FC<AppInfoModalsProps> = ({
+  appDetail,
+  activeModal,
+  onClose,
+  onSuccess,
+}) => {
+  const handleEdit = async (data) => { /* logic */ }
+  const handleDuplicate = async (data) => { /* logic */ }
+  const handleDelete = async () => { /* logic */ }
+
+  return (
+    <>
+      {activeModal === 'edit' && (
+        <EditModal appDetail={appDetail} onSave={handleEdit} onClose={onClose} />
+      )}
+      {activeModal === 'duplicate' && (
+        <DuplicateModal appDetail={appDetail} onConfirm={handleDuplicate} onClose={onClose} />
+      )}
+      {activeModal === 'delete' && (
+        <DeleteConfirm onConfirm={handleDelete} onClose={onClose} />
+      )}
+      {activeModal === 'switch' && (
+        <SwitchModal appDetail={appDetail} onClose={onClose} />
+      )}
+    </>
+  )
+}
+
+// Parent component
+const AppInfo = () => {
+  const { activeModal, openModal, closeModal } = useModalState()
+
+  return (
+    <div>
+      {/* Main content with openModal triggers */}
+      <Button onClick={() => openModal('edit')}>Edit</Button>
+
+      <AppInfoModals
+        appDetail={appDetail}
+        activeModal={activeModal}
+        onClose={closeModal}
+        onSuccess={handleSuccess}
+      />
+    </div>
+ ) +} +``` + +### Strategy 4: List Item Extraction + +Extract repeated item rendering. + +```typescript +// āŒ Before: Inline item rendering +const OperationsList = () => { + return ( +
+      {operations.map(op => (
+        <div key={op.id} className='operation-item'>
+          <span>{op.icon}</span>
+          <span>{op.title}</span>
+          <span>{op.description}</span>
+          <Button onClick={op.onClick}>...</Button>
+          {op.badge && <Badge>{op.badge}</Badge>}
+          {/* More complex rendering... */}
+        </div>
+      ))}
+    </div>
+  )
+}
+
+// āœ… After: Extracted item component
+interface OperationItemProps {
+  operation: Operation
+  onAction: (id: string) => void
+}
+
+const OperationItem: FC<OperationItemProps> = ({ operation, onAction }) => {
+  return (
+    <div className='operation-item'>
+      <span>{operation.icon}</span>
+      <span>{operation.title}</span>
+      <span>{operation.description}</span>
+      <Button onClick={() => onAction(operation.id)}>...</Button>
+      {operation.badge && <Badge>{operation.badge}</Badge>}
+    </div>
+  )
+}
+
+const OperationsList = () => {
+  const handleAction = useCallback((id: string) => {
+    const op = operations.find(o => o.id === id)
+    op?.onClick()
+  }, [operations])
+
+  return (
+    <div>
+      {operations.map(op => (
+        <OperationItem key={op.id} operation={op} onAction={handleAction} />
+      ))}
+    </div>
+ ) +} +``` + +## Directory Structure Patterns + +### Pattern A: Flat Structure (Simple Components) + +For components with 2-3 sub-components: + +``` +component-name/ + ā”œā”€ā”€ index.tsx # Main component + ā”œā”€ā”€ sub-component-a.tsx + ā”œā”€ā”€ sub-component-b.tsx + └── types.ts # Shared types +``` + +### Pattern B: Nested Structure (Complex Components) + +For components with many sub-components: + +``` +component-name/ + ā”œā”€ā”€ index.tsx # Main orchestration + ā”œā”€ā”€ types.ts # Shared types + ā”œā”€ā”€ hooks/ + │ ā”œā”€ā”€ use-feature-a.ts + │ └── use-feature-b.ts + ā”œā”€ā”€ components/ + │ ā”œā”€ā”€ header/ + │ │ └── index.tsx + │ ā”œā”€ā”€ content/ + │ │ └── index.tsx + │ └── modals/ + │ └── index.tsx + └── utils/ + └── helpers.ts +``` + +### Pattern C: Feature-Based Structure (Dify Standard) + +Following Dify's existing patterns: + +``` +configuration/ + ā”œā”€ā”€ index.tsx # Main page component + ā”œā”€ā”€ base/ # Base/shared components + │ ā”œā”€ā”€ feature-panel/ + │ ā”œā”€ā”€ group-name/ + │ └── operation-btn/ + ā”œā”€ā”€ config/ # Config section + │ ā”œā”€ā”€ index.tsx + │ ā”œā”€ā”€ agent/ + │ └── automatic/ + ā”œā”€ā”€ dataset-config/ # Dataset section + │ ā”œā”€ā”€ index.tsx + │ ā”œā”€ā”€ card-item/ + │ └── params-config/ + ā”œā”€ā”€ debug/ # Debug section + │ ā”œā”€ā”€ index.tsx + │ └── hooks.tsx + └── hooks/ # Shared hooks + └── use-advanced-prompt-config.ts +``` + +## Props Design + +### Minimal Props Principle + +Pass only what's needed: + +```typescript +// āŒ Bad: Passing entire objects when only some fields needed + + +// āœ… Good: Destructure to minimum required + +``` + +### Callback Props Pattern + +Use callbacks for child-to-parent communication: + +```typescript +// Parent +const Parent = () => { + const [value, setValue] = useState('') + + return ( + + ) +} + +// Child +interface ChildProps { + value: string + onChange: (value: string) => void + onSubmit: () => void +} + +const Child: FC = ({ value, onChange, onSubmit }) => { + return ( +
+      <input value={value} onChange={e => onChange(e.target.value)} />
+      <button onClick={onSubmit}>Submit</button>
+    </div>
+ ) +} +``` + +### Render Props for Flexibility + +When sub-components need parent context: + +```typescript +interface ListProps { + items: T[] + renderItem: (item: T, index: number) => React.ReactNode + renderEmpty?: () => React.ReactNode +} + +function List({ items, renderItem, renderEmpty }: ListProps) { + if (items.length === 0 && renderEmpty) { + return <>{renderEmpty()} + } + + return ( +
+      {items.map((item, index) => renderItem(item, index))}
+    </div>
+ ) +} + +// Usage + } + renderEmpty={() => } +/> +``` diff --git a/.claude/skills/component-refactoring/references/hook-extraction.md b/.claude/skills/component-refactoring/references/hook-extraction.md new file mode 100644 index 0000000000..a8d75deffd --- /dev/null +++ b/.claude/skills/component-refactoring/references/hook-extraction.md @@ -0,0 +1,317 @@ +# Hook Extraction Patterns + +This document provides detailed guidance on extracting custom hooks from complex components in Dify. + +## When to Extract Hooks + +Extract a custom hook when you identify: + +1. **Coupled state groups** - Multiple `useState` hooks that are always used together +1. **Complex effects** - `useEffect` with multiple dependencies or cleanup logic +1. **Business logic** - Data transformations, validations, or calculations +1. **Reusable patterns** - Logic that appears in multiple components + +## Extraction Process + +### Step 1: Identify State Groups + +Look for state variables that are logically related: + +```typescript +// āŒ These belong together - extract to hook +const [modelConfig, setModelConfig] = useState(...) +const [completionParams, setCompletionParams] = useState({}) +const [modelModeType, setModelModeType] = useState(...) + +// These are model-related state that should be in useModelConfig() +``` + +### Step 2: Identify Related Effects + +Find effects that modify the grouped state: + +```typescript +// āŒ These effects belong with the state above +useEffect(() => { + if (hasFetchedDetail && !modelModeType) { + const mode = currModel?.model_properties.mode + if (mode) { + const newModelConfig = produce(modelConfig, (draft) => { + draft.mode = mode + }) + setModelConfig(newModelConfig) + } + } +}, [textGenerationModelList, hasFetchedDetail, modelModeType, currModel]) +``` + +### Step 3: Create the Hook + +```typescript +// hooks/use-model-config.ts +import type { FormValue } from '@/app/components/header/account-setting/model-provider-page/declarations' +import type { ModelConfig } from '@/models/debug' +import { produce } from 'immer' +import { useEffect, useState } from 'react' +import { ModelModeType } from '@/types/app' + +interface UseModelConfigParams { + initialConfig?: Partial + currModel?: { model_properties?: { mode?: ModelModeType } } + hasFetchedDetail: boolean +} + +interface UseModelConfigReturn { + modelConfig: ModelConfig + setModelConfig: (config: ModelConfig) => void + completionParams: FormValue + setCompletionParams: (params: FormValue) => void + modelModeType: ModelModeType +} + +export const useModelConfig = ({ + initialConfig, + currModel, + hasFetchedDetail, +}: UseModelConfigParams): UseModelConfigReturn => { + const [modelConfig, setModelConfig] = useState({ + provider: 'langgenius/openai/openai', + model_id: 'gpt-3.5-turbo', + mode: ModelModeType.unset, + // ... 
default values + ...initialConfig, + }) + + const [completionParams, setCompletionParams] = useState({}) + + const modelModeType = modelConfig.mode + + // Fill old app data missing model mode + useEffect(() => { + if (hasFetchedDetail && !modelModeType) { + const mode = currModel?.model_properties?.mode + if (mode) { + setModelConfig(produce(modelConfig, (draft) => { + draft.mode = mode + })) + } + } + }, [hasFetchedDetail, modelModeType, currModel]) + + return { + modelConfig, + setModelConfig, + completionParams, + setCompletionParams, + modelModeType, + } +} +``` + +### Step 4: Update Component + +```typescript +// Before: 50+ lines of state management +const Configuration: FC = () => { + const [modelConfig, setModelConfig] = useState(...) + // ... lots of related state and effects +} + +// After: Clean component +const Configuration: FC = () => { + const { + modelConfig, + setModelConfig, + completionParams, + setCompletionParams, + modelModeType, + } = useModelConfig({ + currModel, + hasFetchedDetail, + }) + + // Component now focuses on UI +} +``` + +## Naming Conventions + +### Hook Names + +- Use `use` prefix: `useModelConfig`, `useDatasetConfig` +- Be specific: `useAdvancedPromptConfig` not `usePrompt` +- Include domain: `useWorkflowVariables`, `useMCPServer` + +### File Names + +- Kebab-case: `use-model-config.ts` +- Place in `hooks/` subdirectory when multiple hooks exist +- Place alongside component for single-use hooks + +### Return Type Names + +- Suffix with `Return`: `UseModelConfigReturn` +- Suffix params with `Params`: `UseModelConfigParams` + +## Common Hook Patterns in Dify + +### 1. Data Fetching Hook (React Query) + +```typescript +// Pattern: Use @tanstack/react-query for data fetching +import { useQuery, useQueryClient } from '@tanstack/react-query' +import { get } from '@/service/base' +import { useInvalid } from '@/service/use-base' + +const NAME_SPACE = 'appConfig' + +// Query keys for cache management +export const appConfigQueryKeys = { + detail: (appId: string) => [NAME_SPACE, 'detail', appId] as const, +} + +// Main data hook +export const useAppConfig = (appId: string) => { + return useQuery({ + enabled: !!appId, + queryKey: appConfigQueryKeys.detail(appId), + queryFn: () => get(`/apps/${appId}`), + select: data => data?.model_config || null, + }) +} + +// Invalidation hook for refreshing data +export const useInvalidAppConfig = () => { + return useInvalid([NAME_SPACE]) +} + +// Usage in component +const Component = () => { + const { data: config, isLoading, error, refetch } = useAppConfig(appId) + const invalidAppConfig = useInvalidAppConfig() + + const handleRefresh = () => { + invalidAppConfig() // Invalidates cache and triggers refetch + } + + return
<div>...</div>
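+  // NOTE: isLoading and error come from React Query; no local loading state is needed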
+} +``` + +### 2. Form State Hook + +```typescript +// Pattern: Form state + validation + submission +export const useConfigForm = (initialValues: ConfigFormValues) => { + const [values, setValues] = useState(initialValues) + const [errors, setErrors] = useState>({}) + const [isSubmitting, setIsSubmitting] = useState(false) + + const validate = useCallback(() => { + const newErrors: Record = {} + if (!values.name) newErrors.name = 'Name is required' + setErrors(newErrors) + return Object.keys(newErrors).length === 0 + }, [values]) + + const handleChange = useCallback((field: string, value: any) => { + setValues(prev => ({ ...prev, [field]: value })) + }, []) + + const handleSubmit = useCallback(async (onSubmit: (values: ConfigFormValues) => Promise) => { + if (!validate()) return + setIsSubmitting(true) + try { + await onSubmit(values) + } finally { + setIsSubmitting(false) + } + }, [values, validate]) + + return { values, errors, isSubmitting, handleChange, handleSubmit } +} +``` + +### 3. Modal State Hook + +```typescript +// Pattern: Multiple modal management +type ModalType = 'edit' | 'delete' | 'duplicate' | null + +export const useModalState = () => { + const [activeModal, setActiveModal] = useState(null) + const [modalData, setModalData] = useState(null) + + const openModal = useCallback((type: ModalType, data?: any) => { + setActiveModal(type) + setModalData(data) + }, []) + + const closeModal = useCallback(() => { + setActiveModal(null) + setModalData(null) + }, []) + + return { + activeModal, + modalData, + openModal, + closeModal, + isOpen: useCallback((type: ModalType) => activeModal === type, [activeModal]), + } +} +``` + +### 4. Toggle/Boolean Hook + +```typescript +// Pattern: Boolean state with convenience methods +export const useToggle = (initialValue = false) => { + const [value, setValue] = useState(initialValue) + + const toggle = useCallback(() => setValue(v => !v), []) + const setTrue = useCallback(() => setValue(true), []) + const setFalse = useCallback(() => setValue(false), []) + + return [value, { toggle, setTrue, setFalse, set: setValue }] as const +} + +// Usage +const [isExpanded, { toggle, setTrue: expand, setFalse: collapse }] = useToggle() +``` + +## Testing Extracted Hooks + +After extraction, test hooks in isolation: + +```typescript +// use-model-config.spec.ts +import { renderHook, act } from '@testing-library/react' +import { useModelConfig } from './use-model-config' + +describe('useModelConfig', () => { + it('should initialize with default values', () => { + const { result } = renderHook(() => useModelConfig({ + hasFetchedDetail: false, + })) + + expect(result.current.modelConfig.provider).toBe('langgenius/openai/openai') + expect(result.current.modelModeType).toBe(ModelModeType.unset) + }) + + it('should update model config', () => { + const { result } = renderHook(() => useModelConfig({ + hasFetchedDetail: true, + })) + + act(() => { + result.current.setModelConfig({ + ...result.current.modelConfig, + model_id: 'gpt-4', + }) + }) + + expect(result.current.modelConfig.model_id).toBe('gpt-4') + }) +}) +``` diff --git a/.claude/skills/frontend-testing/SKILL.md b/.claude/skills/frontend-testing/SKILL.md index 65602c92eb..dd9677a78e 100644 --- a/.claude/skills/frontend-testing/SKILL.md +++ b/.claude/skills/frontend-testing/SKILL.md @@ -318,5 +318,5 @@ For more detailed information, refer to: - `web/vitest.config.ts` - Vitest configuration - `web/vitest.setup.ts` - Test environment setup -- `web/testing/analyze-component.js` - Component analysis tool +- 
`web/scripts/analyze-component.js` - Component analysis tool - Modules are not mocked automatically. Global mocks live in `web/vitest.setup.ts` (for example `react-i18next`, `next/image`); mock other modules like `ky` or `mime` locally in test files. diff --git a/.mcp.json b/.mcp.json deleted file mode 100644 index 8eceaf9ead..0000000000 --- a/.mcp.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "mcpServers": { - "context7": { - "type": "http", - "url": "https://mcp.context7.com/mcp" - }, - "sequential-thinking": { - "type": "stdio", - "command": "npx", - "args": ["-y", "@modelcontextprotocol/server-sequential-thinking"], - "env": {} - }, - "github": { - "type": "stdio", - "command": "npx", - "args": ["-y", "@modelcontextprotocol/server-github"], - "env": { - "GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_PERSONAL_ACCESS_TOKEN}" - } - }, - "fetch": { - "type": "stdio", - "command": "uvx", - "args": ["mcp-server-fetch"], - "env": {} - }, - "playwright": { - "type": "stdio", - "command": "npx", - "args": ["-y", "@playwright/mcp@latest"], - "env": {} - } - } - } \ No newline at end of file diff --git a/api/controllers/common/file_response.py b/api/controllers/common/file_response.py new file mode 100644 index 0000000000..ca8ea3d52e --- /dev/null +++ b/api/controllers/common/file_response.py @@ -0,0 +1,57 @@ +import os +from email.message import Message +from urllib.parse import quote + +from flask import Response + +HTML_MIME_TYPES = frozenset({"text/html", "application/xhtml+xml"}) +HTML_EXTENSIONS = frozenset({"html", "htm"}) + + +def _normalize_mime_type(mime_type: str | None) -> str: + if not mime_type: + return "" + message = Message() + message["Content-Type"] = mime_type + return message.get_content_type().strip().lower() + + +def _is_html_extension(extension: str | None) -> bool: + if not extension: + return False + return extension.lstrip(".").lower() in HTML_EXTENSIONS + + +def is_html_content(mime_type: str | None, filename: str | None, extension: str | None = None) -> bool: + normalized_mime_type = _normalize_mime_type(mime_type) + if normalized_mime_type in HTML_MIME_TYPES: + return True + + if _is_html_extension(extension): + return True + + if filename: + return _is_html_extension(os.path.splitext(filename)[1]) + + return False + + +def enforce_download_for_html( + response: Response, + *, + mime_type: str | None, + filename: str | None, + extension: str | None = None, +) -> bool: + if not is_html_content(mime_type, filename, extension): + return False + + if filename: + encoded_filename = quote(filename) + response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{encoded_filename}" + else: + response.headers["Content-Disposition"] = "attachment" + + response.headers["Content-Type"] = "application/octet-stream" + response.headers["X-Content-Type-Options"] = "nosniff" + return True diff --git a/api/controllers/console/billing/billing.py b/api/controllers/console/billing/billing.py index 7f907dc420..ac039f9c5d 100644 --- a/api/controllers/console/billing/billing.py +++ b/api/controllers/console/billing/billing.py @@ -1,8 +1,9 @@ import base64 +from typing import Literal from flask import request from flask_restx import Resource, fields -from pydantic import BaseModel, Field, field_validator +from pydantic import BaseModel, Field from werkzeug.exceptions import BadRequest from controllers.console import console_ns @@ -15,22 +16,8 @@ DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" class SubscriptionQuery(BaseModel): - plan: str = Field(..., description="Subscription 
plan") - interval: str = Field(..., description="Billing interval") - - @field_validator("plan") - @classmethod - def validate_plan(cls, value: str) -> str: - if value not in [CloudPlan.PROFESSIONAL, CloudPlan.TEAM]: - raise ValueError("Invalid plan") - return value - - @field_validator("interval") - @classmethod - def validate_interval(cls, value: str) -> str: - if value not in {"month", "year"}: - raise ValueError("Invalid interval") - return value + plan: Literal[CloudPlan.PROFESSIONAL, CloudPlan.TEAM] = Field(..., description="Subscription plan") + interval: Literal["month", "year"] = Field(..., description="Billing interval") class PartnerTenantsPayload(BaseModel): diff --git a/api/controllers/console/explore/message.py b/api/controllers/console/explore/message.py index 229b7c8865..d596d60b36 100644 --- a/api/controllers/console/explore/message.py +++ b/api/controllers/console/explore/message.py @@ -1,6 +1,5 @@ import logging from typing import Literal -from uuid import UUID from flask import request from flask_restx import marshal_with @@ -26,6 +25,7 @@ from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotIni from core.model_runtime.errors.invoke import InvokeError from fields.message_fields import message_infinite_scroll_pagination_fields from libs import helper +from libs.helper import UUIDStrOrEmpty from libs.login import current_account_with_tenant from models.model import AppMode from services.app_generate_service import AppGenerateService @@ -44,8 +44,8 @@ logger = logging.getLogger(__name__) class MessageListQuery(BaseModel): - conversation_id: UUID - first_id: UUID | None = None + conversation_id: UUIDStrOrEmpty + first_id: UUIDStrOrEmpty | None = None limit: int = Field(default=20, ge=1, le=100) diff --git a/api/controllers/console/explore/saved_message.py b/api/controllers/console/explore/saved_message.py index 6a9e274a0e..bc7b8e7651 100644 --- a/api/controllers/console/explore/saved_message.py +++ b/api/controllers/console/explore/saved_message.py @@ -1,5 +1,3 @@ -from uuid import UUID - from flask import request from flask_restx import fields, marshal_with from pydantic import BaseModel, Field @@ -10,19 +8,19 @@ from controllers.console import console_ns from controllers.console.explore.error import NotCompletionAppError from controllers.console.explore.wraps import InstalledAppResource from fields.conversation_fields import message_file_fields -from libs.helper import TimestampField +from libs.helper import TimestampField, UUIDStrOrEmpty from libs.login import current_account_with_tenant from services.errors.message import MessageNotExistsError from services.saved_message_service import SavedMessageService class SavedMessageListQuery(BaseModel): - last_id: UUID | None = None + last_id: UUIDStrOrEmpty | None = None limit: int = Field(default=20, ge=1, le=100) class SavedMessageCreatePayload(BaseModel): - message_id: UUID + message_id: UUIDStrOrEmpty register_schema_models(console_ns, SavedMessageListQuery, SavedMessageCreatePayload) diff --git a/api/controllers/console/workspace/load_balancing_config.py b/api/controllers/console/workspace/load_balancing_config.py index 9bf393ea2e..ccb60b1461 100644 --- a/api/controllers/console/workspace/load_balancing_config.py +++ b/api/controllers/console/workspace/load_balancing_config.py @@ -1,6 +1,8 @@ -from flask_restx import Resource, reqparse +from flask_restx import Resource +from pydantic import BaseModel from werkzeug.exceptions import Forbidden +from controllers.common.schema import 
register_schema_models from controllers.console import console_ns from controllers.console.wraps import account_initialization_required, setup_required from core.model_runtime.entities.model_entities import ModelType @@ -10,10 +12,20 @@ from models import TenantAccountRole from services.model_load_balancing_service import ModelLoadBalancingService +class LoadBalancingCredentialPayload(BaseModel): + model: str + model_type: ModelType + credentials: dict[str, object] + + +register_schema_models(console_ns, LoadBalancingCredentialPayload) + + @console_ns.route( "/workspaces/current/model-providers//models/load-balancing-configs/credentials-validate" ) class LoadBalancingCredentialsValidateApi(Resource): + @console_ns.expect(console_ns.models[LoadBalancingCredentialPayload.__name__]) @setup_required @login_required @account_initialization_required @@ -24,20 +36,7 @@ class LoadBalancingCredentialsValidateApi(Resource): tenant_id = current_tenant_id - parser = ( - reqparse.RequestParser() - .add_argument("model", type=str, required=True, nullable=False, location="json") - .add_argument( - "model_type", - type=str, - required=True, - nullable=False, - choices=[mt.value for mt in ModelType], - location="json", - ) - .add_argument("credentials", type=dict, required=True, nullable=False, location="json") - ) - args = parser.parse_args() + payload = LoadBalancingCredentialPayload.model_validate(console_ns.payload or {}) # validate model load balancing credentials model_load_balancing_service = ModelLoadBalancingService() @@ -49,9 +48,9 @@ class LoadBalancingCredentialsValidateApi(Resource): model_load_balancing_service.validate_load_balancing_credentials( tenant_id=tenant_id, provider=provider, - model=args["model"], - model_type=args["model_type"], - credentials=args["credentials"], + model=payload.model, + model_type=payload.model_type, + credentials=payload.credentials, ) except CredentialsValidateFailedError as ex: result = False @@ -69,6 +68,7 @@ class LoadBalancingCredentialsValidateApi(Resource): "/workspaces/current/model-providers//models/load-balancing-configs//credentials-validate" ) class LoadBalancingConfigCredentialsValidateApi(Resource): + @console_ns.expect(console_ns.models[LoadBalancingCredentialPayload.__name__]) @setup_required @login_required @account_initialization_required @@ -79,20 +79,7 @@ class LoadBalancingConfigCredentialsValidateApi(Resource): tenant_id = current_tenant_id - parser = ( - reqparse.RequestParser() - .add_argument("model", type=str, required=True, nullable=False, location="json") - .add_argument( - "model_type", - type=str, - required=True, - nullable=False, - choices=[mt.value for mt in ModelType], - location="json", - ) - .add_argument("credentials", type=dict, required=True, nullable=False, location="json") - ) - args = parser.parse_args() + payload = LoadBalancingCredentialPayload.model_validate(console_ns.payload or {}) # validate model load balancing config credentials model_load_balancing_service = ModelLoadBalancingService() @@ -104,9 +91,9 @@ class LoadBalancingConfigCredentialsValidateApi(Resource): model_load_balancing_service.validate_load_balancing_credentials( tenant_id=tenant_id, provider=provider, - model=args["model"], - model_type=args["model_type"], - credentials=args["credentials"], + model=payload.model, + model_type=payload.model_type, + credentials=payload.credentials, config_id=config_id, ) except CredentialsValidateFailedError as ex: diff --git a/api/controllers/console/workspace/tool_providers.py 
b/api/controllers/console/workspace/tool_providers.py index cb711d16e4..d51b37a9cd 100644 --- a/api/controllers/console/workspace/tool_providers.py +++ b/api/controllers/console/workspace/tool_providers.py @@ -1,4 +1,5 @@ import io +import logging from urllib.parse import urlparse from flask import make_response, redirect, request, send_file @@ -17,6 +18,7 @@ from controllers.console.wraps import ( is_admin_or_owner_required, setup_required, ) +from core.db.session_factory import session_factory from core.entities.mcp_provider import MCPAuthentication, MCPConfiguration from core.helper.tool_provider_cache import ToolProviderListCache from core.mcp.auth.auth_flow import auth, handle_callback @@ -40,6 +42,8 @@ from services.tools.tools_manage_service import ToolCommonService from services.tools.tools_transform_service import ToolTransformService from services.tools.workflow_tools_manage_service import WorkflowToolManageService +logger = logging.getLogger(__name__) + def is_valid_url(url: str) -> bool: if not url: @@ -945,8 +949,8 @@ class ToolProviderMCPApi(Resource): configuration = MCPConfiguration.model_validate(args["configuration"]) authentication = MCPAuthentication.model_validate(args["authentication"]) if args["authentication"] else None - # Create provider in transaction - with Session(db.engine) as session, session.begin(): + # 1) Create provider in a short transaction (no network I/O inside) + with session_factory.create_session() as session, session.begin(): service = MCPToolManageService(session=session) result = service.create_provider( tenant_id=tenant_id, @@ -962,7 +966,28 @@ class ToolProviderMCPApi(Resource): authentication=authentication, ) - # Invalidate cache AFTER transaction commits to avoid holding locks during Redis operations + # 2) Try to fetch tools immediately after creation so they appear without a second save. + # Perform network I/O outside any DB session to avoid holding locks. 
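+        # NOTE: a failure in this best-effort fetch must not roll back the provider row committed above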
+ try: + reconnect = MCPToolManageService.reconnect_with_url( + server_url=args["server_url"], + headers=args.get("headers") or {}, + timeout=configuration.timeout, + sse_read_timeout=configuration.sse_read_timeout, + ) + # Update just-created provider with authed/tools in a new short transaction + with session_factory.create_session() as session, session.begin(): + service = MCPToolManageService(session=session) + db_provider = service.get_provider(provider_id=result.id, tenant_id=tenant_id) + db_provider.authed = reconnect.authed + db_provider.tools = reconnect.tools + + result = ToolTransformService.mcp_provider_to_user_provider(db_provider, for_list=True) + except Exception: + # Best-effort: if initial fetch fails (e.g., auth required), return created provider as-is + logger.warning("Failed to fetch MCP tools after creation", exc_info=True) + + # Final cache invalidation to ensure list views are up to date ToolProviderListCache.invalidate_cache(tenant_id) return jsonable_encoder(result) diff --git a/api/controllers/files/image_preview.py b/api/controllers/files/image_preview.py index 64f47f426a..04db1c67cb 100644 --- a/api/controllers/files/image_preview.py +++ b/api/controllers/files/image_preview.py @@ -7,6 +7,7 @@ from werkzeug.exceptions import NotFound import services from controllers.common.errors import UnsupportedFileTypeError +from controllers.common.file_response import enforce_download_for_html from controllers.files import files_ns from extensions.ext_database import db from services.account_service import TenantService @@ -138,6 +139,13 @@ class FilePreviewApi(Resource): response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{encoded_filename}" response.headers["Content-Type"] = "application/octet-stream" + enforce_download_for_html( + response, + mime_type=upload_file.mime_type, + filename=upload_file.name, + extension=upload_file.extension, + ) + return response diff --git a/api/controllers/files/tool_files.py b/api/controllers/files/tool_files.py index c487a0a915..89aa472015 100644 --- a/api/controllers/files/tool_files.py +++ b/api/controllers/files/tool_files.py @@ -6,6 +6,7 @@ from pydantic import BaseModel, Field from werkzeug.exceptions import Forbidden, NotFound from controllers.common.errors import UnsupportedFileTypeError +from controllers.common.file_response import enforce_download_for_html from controllers.files import files_ns from core.tools.signature import verify_tool_file_signature from core.tools.tool_file_manager import ToolFileManager @@ -78,4 +79,11 @@ class ToolFileApi(Resource): encoded_filename = quote(tool_file.name) response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{encoded_filename}" + enforce_download_for_html( + response, + mime_type=tool_file.mimetype, + filename=tool_file.name, + extension=extension, + ) + return response diff --git a/api/controllers/service_api/app/file_preview.py b/api/controllers/service_api/app/file_preview.py index 60f422b88e..f853a124ef 100644 --- a/api/controllers/service_api/app/file_preview.py +++ b/api/controllers/service_api/app/file_preview.py @@ -5,6 +5,7 @@ from flask import Response, request from flask_restx import Resource from pydantic import BaseModel, Field +from controllers.common.file_response import enforce_download_for_html from controllers.common.schema import register_schema_model from controllers.service_api import service_api_ns from controllers.service_api.app.error import ( @@ -183,6 +184,13 @@ class FilePreviewApi(Resource): # Override content-type for 
downloads to force download response.headers["Content-Type"] = "application/octet-stream" + enforce_download_for_html( + response, + mime_type=upload_file.mime_type, + filename=upload_file.name, + extension=upload_file.extension, + ) + # Add caching headers for performance response.headers["Cache-Control"] = "public, max-age=3600" # Cache for 1 hour diff --git a/api/controllers/service_api/dataset/dataset.py b/api/controllers/service_api/dataset/dataset.py index 4f91f40c55..94faf8dd42 100644 --- a/api/controllers/service_api/dataset/dataset.py +++ b/api/controllers/service_api/dataset/dataset.py @@ -13,7 +13,6 @@ from controllers.service_api.dataset.error import DatasetInUseError, DatasetName from controllers.service_api.wraps import ( DatasetApiResource, cloud_edition_billing_rate_limit_check, - validate_dataset_token, ) from core.model_runtime.entities.model_entities import ModelType from core.provider_manager import ProviderManager @@ -460,9 +459,8 @@ class DatasetTagsApi(DatasetApiResource): 401: "Unauthorized - invalid API token", } ) - @validate_dataset_token @service_api_ns.marshal_with(build_dataset_tag_fields(service_api_ns)) - def get(self, _, dataset_id): + def get(self, _): """Get all knowledge type tags.""" assert isinstance(current_user, Account) cid = current_user.current_tenant_id @@ -482,8 +480,7 @@ class DatasetTagsApi(DatasetApiResource): } ) @service_api_ns.marshal_with(build_dataset_tag_fields(service_api_ns)) - @validate_dataset_token - def post(self, _, dataset_id): + def post(self, _): """Add a knowledge type tag.""" assert isinstance(current_user, Account) if not (current_user.has_edit_permission or current_user.is_dataset_editor): @@ -506,8 +503,7 @@ class DatasetTagsApi(DatasetApiResource): } ) @service_api_ns.marshal_with(build_dataset_tag_fields(service_api_ns)) - @validate_dataset_token - def patch(self, _, dataset_id): + def patch(self, _): assert isinstance(current_user, Account) if not (current_user.has_edit_permission or current_user.is_dataset_editor): raise Forbidden() @@ -533,9 +529,8 @@ class DatasetTagsApi(DatasetApiResource): 403: "Forbidden - insufficient permissions", } ) - @validate_dataset_token @edit_permission_required - def delete(self, _, dataset_id): + def delete(self, _): """Delete a knowledge type tag.""" payload = TagDeletePayload.model_validate(service_api_ns.payload or {}) TagService.delete_tag(payload.tag_id) @@ -555,8 +550,7 @@ class DatasetTagBindingApi(DatasetApiResource): 403: "Forbidden - insufficient permissions", } ) - @validate_dataset_token - def post(self, _, dataset_id): + def post(self, _): # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator assert isinstance(current_user, Account) if not (current_user.has_edit_permission or current_user.is_dataset_editor): @@ -580,8 +574,7 @@ class DatasetTagUnbindingApi(DatasetApiResource): 403: "Forbidden - insufficient permissions", } ) - @validate_dataset_token - def post(self, _, dataset_id): + def post(self, _): # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator assert isinstance(current_user, Account) if not (current_user.has_edit_permission or current_user.is_dataset_editor): @@ -604,7 +597,6 @@ class DatasetTagsBindingStatusApi(DatasetApiResource): 401: "Unauthorized - invalid API token", } ) - @validate_dataset_token def get(self, _, *args, **kwargs): """Get all knowledge type tags.""" dataset_id = kwargs.get("dataset_id") diff --git a/api/core/helper/tool_provider_cache.py 
b/api/core/helper/tool_provider_cache.py index eef5937407..c5447c2b3f 100644 --- a/api/core/helper/tool_provider_cache.py +++ b/api/core/helper/tool_provider_cache.py @@ -1,6 +1,6 @@ import json import logging -from typing import Any +from typing import Any, cast from core.tools.entities.api_entities import ToolProviderTypeApiLiteral from extensions.ext_redis import redis_client, redis_fallback @@ -50,7 +50,9 @@ class ToolProviderListCache: redis_client.delete(cache_key) else: # Invalidate all caches for this tenant - pattern = f"tool_providers:tenant_id:{tenant_id}:*" - keys = list(redis_client.scan_iter(pattern)) - if keys: - redis_client.delete(*keys) + keys = ["builtin", "model", "api", "workflow", "mcp"] + pipeline = redis_client.pipeline() + for key in keys: + cache_key = ToolProviderListCache._generate_cache_key(tenant_id, cast(ToolProviderTypeApiLiteral, key)) + pipeline.delete(cache_key) + pipeline.execute() diff --git a/api/core/mcp/client/streamable_client.py b/api/core/mcp/client/streamable_client.py index f81e7cead8..5c3cd0d8f8 100644 --- a/api/core/mcp/client/streamable_client.py +++ b/api/core/mcp/client/streamable_client.py @@ -313,17 +313,20 @@ class StreamableHTTPTransport: if is_initialization: self._maybe_extract_session_id_from_response(response) - content_type = cast(str, response.headers.get(CONTENT_TYPE, "").lower()) + # Per https://modelcontextprotocol.io/specification/2025-06-18/basic#notifications: + # The server MUST NOT send a response to notifications. + if isinstance(message.root, JSONRPCRequest): + content_type = cast(str, response.headers.get(CONTENT_TYPE, "").lower()) - if content_type.startswith(JSON): - self._handle_json_response(response, ctx.server_to_client_queue) - elif content_type.startswith(SSE): - self._handle_sse_response(response, ctx) - else: - self._handle_unexpected_content_type( - content_type, - ctx.server_to_client_queue, - ) + if content_type.startswith(JSON): + self._handle_json_response(response, ctx.server_to_client_queue) + elif content_type.startswith(SSE): + self._handle_sse_response(response, ctx) + else: + self._handle_unexpected_content_type( + content_type, + ctx.server_to_client_queue, + ) def _handle_json_response( self, diff --git a/api/core/plugin/entities/parameters.py b/api/core/plugin/entities/parameters.py index 88a3a7bd43..bfa662b9f6 100644 --- a/api/core/plugin/entities/parameters.py +++ b/api/core/plugin/entities/parameters.py @@ -76,7 +76,7 @@ class PluginParameter(BaseModel): auto_generate: PluginParameterAutoGenerate | None = None template: PluginParameterTemplate | None = None required: bool = False - default: Union[float, int, str, bool] | None = None + default: Union[float, int, str, bool, list, dict] | None = None min: Union[float, int] | None = None max: Union[float, int] | None = None precision: int | None = None diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py index 9807cb4e6a..43912cd75d 100644 --- a/api/core/rag/datasource/retrieval_service.py +++ b/api/core/rag/datasource/retrieval_service.py @@ -13,7 +13,7 @@ from core.model_runtime.entities.model_entities import ModelType from core.rag.data_post_processor.data_post_processor import DataPostProcessor from core.rag.datasource.keyword.keyword_factory import Keyword from core.rag.datasource.vdb.vector_factory import Vector -from core.rag.embedding.retrieval import RetrievalSegments +from core.rag.embedding.retrieval import RetrievalChildChunk, RetrievalSegments from core.rag.entities.metadata_entities 
import MetadataCondition from core.rag.index_processor.constant.doc_type import DocType from core.rag.index_processor.constant.index_type import IndexStructureType @@ -381,10 +381,9 @@ class RetrievalService: records = [] include_segment_ids = set() segment_child_map = {} - segment_file_map = {} valid_dataset_documents = {} - image_doc_ids = [] + image_doc_ids: list[Any] = [] child_index_node_ids = [] index_node_ids = [] doc_to_document_map = {} @@ -417,28 +416,39 @@ class RetrievalService: child_index_node_ids = [i for i in child_index_node_ids if i] index_node_ids = [i for i in index_node_ids if i] - segment_ids = [] + segment_ids: list[str] = [] index_node_segments: list[DocumentSegment] = [] segments: list[DocumentSegment] = [] - attachment_map = {} - child_chunk_map = {} - doc_segment_map = {} + attachment_map: dict[str, list[dict[str, Any]]] = {} + child_chunk_map: dict[str, list[ChildChunk]] = {} + doc_segment_map: dict[str, list[str]] = {} with session_factory.create_session() as session: attachments = cls.get_segment_attachment_infos(image_doc_ids, session) for attachment in attachments: segment_ids.append(attachment["segment_id"]) - attachment_map[attachment["segment_id"]] = attachment - doc_segment_map[attachment["segment_id"]] = attachment["attachment_id"] - + if attachment["segment_id"] in attachment_map: + attachment_map[attachment["segment_id"]].append(attachment["attachment_info"]) + else: + attachment_map[attachment["segment_id"]] = [attachment["attachment_info"]] + if attachment["segment_id"] in doc_segment_map: + doc_segment_map[attachment["segment_id"]].append(attachment["attachment_id"]) + else: + doc_segment_map[attachment["segment_id"]] = [attachment["attachment_id"]] child_chunk_stmt = select(ChildChunk).where(ChildChunk.index_node_id.in_(child_index_node_ids)) child_index_nodes = session.execute(child_chunk_stmt).scalars().all() for i in child_index_nodes: segment_ids.append(i.segment_id) - child_chunk_map[i.segment_id] = i - doc_segment_map[i.segment_id] = i.index_node_id + if i.segment_id in child_chunk_map: + child_chunk_map[i.segment_id].append(i) + else: + child_chunk_map[i.segment_id] = [i] + if i.segment_id in doc_segment_map: + doc_segment_map[i.segment_id].append(i.index_node_id) + else: + doc_segment_map[i.segment_id] = [i.index_node_id] if index_node_ids: document_segment_stmt = select(DocumentSegment).where( @@ -448,7 +458,7 @@ class RetrievalService: ) index_node_segments = session.execute(document_segment_stmt).scalars().all() # type: ignore for index_node_segment in index_node_segments: - doc_segment_map[index_node_segment.id] = index_node_segment.index_node_id + doc_segment_map[index_node_segment.id] = [index_node_segment.index_node_id] if segment_ids: document_segment_stmt = select(DocumentSegment).where( DocumentSegment.enabled == True, @@ -461,95 +471,86 @@ class RetrievalService: segments.extend(index_node_segments) for segment in segments: - doc_id = doc_segment_map.get(segment.id) - child_chunk = child_chunk_map.get(segment.id) - attachment_info = attachment_map.get(segment.id) + child_chunks: list[ChildChunk] = child_chunk_map.get(segment.id, []) + attachment_infos: list[dict[str, Any]] = attachment_map.get(segment.id, []) + ds_dataset_document: DatasetDocument | None = valid_dataset_documents.get(segment.document_id) - if doc_id: - document = doc_to_document_map[doc_id] - ds_dataset_document: DatasetDocument | None = valid_dataset_documents.get( - document.metadata.get("document_id") - ) - - if ds_dataset_document and 
ds_dataset_document.doc_form == IndexStructureType.PARENT_CHILD_INDEX: - if segment.id not in include_segment_ids: - include_segment_ids.add(segment.id) - if child_chunk: + if ds_dataset_document and ds_dataset_document.doc_form == IndexStructureType.PARENT_CHILD_INDEX: + if segment.id not in include_segment_ids: + include_segment_ids.add(segment.id) + if child_chunks or attachment_infos: + child_chunk_details = [] + max_score = 0.0 + for child_chunk in child_chunks: + document = doc_to_document_map[child_chunk.index_node_id] child_chunk_detail = { "id": child_chunk.id, "content": child_chunk.content, "position": child_chunk.position, "score": document.metadata.get("score", 0.0) if document else 0.0, } - map_detail = { - "max_score": document.metadata.get("score", 0.0) if document else 0.0, - "child_chunks": [child_chunk_detail], - } - segment_child_map[segment.id] = map_detail - record = { - "segment": segment, + child_chunk_details.append(child_chunk_detail) + max_score = max(max_score, document.metadata.get("score", 0.0) if document else 0.0) + for attachment_info in attachment_infos: + file_document = doc_to_document_map[attachment_info["id"]] + max_score = max( + max_score, file_document.metadata.get("score", 0.0) if file_document else 0.0 + ) + + map_detail = { + "max_score": max_score, + "child_chunks": child_chunk_details, } - if attachment_info: - segment_file_map[segment.id] = [attachment_info] - records.append(record) - else: - if child_chunk: - child_chunk_detail = { - "id": child_chunk.id, - "content": child_chunk.content, - "position": child_chunk.position, - "score": document.metadata.get("score", 0.0), - } - if segment.id in segment_child_map: - segment_child_map[segment.id]["child_chunks"].append(child_chunk_detail) # type: ignore - segment_child_map[segment.id]["max_score"] = max( - segment_child_map[segment.id]["max_score"], - document.metadata.get("score", 0.0) if document else 0.0, - ) - else: - segment_child_map[segment.id] = { - "max_score": document.metadata.get("score", 0.0) if document else 0.0, - "child_chunks": [child_chunk_detail], - } - if attachment_info: - if segment.id in segment_file_map: - segment_file_map[segment.id].append(attachment_info) - else: - segment_file_map[segment.id] = [attachment_info] - else: - if segment.id not in include_segment_ids: - include_segment_ids.add(segment.id) - record = { - "segment": segment, - "score": document.metadata.get("score", 0.0), # type: ignore - } - if attachment_info: - segment_file_map[segment.id] = [attachment_info] - records.append(record) - else: - if attachment_info: - attachment_infos = segment_file_map.get(segment.id, []) - if attachment_info not in attachment_infos: - attachment_infos.append(attachment_info) - segment_file_map[segment.id] = attachment_infos + segment_child_map[segment.id] = map_detail + record: dict[str, Any] = { + "segment": segment, + } + records.append(record) + else: + if segment.id not in include_segment_ids: + include_segment_ids.add(segment.id) + max_score = 0.0 + segment_document = doc_to_document_map.get(segment.index_node_id) + if segment_document: + max_score = max(max_score, segment_document.metadata.get("score", 0.0)) + for attachment_info in attachment_infos: + file_doc = doc_to_document_map.get(attachment_info["id"]) + if file_doc: + max_score = max(max_score, file_doc.metadata.get("score", 0.0)) + record = { + "segment": segment, + "score": max_score, + } + records.append(record) # Add child chunks information to records for record in records: if record["segment"].id in 
segment_child_map: record["child_chunks"] = segment_child_map[record["segment"].id].get("child_chunks") # type: ignore record["score"] = segment_child_map[record["segment"].id]["max_score"] # type: ignore - if record["segment"].id in segment_file_map: - record["files"] = segment_file_map[record["segment"].id] # type: ignore[assignment] + if record["segment"].id in attachment_map: + record["files"] = attachment_map[record["segment"].id] # type: ignore[assignment] - result = [] + result: list[RetrievalSegments] = [] for record in records: # Extract segment segment = record["segment"] # Extract child_chunks, ensuring it's a list or None - child_chunks = record.get("child_chunks") - if not isinstance(child_chunks, list): - child_chunks = None + raw_child_chunks = record.get("child_chunks") + child_chunks_list: list[RetrievalChildChunk] | None = None + if isinstance(raw_child_chunks, list): + # Sort by score descending + sorted_chunks = sorted(raw_child_chunks, key=lambda x: x.get("score", 0.0), reverse=True) + child_chunks_list = [ + RetrievalChildChunk( + id=chunk["id"], + content=chunk["content"], + score=chunk.get("score", 0.0), + position=chunk["position"], + ) + for chunk in sorted_chunks + ] # Extract files, ensuring it's a list or None files = record.get("files") @@ -566,11 +567,11 @@ class RetrievalService: # Create RetrievalSegments object retrieval_segment = RetrievalSegments( - segment=segment, child_chunks=child_chunks, score=score, files=files + segment=segment, child_chunks=child_chunks_list, score=score, files=files ) result.append(retrieval_segment) - return result + return sorted(result, key=lambda x: x.score if x.score is not None else 0.0, reverse=True) except Exception as e: db.session.rollback() raise e diff --git a/api/core/rag/datasource/vdb/pgvector/pgvector.py b/api/core/rag/datasource/vdb/pgvector/pgvector.py index 445a0a7f8b..0615b8312c 100644 --- a/api/core/rag/datasource/vdb/pgvector/pgvector.py +++ b/api/core/rag/datasource/vdb/pgvector/pgvector.py @@ -255,7 +255,10 @@ class PGVector(BaseVector): return with self._get_cursor() as cur: - cur.execute("CREATE EXTENSION IF NOT EXISTS vector") + cur.execute("SELECT 1 FROM pg_extension WHERE extname = 'vector'") + if not cur.fetchone(): + cur.execute("CREATE EXTENSION vector") + cur.execute(SQL_CREATE_TABLE.format(table_name=self.table_name, dimension=dimension)) # PG hnsw index only support 2000 dimension or less # ref: https://github.com/pgvector/pgvector?tab=readme-ov-file#indexing diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index baf879df95..2c3fc5ab75 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -7,7 +7,7 @@ from collections.abc import Generator, Mapping from typing import Any, Union, cast from flask import Flask, current_app -from sqlalchemy import and_, or_, select +from sqlalchemy import and_, literal, or_, select from sqlalchemy.orm import Session from core.app.app_config.entities import ( @@ -1036,7 +1036,7 @@ class DatasetRetrieval: if automatic_metadata_filters: conditions = [] for sequence, filter in enumerate(automatic_metadata_filters): - self._process_metadata_filter_func( + self.process_metadata_filter_func( sequence, filter.get("condition"), # type: ignore filter.get("metadata_name"), # type: ignore @@ -1072,7 +1072,7 @@ class DatasetRetrieval: value=expected_value, ) ) - filters = self._process_metadata_filter_func( + filters = self.process_metadata_filter_func( sequence, 
condition.comparison_operator, metadata_name, @@ -1168,8 +1168,9 @@ class DatasetRetrieval: return None return automatic_metadata_filters - def _process_metadata_filter_func( - self, sequence: int, condition: str, metadata_name: str, value: Any | None, filters: list + @classmethod + def process_metadata_filter_func( + cls, sequence: int, condition: str, metadata_name: str, value: Any | None, filters: list ): if value is None and condition not in ("empty", "not empty"): return filters @@ -1218,6 +1219,20 @@ class DatasetRetrieval: case "≄" | ">=": filters.append(DatasetDocument.doc_metadata[metadata_name].as_float() >= value) + case "in" | "not in": + if isinstance(value, str): + value_list = [v.strip() for v in value.split(",") if v.strip()] + elif isinstance(value, (list, tuple)): + value_list = [str(v) for v in value if v is not None] + else: + value_list = [str(value)] if value is not None else [] + + if not value_list: + # `field in []` is False, `field not in []` is True + filters.append(literal(condition == "not in")) + else: + op = json_field.in_ if condition == "in" else json_field.notin_ + filters.append(op(value_list)) case _: pass diff --git a/api/core/tools/workflow_as_tool/provider.py b/api/core/tools/workflow_as_tool/provider.py index 0439fb1d60..2bd973f831 100644 --- a/api/core/tools/workflow_as_tool/provider.py +++ b/api/core/tools/workflow_as_tool/provider.py @@ -5,6 +5,7 @@ from sqlalchemy.orm import Session from core.app.app_config.entities import VariableEntity, VariableEntityType from core.app.apps.workflow.app_config_manager import WorkflowAppConfigManager +from core.db.session_factory import session_factory from core.plugin.entities.parameters import PluginParameterOption from core.tools.__base.tool_provider import ToolProviderController from core.tools.__base.tool_runtime import ToolRuntime @@ -47,33 +48,30 @@ class WorkflowToolProviderController(ToolProviderController): @classmethod def from_db(cls, db_provider: WorkflowToolProvider) -> "WorkflowToolProviderController": - with Session(db.engine, expire_on_commit=False) as session, session.begin(): - provider = session.get(WorkflowToolProvider, db_provider.id) if db_provider.id else None - if not provider: - raise ValueError("workflow provider not found") - app = session.get(App, provider.app_id) + with session_factory.create_session() as session, session.begin(): + app = session.get(App, db_provider.app_id) if not app: raise ValueError("app not found") - user = session.get(Account, provider.user_id) if provider.user_id else None + user = session.get(Account, db_provider.user_id) if db_provider.user_id else None controller = WorkflowToolProviderController( entity=ToolProviderEntity( identity=ToolProviderIdentity( author=user.name if user else "", - name=provider.label, - label=I18nObject(en_US=provider.label, zh_Hans=provider.label), - description=I18nObject(en_US=provider.description, zh_Hans=provider.description), - icon=provider.icon, + name=db_provider.label, + label=I18nObject(en_US=db_provider.label, zh_Hans=db_provider.label), + description=I18nObject(en_US=db_provider.description, zh_Hans=db_provider.description), + icon=db_provider.icon, ), credentials_schema=[], plugin_id=None, ), - provider_id=provider.id or "", + provider_id="", ) controller.tools = [ - controller._get_db_provider_tool(provider, app, session=session, user=user), + controller._get_db_provider_tool(db_provider, app, session=session, user=user), ] return controller diff --git 
a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index adc474bd60..8670a71aa3 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -6,7 +6,7 @@ from collections import defaultdict from collections.abc import Mapping, Sequence from typing import TYPE_CHECKING, Any, cast -from sqlalchemy import and_, func, literal, or_, select +from sqlalchemy import and_, func, or_, select from sqlalchemy.orm import sessionmaker from core.app.app_config.entities import DatasetRetrieveConfigEntity @@ -460,7 +460,7 @@ class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node[KnowledgeRetrievalNodeD if automatic_metadata_filters: conditions = [] for sequence, filter in enumerate(automatic_metadata_filters): - self._process_metadata_filter_func( + DatasetRetrieval.process_metadata_filter_func( sequence, filter.get("condition", ""), filter.get("metadata_name", ""), @@ -504,7 +504,7 @@ class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node[KnowledgeRetrievalNodeD value=expected_value, ) ) - filters = self._process_metadata_filter_func( + filters = DatasetRetrieval.process_metadata_filter_func( sequence, condition.comparison_operator, metadata_name, @@ -603,87 +603,6 @@ class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node[KnowledgeRetrievalNodeD return [], usage return automatic_metadata_filters, usage - def _process_metadata_filter_func( - self, sequence: int, condition: str, metadata_name: str, value: Any, filters: list[Any] - ) -> list[Any]: - if value is None and condition not in ("empty", "not empty"): - return filters - - json_field = Document.doc_metadata[metadata_name].as_string() - - match condition: - case "contains": - filters.append(json_field.like(f"%{value}%")) - - case "not contains": - filters.append(json_field.notlike(f"%{value}%")) - - case "start with": - filters.append(json_field.like(f"{value}%")) - - case "end with": - filters.append(json_field.like(f"%{value}")) - case "in": - if isinstance(value, str): - value_list = [v.strip() for v in value.split(",") if v.strip()] - elif isinstance(value, (list, tuple)): - value_list = [str(v) for v in value if v is not None] - else: - value_list = [str(value)] if value is not None else [] - - if not value_list: - filters.append(literal(False)) - else: - filters.append(json_field.in_(value_list)) - - case "not in": - if isinstance(value, str): - value_list = [v.strip() for v in value.split(",") if v.strip()] - elif isinstance(value, (list, tuple)): - value_list = [str(v) for v in value if v is not None] - else: - value_list = [str(value)] if value is not None else [] - - if not value_list: - filters.append(literal(True)) - else: - filters.append(json_field.notin_(value_list)) - - case "is" | "=": - if isinstance(value, str): - filters.append(json_field == value) - elif isinstance(value, (int, float)): - filters.append(Document.doc_metadata[metadata_name].as_float() == value) - - case "is not" | "≠": - if isinstance(value, str): - filters.append(json_field != value) - elif isinstance(value, (int, float)): - filters.append(Document.doc_metadata[metadata_name].as_float() != value) - - case "empty": - filters.append(Document.doc_metadata[metadata_name].is_(None)) - - case "not empty": - filters.append(Document.doc_metadata[metadata_name].isnot(None)) - - case "before" | "<": - filters.append(Document.doc_metadata[metadata_name].as_float() < 
value) - - case "after" | ">": - filters.append(Document.doc_metadata[metadata_name].as_float() > value) - - case "≤" | "<=": - filters.append(Document.doc_metadata[metadata_name].as_float() <= value) - - case "≄" | ">=": - filters.append(Document.doc_metadata[metadata_name].as_float() >= value) - - case _: - pass - - return filters - @classmethod def _extract_variable_selector_to_variable_mapping( cls, diff --git a/api/pyproject.toml b/api/pyproject.toml index 6716603dd4..dbc6a2eb83 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "dify-api" -version = "1.11.1" +version = "1.11.2" requires-python = ">=3.11,<3.13" dependencies = [ diff --git a/api/services/app_generate_service.py b/api/services/app_generate_service.py index 4514c86f7c..cc58899dc4 100644 --- a/api/services/app_generate_service.py +++ b/api/services/app_generate_service.py @@ -14,7 +14,8 @@ from enums.quota_type import QuotaType, unlimited from extensions.otel import AppGenerateHandler, trace_span from models.model import Account, App, AppMode, EndUser from models.workflow import Workflow -from services.errors.app import InvokeRateLimitError, QuotaExceededError, WorkflowIdFormatError, WorkflowNotFoundError +from services.errors.app import QuotaExceededError, WorkflowIdFormatError, WorkflowNotFoundError +from services.errors.llm import InvokeRateLimitError from services.workflow_service import WorkflowService diff --git a/api/services/async_workflow_service.py b/api/services/async_workflow_service.py index e100582511..bc73b7c8c2 100644 --- a/api/services/async_workflow_service.py +++ b/api/services/async_workflow_service.py @@ -21,7 +21,7 @@ from models.model import App, EndUser from models.trigger import WorkflowTriggerLog from models.workflow import Workflow from repositories.sqlalchemy_workflow_trigger_log_repository import SQLAlchemyWorkflowTriggerLogRepository -from services.errors.app import InvokeRateLimitError, QuotaExceededError, WorkflowNotFoundError +from services.errors.app import QuotaExceededError, WorkflowNotFoundError, WorkflowQuotaLimitError from services.workflow.entities import AsyncTriggerResponse, TriggerData, WorkflowTaskData from services.workflow.queue_dispatcher import QueueDispatcherManager, QueuePriority from services.workflow_service import WorkflowService @@ -141,7 +141,7 @@ class AsyncWorkflowService: trigger_log_repo.update(trigger_log) session.commit() - raise InvokeRateLimitError( + raise WorkflowQuotaLimitError( f"Workflow execution quota limit reached for tenant {trigger_data.tenant_id}" ) from e diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 970192fde5..ac4b25c5dc 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -3458,7 +3458,7 @@ class SegmentService: if keyword: query = query.where(DocumentSegment.content.ilike(f"%{keyword}%")) - query = query.order_by(DocumentSegment.position.asc()) + query = query.order_by(DocumentSegment.position.asc(), DocumentSegment.id.asc()) paginated_segments = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False) return paginated_segments.items, paginated_segments.total diff --git a/api/services/enterprise/enterprise_service.py b/api/services/enterprise/enterprise_service.py index 83d0fcf296..c0cc0e5233 100644 --- a/api/services/enterprise/enterprise_service.py +++ b/api/services/enterprise/enterprise_service.py @@ -110,5 +110,5 @@ class EnterpriseService: if not app_id: raise ValueError("app_id must be provided.") 
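+ # NOTE: a request body on DELETE has no defined semantics (RFC 9110) and
+ # is dropped by some proxies and HTTP stacks; the appId is therefore sent
+ # as a query parameter below.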
- body = {"appId": app_id} - EnterpriseRequest.send_request("DELETE", "/webapp/clean", json=body) + params = {"appId": app_id} + EnterpriseRequest.send_request("DELETE", "/webapp/clean", params=params) diff --git a/api/services/errors/app.py b/api/services/errors/app.py index 24e4760acc..60e59e97dc 100644 --- a/api/services/errors/app.py +++ b/api/services/errors/app.py @@ -18,8 +18,8 @@ class WorkflowIdFormatError(Exception): pass -class InvokeRateLimitError(Exception): - """Raised when rate limit is exceeded for workflow invocations.""" +class WorkflowQuotaLimitError(Exception): + """Raised when workflow execution quota is exceeded (for async/background workflows).""" pass diff --git a/api/services/plugin/plugin_parameter_service.py b/api/services/plugin/plugin_parameter_service.py index 5dcbf5fec5..40565c56ed 100644 --- a/api/services/plugin/plugin_parameter_service.py +++ b/api/services/plugin/plugin_parameter_service.py @@ -146,7 +146,7 @@ class PluginParameterService: provider, action, resolved_credentials, - CredentialType.API_KEY.value, + original_subscription.credential_type or CredentialType.UNAUTHORIZED.value, parameter, ) .options diff --git a/api/services/tools/builtin_tools_manage_service.py b/api/services/tools/builtin_tools_manage_service.py index cf1d39fa25..87951d53e6 100644 --- a/api/services/tools/builtin_tools_manage_service.py +++ b/api/services/tools/builtin_tools_manage_service.py @@ -286,12 +286,12 @@ class BuiltinToolManageService: session.add(db_provider) session.commit() - - # Invalidate tool providers cache - ToolProviderListCache.invalidate_cache(tenant_id) except Exception as e: session.rollback() raise ValueError(str(e)) + + # Invalidate tool providers cache + ToolProviderListCache.invalidate_cache(tenant_id, "builtin") return {"result": "success"} @staticmethod diff --git a/api/services/tools/mcp_tools_manage_service.py b/api/services/tools/mcp_tools_manage_service.py index 252be77b27..0be106f597 100644 --- a/api/services/tools/mcp_tools_manage_service.py +++ b/api/services/tools/mcp_tools_manage_service.py @@ -319,8 +319,14 @@ class MCPToolManageService: except MCPError as e: raise ValueError(f"Failed to connect to MCP server: {e}") - # Update database with retrieved tools - db_provider.tools = json.dumps([tool.model_dump() for tool in tools]) + # Update database with retrieved tools (ensure description is a non-null string) + tools_payload = [] + for tool in tools: + data = tool.model_dump() + if data.get("description") is None: + data["description"] = "" + tools_payload.append(data) + db_provider.tools = json.dumps(tools_payload) db_provider.authed = True db_provider.updated_at = datetime.now() self._session.flush() @@ -620,6 +626,21 @@ class MCPToolManageService: server_url_hash=new_server_url_hash, ) + @staticmethod + def reconnect_with_url( + *, + server_url: str, + headers: dict[str, str], + timeout: float | None, + sse_read_timeout: float | None, + ) -> ReconnectResult: + return MCPToolManageService._reconnect_with_url( + server_url=server_url, + headers=headers, + timeout=timeout, + sse_read_timeout=sse_read_timeout, + ) + @staticmethod def _reconnect_with_url( *, @@ -642,9 +663,16 @@ class MCPToolManageService: sse_read_timeout=sse_read_timeout, ) as mcp_client: tools = mcp_client.list_tools() + # Ensure tool descriptions are non-null in payload + tools_payload = [] + for t in tools: + d = t.model_dump() + if d.get("description") is None: + d["description"] = "" + tools_payload.append(d) return ReconnectResult( authed=True, - 
tools=json.dumps([tool.model_dump() for tool in tools]), + tools=json.dumps(tools_payload), encrypted_credentials=EMPTY_CREDENTIALS_JSON, ) except MCPAuthError: diff --git a/api/services/tools/workflow_tools_manage_service.py b/api/services/tools/workflow_tools_manage_service.py index fe77ff2dc5..714a651839 100644 --- a/api/services/tools/workflow_tools_manage_service.py +++ b/api/services/tools/workflow_tools_manage_service.py @@ -5,8 +5,8 @@ from datetime import datetime from typing import Any from sqlalchemy import or_, select -from sqlalchemy.orm import Session +from core.db.session_factory import session_factory from core.helper.tool_provider_cache import ToolProviderListCache from core.model_runtime.utils.encoders import jsonable_encoder from core.tools.__base.tool_provider import ToolProviderController @@ -68,26 +68,27 @@ class WorkflowToolManageService: if workflow is None: raise ValueError(f"Workflow not found for app {workflow_app_id}") - with Session(db.engine, expire_on_commit=False) as session, session.begin(): - workflow_tool_provider = WorkflowToolProvider( - tenant_id=tenant_id, - user_id=user_id, - app_id=workflow_app_id, - name=name, - label=label, - icon=json.dumps(icon), - description=description, - parameter_configuration=json.dumps(parameters), - privacy_policy=privacy_policy, - version=workflow.version, - ) - session.add(workflow_tool_provider) + workflow_tool_provider = WorkflowToolProvider( + tenant_id=tenant_id, + user_id=user_id, + app_id=workflow_app_id, + name=name, + label=label, + icon=json.dumps(icon), + description=description, + parameter_configuration=json.dumps(parameters), + privacy_policy=privacy_policy, + version=workflow.version, + ) try: WorkflowToolProviderController.from_db(workflow_tool_provider) except Exception as e: raise ValueError(str(e)) + with session_factory.create_session() as session, session.begin(): + session.add(workflow_tool_provider) + if labels is not None: ToolLabelManager.update_tool_labels( ToolTransformService.workflow_provider_to_controller(workflow_tool_provider), labels diff --git a/api/services/trigger/trigger_provider_service.py b/api/services/trigger/trigger_provider_service.py index 57de9b3cee..ef77c33c1b 100644 --- a/api/services/trigger/trigger_provider_service.py +++ b/api/services/trigger/trigger_provider_service.py @@ -868,48 +868,111 @@ class TriggerProviderService: if not provider_controller: raise ValueError(f"Provider {provider_id} not found") - subscription = TriggerProviderService.get_subscription_by_id( - tenant_id=tenant_id, - subscription_id=subscription_id, - ) - if not subscription: - raise ValueError(f"Subscription {subscription_id} not found") + # Use distributed lock to prevent race conditions on the same subscription + lock_key = f"trigger_subscription_rebuild_lock:{tenant_id}_{subscription_id}" + with redis_client.lock(lock_key, timeout=20): + with Session(db.engine, expire_on_commit=False) as session: + try: + # Get subscription within the transaction + subscription: TriggerSubscription | None = ( + session.query(TriggerSubscription).filter_by(tenant_id=tenant_id, id=subscription_id).first() + ) + if not subscription: + raise ValueError(f"Subscription {subscription_id} not found") - credential_type = CredentialType.of(subscription.credential_type) - if credential_type not in [CredentialType.OAUTH2, CredentialType.API_KEY]: - raise ValueError("Credential type not supported for rebuild") + credential_type = CredentialType.of(subscription.credential_type) + if credential_type not in 
[CredentialType.OAUTH2, CredentialType.API_KEY]: + raise ValueError("Credential type not supported for rebuild") - # TODO: Trying to invoke update api of the plugin trigger provider + # Decrypt existing credentials for merging + credential_encrypter, _ = create_trigger_provider_encrypter_for_subscription( + tenant_id=tenant_id, + controller=provider_controller, + subscription=subscription, + ) + decrypted_credentials = dict(credential_encrypter.decrypt(subscription.credentials)) - # FALLBACK: If the update api is not implemented, delete the previous subscription and create a new one + # Merge credentials: if caller passed HIDDEN_VALUE, retain existing decrypted value + merged_credentials: dict[str, Any] = { + key: value if value != HIDDEN_VALUE else decrypted_credentials.get(key, UNKNOWN_VALUE) + for key, value in credentials.items() + } - # Delete the previous subscription - user_id = subscription.user_id - TriggerManager.unsubscribe_trigger( - tenant_id=tenant_id, - user_id=user_id, - provider_id=provider_id, - subscription=subscription.to_entity(), - credentials=subscription.credentials, - credential_type=credential_type, - ) + user_id = subscription.user_id - # Create a new subscription with the same subscription_id and endpoint_id - new_subscription: TriggerSubscriptionEntity = TriggerManager.subscribe_trigger( - tenant_id=tenant_id, - user_id=user_id, - provider_id=provider_id, - endpoint=generate_plugin_trigger_endpoint_url(subscription.endpoint_id), - parameters=parameters, - credentials=credentials, - credential_type=credential_type, - ) - TriggerProviderService.update_trigger_subscription( - tenant_id=tenant_id, - subscription_id=subscription.id, - name=name, - parameters=parameters, - credentials=credentials, - properties=new_subscription.properties, - expires_at=new_subscription.expires_at, - ) + # TODO: Trying to invoke update api of the plugin trigger provider + + # FALLBACK: If the update api is not implemented, + # delete the previous subscription and create a new one + + # Unsubscribe the previous subscription (external call, but we'll handle errors) + try: + TriggerManager.unsubscribe_trigger( + tenant_id=tenant_id, + user_id=user_id, + provider_id=provider_id, + subscription=subscription.to_entity(), + credentials=decrypted_credentials, + credential_type=credential_type, + ) + except Exception as e: + logger.exception("Error unsubscribing trigger during rebuild", exc_info=e) + # Continue anyway - the subscription might already be deleted externally + + # Create a new subscription with the same subscription_id and endpoint_id (external call) + new_subscription: TriggerSubscriptionEntity = TriggerManager.subscribe_trigger( + tenant_id=tenant_id, + user_id=user_id, + provider_id=provider_id, + endpoint=generate_plugin_trigger_endpoint_url(subscription.endpoint_id), + parameters=parameters, + credentials=merged_credentials, + credential_type=credential_type, + ) + + # Update the subscription in the same transaction + # Inline update logic to reuse the same session + if name is not None and name != subscription.name: + existing = ( + session.query(TriggerSubscription) + .filter_by(tenant_id=tenant_id, provider_id=str(provider_id), name=name) + .first() + ) + if existing and existing.id != subscription.id: + raise ValueError(f"Subscription name '{name}' already exists for this provider") + subscription.name = name + + # Update parameters + subscription.parameters = dict(parameters) + + # Update credentials with merged (and encrypted) values + subscription.credentials = 
dict(credential_encrypter.encrypt(merged_credentials)) + + # Update properties + if new_subscription.properties: + properties_encrypter, _ = create_provider_encrypter( + tenant_id=tenant_id, + config=provider_controller.get_properties_schema(), + cache=NoOpProviderCredentialCache(), + ) + subscription.properties = dict(properties_encrypter.encrypt(dict(new_subscription.properties))) + + # Update expiration timestamp + if new_subscription.expires_at is not None: + subscription.expires_at = new_subscription.expires_at + + # Commit the transaction + session.commit() + + # Clear subscription cache + delete_cache_for_subscription( + tenant_id=tenant_id, + provider_id=subscription.provider_id, + subscription_id=subscription.id, + ) + + except Exception as e: + # Rollback on any error + session.rollback() + logger.exception("Failed to rebuild trigger subscription", exc_info=e) + raise diff --git a/api/services/trigger/webhook_service.py b/api/services/trigger/webhook_service.py index 5c4607d400..4159f5f8f4 100644 --- a/api/services/trigger/webhook_service.py +++ b/api/services/trigger/webhook_service.py @@ -863,10 +863,18 @@ class WebhookService: not_found_in_cache.append(node_id) continue - with Session(db.engine) as session: - try: - # lock the concurrent webhook trigger creation - redis_client.lock(f"{cls.__WEBHOOK_NODE_CACHE_KEY__}:apps:{app.id}:lock", timeout=10) + lock_key = f"{cls.__WEBHOOK_NODE_CACHE_KEY__}:apps:{app.id}:lock" + lock = redis_client.lock(lock_key, timeout=10) + lock_acquired = False + + try: + # acquire the lock with blocking and timeout + lock_acquired = lock.acquire(blocking=True, blocking_timeout=10) + if not lock_acquired: + logger.warning("Failed to acquire lock for webhook sync, app %s", app.id) + raise RuntimeError("Failed to acquire lock for webhook trigger synchronization") + + with Session(db.engine) as session: # fetch the non-cached nodes from DB all_records = session.scalars( select(WorkflowWebhookTrigger).where( @@ -903,11 +911,16 @@ class WebhookService: session.delete(nodes_id_in_db[node_id]) redis_client.delete(f"{cls.__WEBHOOK_NODE_CACHE_KEY__}:{app.id}:{node_id}") session.commit() - except Exception: - logger.exception("Failed to sync webhook relationships for app %s", app.id) - raise - finally: - redis_client.delete(f"{cls.__WEBHOOK_NODE_CACHE_KEY__}:apps:{app.id}:lock") + except Exception: + logger.exception("Failed to sync webhook relationships for app %s", app.id) + raise + finally: + # release the lock only if it was acquired + if lock_acquired: + try: + lock.release() + except Exception: + logger.exception("Failed to release lock for webhook sync, app %s", app.id) @classmethod def generate_webhook_id(cls) -> str: diff --git a/api/tests/test_containers_integration_tests/services/test_trigger_provider_service.py b/api/tests/test_containers_integration_tests/services/test_trigger_provider_service.py new file mode 100644 index 0000000000..8322b9414e --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/test_trigger_provider_service.py @@ -0,0 +1,682 @@ +from unittest.mock import MagicMock, patch + +import pytest +from faker import Faker + +from constants import HIDDEN_VALUE, UNKNOWN_VALUE +from core.plugin.entities.plugin_daemon import CredentialType +from core.trigger.entities.entities import Subscription as TriggerSubscriptionEntity +from extensions.ext_database import db +from models.provider_ids import TriggerProviderID +from models.trigger import TriggerSubscription +from services.trigger.trigger_provider_service import 
TriggerProviderService + + +class TestTriggerProviderService: + """Integration tests for TriggerProviderService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.trigger.trigger_provider_service.TriggerManager") as mock_trigger_manager, + patch("services.trigger.trigger_provider_service.redis_client") as mock_redis_client, + patch("services.trigger.trigger_provider_service.delete_cache_for_subscription") as mock_delete_cache, + patch("services.account_service.FeatureService") as mock_account_feature_service, + ): + # Setup default mock returns + mock_provider_controller = MagicMock() + mock_provider_controller.get_credential_schema_config.return_value = MagicMock() + mock_provider_controller.get_properties_schema.return_value = MagicMock() + mock_trigger_manager.get_trigger_provider.return_value = mock_provider_controller + + # Mock redis lock + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock(return_value=None) + mock_lock.__exit__ = MagicMock(return_value=None) + mock_redis_client.lock.return_value = mock_lock + + # Setup account feature service mock + mock_account_feature_service.get_system_features.return_value.is_allow_register = True + + yield { + "trigger_manager": mock_trigger_manager, + "redis_client": mock_redis_client, + "delete_cache": mock_delete_cache, + "provider_controller": mock_provider_controller, + "account_feature_service": mock_account_feature_service, + } + + def _create_test_account_and_tenant(self, db_session_with_containers, mock_external_service_dependencies): + """ + Helper method to create a test account and tenant for testing. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + + Returns: + tuple: (account, tenant) - Created account and tenant instances + """ + fake = Faker() + + from services.account_service import AccountService, TenantService + + # Setup mocks for account creation + mock_external_service_dependencies[ + "account_feature_service" + ].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies[ + "trigger_manager" + ].get_trigger_provider.return_value = mock_external_service_dependencies["provider_controller"] + + # Create account and tenant + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + return account, tenant + + def _create_test_subscription( + self, + db_session_with_containers, + tenant_id, + user_id, + provider_id, + credential_type, + credentials, + mock_external_service_dependencies, + ): + """ + Helper method to create a test trigger subscription. 
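+ Note: credentials are encrypted with the mocked provider controller's
+ credential schema before the row is inserted, so tests can later
+ decrypt the stored row and assert on merged values.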
+ + Args: + db_session_with_containers: Database session + tenant_id: Tenant ID + user_id: User ID + provider_id: Provider ID + credential_type: Credential type + credentials: Credentials dict + mock_external_service_dependencies: Mock dependencies + + Returns: + TriggerSubscription: Created subscription instance + """ + fake = Faker() + from core.helper.provider_cache import NoOpProviderCredentialCache + from core.helper.provider_encryption import create_provider_encrypter + + # Use mock provider controller to encrypt credentials + provider_controller = mock_external_service_dependencies["provider_controller"] + + # Create encrypter for credentials + credential_encrypter, _ = create_provider_encrypter( + tenant_id=tenant_id, + config=provider_controller.get_credential_schema_config(credential_type), + cache=NoOpProviderCredentialCache(), + ) + + subscription = TriggerSubscription( + name=fake.word(), + tenant_id=tenant_id, + user_id=user_id, + provider_id=str(provider_id), + endpoint_id=fake.uuid4(), + parameters={"param1": "value1"}, + properties={"prop1": "value1"}, + credentials=dict(credential_encrypter.encrypt(credentials)), + credential_type=credential_type.value, + credential_expires_at=-1, + expires_at=-1, + ) + + db.session.add(subscription) + db.session.commit() + db.session.refresh(subscription) + + return subscription + + def test_rebuild_trigger_subscription_success_with_merged_credentials( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful rebuild with credential merging (HIDDEN_VALUE handling). + + This test verifies: + - Credentials are properly merged (HIDDEN_VALUE replaced with existing values) + - Single transaction wraps all operations + - Merged credentials are used for subscribe and update + - Database state is correctly updated + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + # Create initial subscription with credentials + original_credentials = {"api_key": "original-secret-key", "api_secret": "original-secret"} + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + original_credentials, + mock_external_service_dependencies, + ) + + # Prepare new credentials with HIDDEN_VALUE for api_key (should keep original) + # and new value for api_secret (should update) + new_credentials = { + "api_key": HIDDEN_VALUE, # Should be replaced with original + "api_secret": "new-secret-value", # Should be updated + } + + # Mock subscribe_trigger to return a new subscription entity + new_subscription_entity = TriggerSubscriptionEntity( + endpoint=subscription.endpoint_id, + parameters={"param1": "value1"}, + properties={"prop1": "new_prop_value"}, + expires_at=1234567890, + ) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.return_value = new_subscription_entity + + # Mock unsubscribe_trigger + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.return_value = MagicMock() + + # Execute rebuild + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials=new_credentials, + parameters={"param1": "updated_value"}, + name="updated_name", + ) + + # Verify unsubscribe was called with decrypted 
original credentials + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.assert_called_once() + unsubscribe_call_args = mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.call_args + assert unsubscribe_call_args.kwargs["tenant_id"] == tenant.id + assert unsubscribe_call_args.kwargs["provider_id"] == provider_id + assert unsubscribe_call_args.kwargs["credential_type"] == credential_type + + # Verify subscribe was called with merged credentials (api_key from original, api_secret new) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.assert_called_once() + subscribe_call_args = mock_external_service_dependencies["trigger_manager"].subscribe_trigger.call_args + subscribe_credentials = subscribe_call_args.kwargs["credentials"] + assert subscribe_credentials["api_key"] == original_credentials["api_key"] # Merged from original + assert subscribe_credentials["api_secret"] == "new-secret-value" # New value + + # Verify database state was updated + db.session.refresh(subscription) + assert subscription.name == "updated_name" + assert subscription.parameters == {"param1": "updated_value"} + + # Verify credentials in DB were updated with merged values (decrypt to check) + from core.helper.provider_cache import NoOpProviderCredentialCache + from core.helper.provider_encryption import create_provider_encrypter + + # Use mock provider controller to decrypt credentials + provider_controller = mock_external_service_dependencies["provider_controller"] + credential_encrypter, _ = create_provider_encrypter( + tenant_id=tenant.id, + config=provider_controller.get_credential_schema_config(credential_type), + cache=NoOpProviderCredentialCache(), + ) + decrypted_db_credentials = dict(credential_encrypter.decrypt(subscription.credentials)) + assert decrypted_db_credentials["api_key"] == original_credentials["api_key"] + assert decrypted_db_credentials["api_secret"] == "new-secret-value" + + # Verify cache was cleared + mock_external_service_dependencies["delete_cache"].assert_called_once_with( + tenant_id=tenant.id, + provider_id=subscription.provider_id, + subscription_id=subscription.id, + ) + + def test_rebuild_trigger_subscription_with_all_new_credentials( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test rebuild when all credentials are new (no HIDDEN_VALUE). 
+ + This test verifies: + - All new credentials are used when no HIDDEN_VALUE is present + - Merged credentials contain only new values + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + # Create initial subscription + original_credentials = {"api_key": "original-key", "api_secret": "original-secret"} + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + original_credentials, + mock_external_service_dependencies, + ) + + # All new credentials (no HIDDEN_VALUE) + new_credentials = { + "api_key": "completely-new-key", + "api_secret": "completely-new-secret", + } + + new_subscription_entity = TriggerSubscriptionEntity( + endpoint=subscription.endpoint_id, + parameters={}, + properties={}, + expires_at=-1, + ) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.return_value = new_subscription_entity + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.return_value = MagicMock() + + # Execute rebuild + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials=new_credentials, + parameters={}, + ) + + # Verify subscribe was called with all new credentials + subscribe_call_args = mock_external_service_dependencies["trigger_manager"].subscribe_trigger.call_args + subscribe_credentials = subscribe_call_args.kwargs["credentials"] + assert subscribe_credentials["api_key"] == "completely-new-key" + assert subscribe_credentials["api_secret"] == "completely-new-secret" + + def test_rebuild_trigger_subscription_with_all_hidden_values( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test rebuild when all credentials are HIDDEN_VALUE (preserve all existing). 
+ + This test verifies: + - All HIDDEN_VALUE credentials are replaced with existing values + - Original credentials are preserved + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + original_credentials = {"api_key": "original-key", "api_secret": "original-secret"} + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + original_credentials, + mock_external_service_dependencies, + ) + + # All HIDDEN_VALUE (should preserve all original) + new_credentials = { + "api_key": HIDDEN_VALUE, + "api_secret": HIDDEN_VALUE, + } + + new_subscription_entity = TriggerSubscriptionEntity( + endpoint=subscription.endpoint_id, + parameters={}, + properties={}, + expires_at=-1, + ) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.return_value = new_subscription_entity + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.return_value = MagicMock() + + # Execute rebuild + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials=new_credentials, + parameters={}, + ) + + # Verify subscribe was called with all original credentials + subscribe_call_args = mock_external_service_dependencies["trigger_manager"].subscribe_trigger.call_args + subscribe_credentials = subscribe_call_args.kwargs["credentials"] + assert subscribe_credentials["api_key"] == original_credentials["api_key"] + assert subscribe_credentials["api_secret"] == original_credentials["api_secret"] + + def test_rebuild_trigger_subscription_with_missing_key_uses_unknown_value( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test rebuild when HIDDEN_VALUE is used for a key that doesn't exist in original. 
+ + This test verifies: + - UNKNOWN_VALUE is used when HIDDEN_VALUE key doesn't exist in original credentials + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + # Original has only api_key + original_credentials = {"api_key": "original-key"} + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + original_credentials, + mock_external_service_dependencies, + ) + + # HIDDEN_VALUE for non-existent key should use UNKNOWN_VALUE + new_credentials = { + "api_key": HIDDEN_VALUE, + "non_existent_key": HIDDEN_VALUE, # This key doesn't exist in original + } + + new_subscription_entity = TriggerSubscriptionEntity( + endpoint=subscription.endpoint_id, + parameters={}, + properties={}, + expires_at=-1, + ) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.return_value = new_subscription_entity + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.return_value = MagicMock() + + # Execute rebuild + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials=new_credentials, + parameters={}, + ) + + # Verify subscribe was called with original api_key and UNKNOWN_VALUE for missing key + subscribe_call_args = mock_external_service_dependencies["trigger_manager"].subscribe_trigger.call_args + subscribe_credentials = subscribe_call_args.kwargs["credentials"] + assert subscribe_credentials["api_key"] == original_credentials["api_key"] + assert subscribe_credentials["non_existent_key"] == UNKNOWN_VALUE + + def test_rebuild_trigger_subscription_rollback_on_error( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test that transaction is rolled back on error. 
+ + This test verifies: + - Database transaction is rolled back when an error occurs + - Original subscription state is preserved + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + original_credentials = {"api_key": "original-key"} + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + original_credentials, + mock_external_service_dependencies, + ) + + original_name = subscription.name + original_parameters = subscription.parameters.copy() + + # Make subscribe_trigger raise an error + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.side_effect = ValueError( + "Subscribe failed" + ) + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.return_value = MagicMock() + + # Execute rebuild and expect error + with pytest.raises(ValueError, match="Subscribe failed"): + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials={"api_key": "new-key"}, + parameters={}, + ) + + # Verify subscription state was not changed (rolled back) + db.session.refresh(subscription) + assert subscription.name == original_name + assert subscription.parameters == original_parameters + + def test_rebuild_trigger_subscription_unsubscribe_error_continues( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test that unsubscribe errors are handled gracefully and operation continues. + + This test verifies: + - Unsubscribe errors are caught and logged but don't stop the rebuild + - Rebuild continues even if unsubscribe fails + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + original_credentials = {"api_key": "original-key"} + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + original_credentials, + mock_external_service_dependencies, + ) + + # Make unsubscribe_trigger raise an error (should be caught and continue) + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.side_effect = ValueError( + "Unsubscribe failed" + ) + + new_subscription_entity = TriggerSubscriptionEntity( + endpoint=subscription.endpoint_id, + parameters={}, + properties={}, + expires_at=-1, + ) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.return_value = new_subscription_entity + + # Execute rebuild - should succeed despite unsubscribe error + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials={"api_key": "new-key"}, + parameters={}, + ) + + # Verify subscribe was still called (operation continued) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.assert_called_once() + + # Verify subscription was updated + db.session.refresh(subscription) + assert subscription.parameters == {} + + def test_rebuild_trigger_subscription_subscription_not_found( + self, db_session_with_containers, 
mock_external_service_dependencies + ): + """ + Test error when subscription is not found. + + This test verifies: + - Proper error is raised when subscription doesn't exist + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + fake_subscription_id = fake.uuid4() + + with pytest.raises(ValueError, match="not found"): + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=fake_subscription_id, + credentials={}, + parameters={}, + ) + + def test_rebuild_trigger_subscription_provider_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error when provider is not found. + + This test verifies: + - Proper error is raised when provider doesn't exist + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("non_existent_org/non_existent_plugin/non_existent_provider") + + # Make get_trigger_provider return None + mock_external_service_dependencies["trigger_manager"].get_trigger_provider.return_value = None + + with pytest.raises(ValueError, match="Provider.*not found"): + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=fake.uuid4(), + credentials={}, + parameters={}, + ) + + def test_rebuild_trigger_subscription_unsupported_credential_type( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error when credential type is not supported for rebuild. + + This test verifies: + - Proper error is raised for unsupported credential types (not OAUTH2 or API_KEY) + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.UNAUTHORIZED # Not supported + + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + {}, + mock_external_service_dependencies, + ) + + with pytest.raises(ValueError, match="Credential type not supported for rebuild"): + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials={}, + parameters={}, + ) + + def test_rebuild_trigger_subscription_name_uniqueness_check( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test that name uniqueness is checked when updating name. 
+ + This test verifies: + - Error is raised when new name conflicts with existing subscription + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + # Create first subscription + subscription1 = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + {"api_key": "key1"}, + mock_external_service_dependencies, + ) + + # Create second subscription with different name + subscription2 = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + {"api_key": "key2"}, + mock_external_service_dependencies, + ) + + new_subscription_entity = TriggerSubscriptionEntity( + endpoint=subscription2.endpoint_id, + parameters={}, + properties={}, + expires_at=-1, + ) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.return_value = new_subscription_entity + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.return_value = MagicMock() + + # Try to rename subscription2 to subscription1's name (should fail) + with pytest.raises(ValueError, match="already exists"): + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription2.id, + credentials={"api_key": "new-key"}, + parameters={}, + name=subscription1.name, # Conflicting name + ) diff --git a/api/tests/test_containers_integration_tests/services/tools/test_workflow_tools_manage_service.py b/api/tests/test_containers_integration_tests/services/tools/test_workflow_tools_manage_service.py index 71cedd26c4..3d46735a1a 100644 --- a/api/tests/test_containers_integration_tests/services/tools/test_workflow_tools_manage_service.py +++ b/api/tests/test_containers_integration_tests/services/tools/test_workflow_tools_manage_service.py @@ -705,3 +705,207 @@ class TestWorkflowToolManageService: db.session.refresh(created_tool) assert created_tool.name == first_tool_name assert created_tool.updated_at is not None + + def test_create_workflow_tool_with_file_parameter_default( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test workflow tool creation with FILE parameter having a file object as default. + + This test verifies: + - FILE parameters can have file object defaults + - The default value (dict with id/base64Url) is properly handled + - Tool creation succeeds without Pydantic validation errors + + Related issue: Array[File] default value causes Pydantic validation errors. 
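+
+        A minimal sketch of the failing constraint (PluginParameter is the
+        model named in the sibling test below; the rest is illustrative)::
+
+            from pydantic import BaseModel
+
+            class PluginParameter(BaseModel):
+                default: float | int | str | bool | None = None
+
+            # A file-object default is a dict, which this scalar type rejects:
+            PluginParameter(default={"id": "...", "base64Url": ""})  # ValidationError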
+ """ + fake = Faker() + + # Create test data + app, account, workflow = self._create_test_app_and_account( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create workflow graph with a FILE variable that has a default value + workflow_graph = { + "nodes": [ + { + "id": "start_node", + "data": { + "type": "start", + "variables": [ + { + "variable": "document", + "label": "Document", + "type": "file", + "required": False, + "default": {"id": fake.uuid4(), "base64Url": ""}, + } + ], + }, + } + ] + } + workflow.graph = json.dumps(workflow_graph) + + # Setup workflow tool parameters with FILE type + file_parameters = [ + { + "name": "document", + "description": "Upload a document", + "form": "form", + "type": "file", + "required": False, + } + ] + + # Execute the method under test + # Note: from_db is mocked, so this test primarily validates the parameter configuration + result = WorkflowToolManageService.create_workflow_tool( + user_id=account.id, + tenant_id=account.current_tenant.id, + workflow_app_id=app.id, + name=fake.word(), + label=fake.word(), + icon={"type": "emoji", "emoji": "šŸ“„"}, + description=fake.text(max_nb_chars=200), + parameters=file_parameters, + ) + + # Verify the result + assert result == {"result": "success"} + + def test_create_workflow_tool_with_files_parameter_default( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test workflow tool creation with FILES (Array[File]) parameter having file objects as default. + + This test verifies: + - FILES parameters can have a list of file objects as default + - The default value (list of dicts with id/base64Url) is properly handled + - Tool creation succeeds without Pydantic validation errors + + Related issue: Array[File] default value causes 4 Pydantic validation errors + because PluginParameter.default only accepts Union[float, int, str, bool] | None. + """ + fake = Faker() + + # Create test data + app, account, workflow = self._create_test_app_and_account( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create workflow graph with a FILE_LIST variable that has a default value + workflow_graph = { + "nodes": [ + { + "id": "start_node", + "data": { + "type": "start", + "variables": [ + { + "variable": "documents", + "label": "Documents", + "type": "file-list", + "required": False, + "default": [ + {"id": fake.uuid4(), "base64Url": ""}, + {"id": fake.uuid4(), "base64Url": ""}, + ], + } + ], + }, + } + ] + } + workflow.graph = json.dumps(workflow_graph) + + # Setup workflow tool parameters with FILES type + files_parameters = [ + { + "name": "documents", + "description": "Upload multiple documents", + "form": "form", + "type": "files", + "required": False, + } + ] + + # Execute the method under test + # Note: from_db is mocked, so this test primarily validates the parameter configuration + result = WorkflowToolManageService.create_workflow_tool( + user_id=account.id, + tenant_id=account.current_tenant.id, + workflow_app_id=app.id, + name=fake.word(), + label=fake.word(), + icon={"type": "emoji", "emoji": "šŸ“"}, + description=fake.text(max_nb_chars=200), + parameters=files_parameters, + ) + + # Verify the result + assert result == {"result": "success"} + + def test_create_workflow_tool_db_commit_before_validation( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test that database commit happens before validation, causing DB pollution on validation failure. 
+
+        This test verifies the second bug:
+        - WorkflowToolProvider is committed to database BEFORE from_db validation
+        - If validation fails, the record remains in the database
+        - Subsequent attempts fail with "Tool already exists" error
+
+        This demonstrates why we need to validate BEFORE database commit.
+        """
+
+        fake = Faker()
+
+        # Create test data
+        app, account, workflow = self._create_test_app_and_account(
+            db_session_with_containers, mock_external_service_dependencies
+        )
+
+        tool_name = fake.word()
+
+        # Mock from_db to raise validation error
+        mock_external_service_dependencies["workflow_tool_provider_controller"].from_db.side_effect = ValueError(
+            "Validation failed: default parameter type mismatch"
+        )
+
+        # Attempt to create workflow tool (will fail at validation stage)
+        with pytest.raises(ValueError) as exc_info:
+            WorkflowToolManageService.create_workflow_tool(
+                user_id=account.id,
+                tenant_id=account.current_tenant.id,
+                workflow_app_id=app.id,
+                name=tool_name,
+                label=fake.word(),
+                icon={"type": "emoji", "emoji": "šŸ”§"},
+                description=fake.text(max_nb_chars=200),
+                parameters=self._create_test_workflow_tool_parameters(),
+            )
+
+        assert "Validation failed" in str(exc_info.value)
+
+        # Count matching records to check whether the failed create polluted the database
+        from extensions.ext_database import db
+
+        tool_count = (
+            db.session.query(WorkflowToolProvider)
+            .where(
+                WorkflowToolProvider.tenant_id == account.current_tenant.id,
+                WorkflowToolProvider.name == tool_name,
+            )
+            .count()
+        )
+
+        # Expected behavior: the transaction is rolled back and tool_count is 0.
+        # Due to the bug (the commit happens before from_db validation), the record
+        # may currently remain, so the assertion is left commented out to document
+        # the bug rather than fail the suite.
+        # assert tool_count == 0  # Expected after fix
diff --git a/api/tests/unit_tests/controllers/common/test_file_response.py b/api/tests/unit_tests/controllers/common/test_file_response.py
new file mode 100644
index 0000000000..2487c362bd
--- /dev/null
+++ b/api/tests/unit_tests/controllers/common/test_file_response.py
@@ -0,0 +1,46 @@
+from flask import Response
+
+from controllers.common.file_response import enforce_download_for_html, is_html_content
+
+
+class TestFileResponseHelpers:
+    def test_is_html_content_detects_mime_type(self):
+        mime_type = "text/html; charset=UTF-8"
+
+        result = is_html_content(mime_type, filename="file.txt", extension="txt")
+
+        assert result is True
+
+    def test_is_html_content_detects_extension(self):
+        result = is_html_content("text/plain", filename="report.html", extension=None)
+
+        assert result is True
+
+    def test_enforce_download_for_html_sets_headers(self):
+        response = Response("payload", mimetype="text/html")
+
+        updated = enforce_download_for_html(
+            response,
+            mime_type="text/html",
+            filename="unsafe.html",
+            extension="html",
+        )
+
+        assert updated is True
+        assert "attachment" in response.headers["Content-Disposition"]
+        assert response.headers["Content-Type"] == "application/octet-stream"
+        assert response.headers["X-Content-Type-Options"] == "nosniff"
+
+    def test_enforce_download_for_html_no_change_for_non_html(self):
+        response = Response("payload", mimetype="text/plain")
+
+        updated = enforce_download_for_html(
+            response,
+            mime_type="text/plain",
+            filename="notes.txt",
+            extension="txt",
+        )
+
+        assert updated is False
+        assert "Content-Disposition" not in response.headers
+        assert "X-Content-Type-Options" not in response.headers
diff
--git a/api/tests/unit_tests/controllers/console/workspace/__init__.py b/api/tests/unit_tests/controllers/console/workspace/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/unit_tests/controllers/console/workspace/test_load_balancing_config.py b/api/tests/unit_tests/controllers/console/workspace/test_load_balancing_config.py new file mode 100644 index 0000000000..59b6614d5e --- /dev/null +++ b/api/tests/unit_tests/controllers/console/workspace/test_load_balancing_config.py @@ -0,0 +1,145 @@ +"""Unit tests for load balancing credential validation APIs.""" + +from __future__ import annotations + +import builtins +import importlib +import sys +from types import SimpleNamespace +from unittest.mock import MagicMock + +import pytest +from flask import Flask +from flask.views import MethodView +from werkzeug.exceptions import Forbidden + +from core.model_runtime.entities.model_entities import ModelType +from core.model_runtime.errors.validate import CredentialsValidateFailedError + +if not hasattr(builtins, "MethodView"): + builtins.MethodView = MethodView # type: ignore[attr-defined] + +from models.account import TenantAccountRole + + +@pytest.fixture +def app() -> Flask: + app = Flask(__name__) + app.config["TESTING"] = True + return app + + +@pytest.fixture +def load_balancing_module(monkeypatch: pytest.MonkeyPatch): + """Reload controller module with lightweight decorators for testing.""" + + from controllers.console import console_ns, wraps + from libs import login + + def _noop(func): + return func + + monkeypatch.setattr(login, "login_required", _noop) + monkeypatch.setattr(wraps, "setup_required", _noop) + monkeypatch.setattr(wraps, "account_initialization_required", _noop) + + def _noop_route(*args, **kwargs): # type: ignore[override] + def _decorator(cls): + return cls + + return _decorator + + monkeypatch.setattr(console_ns, "route", _noop_route) + + module_name = "controllers.console.workspace.load_balancing_config" + sys.modules.pop(module_name, None) + module = importlib.import_module(module_name) + return module + + +def _mock_user(role: TenantAccountRole) -> SimpleNamespace: + return SimpleNamespace(current_role=role) + + +def _prepare_context(module, monkeypatch: pytest.MonkeyPatch, role=TenantAccountRole.OWNER): + user = _mock_user(role) + monkeypatch.setattr(module, "current_account_with_tenant", lambda: (user, "tenant-123")) + mock_service = MagicMock() + monkeypatch.setattr(module, "ModelLoadBalancingService", lambda: mock_service) + return mock_service + + +def _request_payload(): + return {"model": "gpt-4o", "model_type": ModelType.LLM, "credentials": {"api_key": "sk-***"}} + + +def test_validate_credentials_success(app: Flask, load_balancing_module, monkeypatch: pytest.MonkeyPatch): + service = _prepare_context(load_balancing_module, monkeypatch) + + with app.test_request_context( + "/workspaces/current/model-providers/openai/models/load-balancing-configs/credentials-validate", + method="POST", + json=_request_payload(), + ): + response = load_balancing_module.LoadBalancingCredentialsValidateApi().post(provider="openai") + + assert response == {"result": "success"} + service.validate_load_balancing_credentials.assert_called_once_with( + tenant_id="tenant-123", + provider="openai", + model="gpt-4o", + model_type=ModelType.LLM, + credentials={"api_key": "sk-***"}, + ) + + +def test_validate_credentials_returns_error_message(app: Flask, load_balancing_module, monkeypatch: pytest.MonkeyPatch): + service = 
_prepare_context(load_balancing_module, monkeypatch) + service.validate_load_balancing_credentials.side_effect = CredentialsValidateFailedError("invalid credentials") + + with app.test_request_context( + "/workspaces/current/model-providers/openai/models/load-balancing-configs/credentials-validate", + method="POST", + json=_request_payload(), + ): + response = load_balancing_module.LoadBalancingCredentialsValidateApi().post(provider="openai") + + assert response == {"result": "error", "error": "invalid credentials"} + + +def test_validate_credentials_requires_privileged_role( + app: Flask, load_balancing_module, monkeypatch: pytest.MonkeyPatch +): + _prepare_context(load_balancing_module, monkeypatch, role=TenantAccountRole.NORMAL) + + with app.test_request_context( + "/workspaces/current/model-providers/openai/models/load-balancing-configs/credentials-validate", + method="POST", + json=_request_payload(), + ): + api = load_balancing_module.LoadBalancingCredentialsValidateApi() + with pytest.raises(Forbidden): + api.post(provider="openai") + + +def test_validate_credentials_with_config_id(app: Flask, load_balancing_module, monkeypatch: pytest.MonkeyPatch): + service = _prepare_context(load_balancing_module, monkeypatch) + + with app.test_request_context( + "/workspaces/current/model-providers/openai/models/load-balancing-configs/cfg-1/credentials-validate", + method="POST", + json=_request_payload(), + ): + response = load_balancing_module.LoadBalancingConfigCredentialsValidateApi().post( + provider="openai", config_id="cfg-1" + ) + + assert response == {"result": "success"} + service.validate_load_balancing_credentials.assert_called_once_with( + tenant_id="tenant-123", + provider="openai", + model="gpt-4o", + model_type=ModelType.LLM, + credentials={"api_key": "sk-***"}, + config_id="cfg-1", + ) diff --git a/api/tests/unit_tests/controllers/console/workspace/test_tool_provider.py b/api/tests/unit_tests/controllers/console/workspace/test_tool_provider.py new file mode 100644 index 0000000000..2b03813ef4 --- /dev/null +++ b/api/tests/unit_tests/controllers/console/workspace/test_tool_provider.py @@ -0,0 +1,103 @@ +import json +from unittest.mock import MagicMock, patch + +import pytest +from flask import Flask +from flask_restx import Api + +from controllers.console.workspace.tool_providers import ToolProviderMCPApi +from core.db.session_factory import configure_session_factory +from extensions.ext_database import db +from services.tools.mcp_tools_manage_service import ReconnectResult + + +# Backward-compat fixtures referenced by @pytest.mark.usefixtures in this file. +# They are intentionally no-ops because the test already patches the required +# behaviors explicitly via @patch and context managers below. 
+@pytest.fixture
+def _mock_cache():
+    return
+
+
+@pytest.fixture
+def _mock_user_tenant():
+    return
+
+
+@pytest.fixture
+def client():
+    app = Flask(__name__)
+    app.config["TESTING"] = True
+    app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
+    api = Api(app)
+    api.add_resource(ToolProviderMCPApi, "/console/api/workspaces/current/tool-provider/mcp")
+    db.init_app(app)
+    # Configure session factory used by controller code
+    with app.app_context():
+        configure_session_factory(db.engine)
+    return app.test_client()
+
+
+@patch(
+    "controllers.console.workspace.tool_providers.current_account_with_tenant", return_value=(MagicMock(id="u1"), "t1")
+)
+@patch("controllers.console.workspace.tool_providers.ToolProviderListCache.invalidate_cache", return_value=None)
+@patch("controllers.console.workspace.tool_providers.Session")
+@patch("controllers.console.workspace.tool_providers.MCPToolManageService._reconnect_with_url")
+@pytest.mark.usefixtures("_mock_cache", "_mock_user_tenant")
+def test_create_mcp_provider_populates_tools(
+    mock_reconnect, mock_session, mock_invalidate_cache, mock_current_account_with_tenant, client
+):
+    # Arrange: reconnect returns tools immediately
+    mock_reconnect.return_value = ReconnectResult(
+        authed=True,
+        tools=json.dumps(
+            [{"name": "ping", "description": "ok", "inputSchema": {"type": "object"}, "outputSchema": {}}]
+        ),
+        encrypted_credentials="{}",
+    )
+
+    # Fake service.create_provider -> returns object with id for reload
+    svc = MagicMock()
+    create_result = MagicMock()
+    create_result.id = "provider-1"
+    svc.create_provider.return_value = create_result
+    svc.get_provider.return_value = MagicMock(id="provider-1", tenant_id="t1")  # used by reload path
+    mock_session.return_value.__enter__.return_value = MagicMock()
+    # Patch MCPToolManageService constructed inside controller
+    with patch("controllers.console.workspace.tool_providers.MCPToolManageService", return_value=svc):
+        payload = {
+            "server_url": "http://example.com/mcp",
+            "name": "demo",
+            "icon": "šŸ˜€",
+            "icon_type": "emoji",
+            "icon_background": "#000",
+            "server_identifier": "demo-sid",
+            "configuration": {"timeout": 5, "sse_read_timeout": 30},
+            "headers": {},
+            "authentication": {},
+        }
+        # Act
+        with (
+            patch("controllers.console.wraps.dify_config.EDITION", "CLOUD"),  # bypass setup_required DB check
+            patch("controllers.console.wraps.current_account_with_tenant", return_value=(MagicMock(id="u1"), "t1")),
+            patch("libs.login.check_csrf_token", return_value=None),  # bypass CSRF in login_required
+            patch("libs.login._get_user", return_value=MagicMock(id="u1", is_authenticated=True)),  # login
+            patch(
+                "services.tools.tools_transform_service.ToolTransformService.mcp_provider_to_user_provider",
+                return_value={"id": "provider-1", "tools": [{"name": "ping"}]},
+            ),
+        ):
+            resp = client.post(
+                "/console/api/workspaces/current/tool-provider/mcp",
+                data=json.dumps(payload),
+                content_type="application/json",
+            )
+
+    # Assert
+    assert resp.status_code == 200
+    body = resp.get_json()
+    assert body.get("id") == "provider-1"
+    # If the transform result includes a "tools" field, make sure it is non-empty
+    assert isinstance(body.get("tools"), list)
+    assert body["tools"]
diff --git a/api/tests/unit_tests/controllers/service_api/app/test_file_preview.py b/api/tests/unit_tests/controllers/service_api/app/test_file_preview.py
index acff191c79..1bdcd0f1a3 100644
--- a/api/tests/unit_tests/controllers/service_api/app/test_file_preview.py
+++ b/api/tests/unit_tests/controllers/service_api/app/test_file_preview.py
@@ -41,6
+41,7 @@ class TestFilePreviewApi: upload_file = Mock(spec=UploadFile) upload_file.id = str(uuid.uuid4()) upload_file.name = "test_file.jpg" + upload_file.extension = "jpg" upload_file.mime_type = "image/jpeg" upload_file.size = 1024 upload_file.key = "storage/key/test_file.jpg" @@ -210,6 +211,19 @@ class TestFilePreviewApi: assert mock_upload_file.name in response.headers["Content-Disposition"] assert response.headers["Content-Type"] == "application/octet-stream" + def test_build_file_response_html_forces_attachment(self, file_preview_api, mock_upload_file): + """Test HTML files are forced to download""" + mock_generator = Mock() + mock_upload_file.mime_type = "text/html" + mock_upload_file.name = "unsafe.html" + mock_upload_file.extension = "html" + + response = file_preview_api._build_file_response(mock_generator, mock_upload_file, False) + + assert "attachment" in response.headers["Content-Disposition"] + assert response.headers["Content-Type"] == "application/octet-stream" + assert response.headers["X-Content-Type-Options"] == "nosniff" + def test_build_file_response_audio_video(self, file_preview_api, mock_upload_file): """Test file response building for audio/video files""" mock_generator = Mock() diff --git a/api/tests/unit_tests/core/helper/test_tool_provider_cache.py b/api/tests/unit_tests/core/helper/test_tool_provider_cache.py index 00f7c9d7e9..d237c68f35 100644 --- a/api/tests/unit_tests/core/helper/test_tool_provider_cache.py +++ b/api/tests/unit_tests/core/helper/test_tool_provider_cache.py @@ -96,9 +96,6 @@ class TestToolProviderListCache: ToolProviderListCache.invalidate_cache(tenant_id) - mock_redis_client.scan_iter.assert_called_once_with(f"tool_providers:tenant_id:{tenant_id}:*") - mock_redis_client.delete.assert_called_once_with(*mock_keys) - def test_invalidate_cache_no_keys(self, mock_redis_client): """Test invalidate cache - no cache keys for tenant""" tenant_id = "tenant_123" diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/pgvector/__init__.py b/api/tests/unit_tests/core/rag/datasource/vdb/pgvector/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/pgvector/test_pgvector.py b/api/tests/unit_tests/core/rag/datasource/vdb/pgvector/test_pgvector.py new file mode 100644 index 0000000000..4998a9858f --- /dev/null +++ b/api/tests/unit_tests/core/rag/datasource/vdb/pgvector/test_pgvector.py @@ -0,0 +1,327 @@ +import unittest +from unittest.mock import MagicMock, patch + +import pytest + +from core.rag.datasource.vdb.pgvector.pgvector import ( + PGVector, + PGVectorConfig, +) + + +class TestPGVector(unittest.TestCase): + def setUp(self): + self.config = PGVectorConfig( + host="localhost", + port=5432, + user="test_user", + password="test_password", + database="test_db", + min_connection=1, + max_connection=5, + pg_bigm=False, + ) + self.collection_name = "test_collection" + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + def test_init(self, mock_pool_class): + """Test PGVector initialization.""" + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + pgvector = PGVector(self.collection_name, self.config) + + assert pgvector._collection_name == self.collection_name + assert pgvector.table_name == f"embedding_{self.collection_name}" + assert pgvector.get_type() == "pgvector" + assert pgvector.pool is not None + assert pgvector.pg_bigm is False + assert pgvector.index_hash is not None + + 
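+    # The tests below inspect mock_cursor.execute() calls for SQL shaped roughly
+    # like the following (inferred from the assertions, not copied from PGVector):
+    #
+    #   CREATE TABLE embedding_<collection> (..., embedding vector(<dim>))
+    #   CREATE INDEX ... USING hnsw (embedding ...)      -- only when dim <= 2000
+    #   CREATE INDEX ... USING gin (text gin_bigm_ops)   -- only when pg_bigm=True
+    #   CREATE EXTENSION vector                          -- only when missing
+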
@patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + def test_init_with_pg_bigm(self, mock_pool_class): + """Test PGVector initialization with pg_bigm enabled.""" + config = PGVectorConfig( + host="localhost", + port=5432, + user="test_user", + password="test_password", + database="test_db", + min_connection=1, + max_connection=5, + pg_bigm=True, + ) + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + pgvector = PGVector(self.collection_name, config) + + assert pgvector.pg_bigm is True + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + @patch("core.rag.datasource.vdb.pgvector.pgvector.redis_client") + def test_create_collection_basic(self, mock_redis, mock_pool_class): + """Test basic collection creation.""" + # Mock Redis operations + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock() + mock_lock.__exit__ = MagicMock() + mock_redis.lock.return_value = mock_lock + mock_redis.get.return_value = None + mock_redis.set.return_value = None + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.return_value = [1] # vector extension exists + + pgvector = PGVector(self.collection_name, self.config) + pgvector._create_collection(1536) + + # Verify SQL execution calls + assert mock_cursor.execute.called + + # Check that CREATE TABLE was called with correct dimension + create_table_calls = [call for call in mock_cursor.execute.call_args_list if "CREATE TABLE" in str(call)] + assert len(create_table_calls) == 1 + assert "vector(1536)" in create_table_calls[0][0][0] + + # Check that CREATE INDEX was called (dimension <= 2000) + create_index_calls = [ + call for call in mock_cursor.execute.call_args_list if "CREATE INDEX" in str(call) and "hnsw" in str(call) + ] + assert len(create_index_calls) == 1 + + # Verify Redis cache was set + mock_redis.set.assert_called_once() + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + @patch("core.rag.datasource.vdb.pgvector.pgvector.redis_client") + def test_create_collection_with_large_dimension(self, mock_redis, mock_pool_class): + """Test collection creation with dimension > 2000 (no HNSW index).""" + # Mock Redis operations + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock() + mock_lock.__exit__ = MagicMock() + mock_redis.lock.return_value = mock_lock + mock_redis.get.return_value = None + mock_redis.set.return_value = None + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.return_value = [1] # vector extension exists + + pgvector = PGVector(self.collection_name, self.config) + pgvector._create_collection(3072) # Dimension > 2000 + + # Check that CREATE TABLE was called + create_table_calls = [call for call in mock_cursor.execute.call_args_list if "CREATE TABLE" in str(call)] + assert len(create_table_calls) == 1 + assert "vector(3072)" in create_table_calls[0][0][0] + + # Check that HNSW index was NOT created (dimension > 2000) + hnsw_index_calls = [call for call in mock_cursor.execute.call_args_list if "hnsw" in 
str(call)] + assert len(hnsw_index_calls) == 0 + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + @patch("core.rag.datasource.vdb.pgvector.pgvector.redis_client") + def test_create_collection_with_pg_bigm(self, mock_redis, mock_pool_class): + """Test collection creation with pg_bigm enabled.""" + config = PGVectorConfig( + host="localhost", + port=5432, + user="test_user", + password="test_password", + database="test_db", + min_connection=1, + max_connection=5, + pg_bigm=True, + ) + + # Mock Redis operations + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock() + mock_lock.__exit__ = MagicMock() + mock_redis.lock.return_value = mock_lock + mock_redis.get.return_value = None + mock_redis.set.return_value = None + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.return_value = [1] # vector extension exists + + pgvector = PGVector(self.collection_name, config) + pgvector._create_collection(1536) + + # Check that pg_bigm index was created + bigm_index_calls = [call for call in mock_cursor.execute.call_args_list if "gin_bigm_ops" in str(call)] + assert len(bigm_index_calls) == 1 + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + @patch("core.rag.datasource.vdb.pgvector.pgvector.redis_client") + def test_create_collection_creates_vector_extension(self, mock_redis, mock_pool_class): + """Test that vector extension is created if it doesn't exist.""" + # Mock Redis operations + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock() + mock_lock.__exit__ = MagicMock() + mock_redis.lock.return_value = mock_lock + mock_redis.get.return_value = None + mock_redis.set.return_value = None + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + # First call: vector extension doesn't exist + mock_cursor.fetchone.return_value = None + + pgvector = PGVector(self.collection_name, self.config) + pgvector._create_collection(1536) + + # Check that CREATE EXTENSION was called + create_extension_calls = [ + call for call in mock_cursor.execute.call_args_list if "CREATE EXTENSION vector" in str(call) + ] + assert len(create_extension_calls) == 1 + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + @patch("core.rag.datasource.vdb.pgvector.pgvector.redis_client") + def test_create_collection_with_cache_hit(self, mock_redis, mock_pool_class): + """Test that collection creation is skipped when cache exists.""" + # Mock Redis operations - cache exists + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock() + mock_lock.__exit__ = MagicMock() + mock_redis.lock.return_value = mock_lock + mock_redis.get.return_value = 1 # Cache exists + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + + pgvector = PGVector(self.collection_name, self.config) + pgvector._create_collection(1536) + + # Check that no 
SQL was executed (early return due to cache) + assert mock_cursor.execute.call_count == 0 + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + @patch("core.rag.datasource.vdb.pgvector.pgvector.redis_client") + def test_create_collection_with_redis_lock(self, mock_redis, mock_pool_class): + """Test that Redis lock is used during collection creation.""" + # Mock Redis operations + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock() + mock_lock.__exit__ = MagicMock() + mock_redis.lock.return_value = mock_lock + mock_redis.get.return_value = None + mock_redis.set.return_value = None + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.return_value = [1] # vector extension exists + + pgvector = PGVector(self.collection_name, self.config) + pgvector._create_collection(1536) + + # Verify Redis lock was acquired with correct lock name + mock_redis.lock.assert_called_once_with("vector_indexing_test_collection_lock", timeout=20) + + # Verify lock context manager was entered and exited + mock_lock.__enter__.assert_called_once() + mock_lock.__exit__.assert_called_once() + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + def test_get_cursor_context_manager(self, mock_pool_class): + """Test that _get_cursor properly manages connection lifecycle.""" + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + + pgvector = PGVector(self.collection_name, self.config) + + with pgvector._get_cursor() as cur: + assert cur == mock_cursor + + # Verify connection lifecycle methods were called + mock_pool.getconn.assert_called_once() + mock_cursor.close.assert_called_once() + mock_conn.commit.assert_called_once() + mock_pool.putconn.assert_called_once_with(mock_conn) + + +@pytest.mark.parametrize( + "invalid_config_override", + [ + {"host": ""}, # Test empty host + {"port": 0}, # Test invalid port + {"user": ""}, # Test empty user + {"password": ""}, # Test empty password + {"database": ""}, # Test empty database + {"min_connection": 0}, # Test invalid min_connection + {"max_connection": 0}, # Test invalid max_connection + {"min_connection": 10, "max_connection": 5}, # Test min > max + ], +) +def test_config_validation_parametrized(invalid_config_override): + """Test configuration validation for various invalid inputs using parametrize.""" + config = { + "host": "localhost", + "port": 5432, + "user": "test_user", + "password": "test_password", + "database": "test_db", + "min_connection": 1, + "max_connection": 5, + } + config.update(invalid_config_override) + + with pytest.raises(ValueError): + PGVectorConfig(**config) + + +if __name__ == "__main__": + unittest.main() diff --git a/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_metadata_filter.py b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_metadata_filter.py new file mode 100644 index 0000000000..07d6e51e4b --- /dev/null +++ b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_metadata_filter.py @@ -0,0 +1,873 @@ +""" +Unit tests for DatasetRetrieval.process_metadata_filter_func. 
+
+This module provides comprehensive test coverage for the process_metadata_filter_func
+method in the DatasetRetrieval class, which is responsible for building SQLAlchemy
+filter expressions based on metadata filtering conditions.
+
+Conditions Tested:
+==================
+1. **String Conditions**: contains, not contains, start with, end with
+2. **Equality Conditions**: is / =, is not / ≠
+3. **Null Conditions**: empty, not empty
+4. **Numeric Comparisons**: before / <, after / >, ≤ / <=, ≄ / >=
+5. **List Conditions**: in
+6. **Edge Cases**: None values, different data types (str, int, float)
+
+Test Architecture:
+==================
+- Direct instantiation of DatasetRetrieval
+- Mocking of DatasetDocument model attributes
+- Verification of SQLAlchemy filter expressions
+- Follows Arrange-Act-Assert (AAA) pattern
+
+Running Tests:
+==============
+    # Run all tests in this module
+    uv run --project api pytest \
+        api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_metadata_filter.py -v
+
+    # Run a specific test
+    uv run --project api pytest \
+        api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_metadata_filter.py::\
+TestProcessMetadataFilterFunc::test_contains_condition_string_value -v
+"""
+
+from unittest.mock import MagicMock
+
+import pytest
+
+from core.rag.retrieval.dataset_retrieval import DatasetRetrieval
+
+
+class TestProcessMetadataFilterFunc:
+    """
+    Comprehensive test suite for process_metadata_filter_func method.
+
+    This test class validates all metadata filtering conditions supported by
+    the DatasetRetrieval class, including string operations, numeric comparisons,
+    null checks, and list operations.
+
+    Method Signature:
+    ==================
+    def process_metadata_filter_func(
+        self, sequence: int, condition: str, metadata_name: str, value: Any | None, filters: list
+    ) -> list:
+
+    The method builds SQLAlchemy filter expressions by:
+    1. Validating value is not None (except for empty/not empty conditions)
+    2. Using DatasetDocument.doc_metadata JSON field operations
+    3. Adding appropriate SQLAlchemy expressions to the filters list
+    4. Returning the updated filters list
+
+    Mocking Strategy:
+    ==================
+    - Mock DatasetDocument.doc_metadata to avoid database dependencies
+    - Verify filter expressions are created correctly
+    - Test with various data types (str, int, float, list)
+    """
+
+    @pytest.fixture
+    def retrieval(self):
+        """
+        Create a DatasetRetrieval instance for testing.
+
+        Returns:
+            DatasetRetrieval: Instance to test process_metadata_filter_func
+        """
+        return DatasetRetrieval()
+
+    @pytest.fixture
+    def mock_doc_metadata(self):
+        """
+        Mock the DatasetDocument.doc_metadata JSON field.
+
+        The method uses DatasetDocument.doc_metadata[metadata_name] to access
+        JSON fields. We mock this to avoid database dependencies.
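+
+        For reference, the real expressions being stood in for look roughly
+        like this (a sketch of SQLAlchemy JSON-path usage, not the exact source)::
+
+            DatasetDocument.doc_metadata["author"].as_string().like("%John%")
+            DatasetDocument.doc_metadata["year"].as_float() > 2020
+            DatasetDocument.doc_metadata["author"].is_(None)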
+
+        Returns:
+            Mock: Mocked doc_metadata attribute
+        """
+        mock_metadata_field = MagicMock()
+
+        # Create mock for string access
+        mock_string_access = MagicMock()
+        mock_string_access.like = MagicMock()
+        mock_string_access.notlike = MagicMock()
+        mock_string_access.__eq__ = MagicMock(return_value=MagicMock())
+        mock_string_access.__ne__ = MagicMock(return_value=MagicMock())
+        mock_string_access.in_ = MagicMock(return_value=MagicMock())
+
+        # Create mock for float access (for numeric comparisons)
+        mock_float_access = MagicMock()
+        mock_float_access.__eq__ = MagicMock(return_value=MagicMock())
+        mock_float_access.__ne__ = MagicMock(return_value=MagicMock())
+        mock_float_access.__lt__ = MagicMock(return_value=MagicMock())
+        mock_float_access.__gt__ = MagicMock(return_value=MagicMock())
+        mock_float_access.__le__ = MagicMock(return_value=MagicMock())
+        mock_float_access.__ge__ = MagicMock(return_value=MagicMock())
+
+        # Create mock for null checks
+        mock_null_access = MagicMock()
+        mock_null_access.is_ = MagicMock(return_value=MagicMock())
+        mock_null_access.isnot = MagicMock(return_value=MagicMock())
+
+        # Setup __getitem__ to return appropriate mock based on usage
+        def getitem_side_effect(name):
+            if name in ["author", "title", "category"]:
+                return mock_string_access
+            elif name in ["year", "price", "rating"]:
+                return mock_float_access
+            else:
+                return mock_string_access
+
+        mock_metadata_field.__getitem__ = MagicMock(side_effect=getitem_side_effect)
+        mock_metadata_field.as_string.return_value = mock_string_access
+        mock_metadata_field.as_float.return_value = mock_float_access
+        # Wire the null-check helpers onto the per-key access mock so that
+        # "empty" / "not empty" conditions resolve without a real column
+        mock_string_access.is_ = mock_null_access.is_
+        mock_string_access.isnot = mock_null_access.isnot
+
+        return mock_metadata_field
+
+    # ==================== String Condition Tests ====================
+
+    def test_contains_condition_string_value(self, retrieval):
+        """
+        Test 'contains' condition with string value.
+
+        Verifies:
+        - Filters list is populated with LIKE expression
+        - Pattern matching uses %value% syntax
+        """
+        filters = []
+        sequence = 0
+        condition = "contains"
+        metadata_name = "author"
+        value = "John"
+
+        result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters)
+
+        assert result == filters
+        assert len(filters) == 1
+
+    def test_not_contains_condition(self, retrieval):
+        """
+        Test 'not contains' condition.
+
+        Verifies:
+        - Filters list is populated with NOT LIKE expression
+        - Pattern matching uses %value% syntax with negation
+        """
+        filters = []
+        sequence = 0
+        condition = "not contains"
+        metadata_name = "title"
+        value = "banned"
+
+        result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters)
+
+        assert result == filters
+        assert len(filters) == 1
+
+    def test_start_with_condition(self, retrieval):
+        """
+        Test 'start with' condition.
+
+        Verifies:
+        - Filters list is populated with LIKE expression
+        - Pattern matching uses value% syntax
+        """
+        filters = []
+        sequence = 0
+        condition = "start with"
+        metadata_name = "category"
+        value = "tech"
+
+        result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters)
+
+        assert result == filters
+        assert len(filters) == 1
+
+    def test_end_with_condition(self, retrieval):
+        """
+        Test 'end with' condition.
+ + Verifies: + - Filters list is populated with LIKE expression + - Pattern matching uses %value syntax + """ + filters = [] + sequence = 0 + condition = "end with" + metadata_name = "filename" + value = ".pdf" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + # ==================== Equality Condition Tests ==================== + + def test_is_condition_with_string_value(self, retrieval): + """ + Test 'is' (=) condition with string value. + + Verifies: + - Filters list is populated with equality expression + - String comparison is used + """ + filters = [] + sequence = 0 + condition = "is" + metadata_name = "author" + value = "Jane Doe" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_equals_condition_with_string_value(self, retrieval): + """ + Test '=' condition with string value. + + Verifies: + - Same behavior as 'is' condition + - String comparison is used + """ + filters = [] + sequence = 0 + condition = "=" + metadata_name = "category" + value = "technology" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_is_condition_with_int_value(self, retrieval): + """ + Test 'is' condition with integer value. + + Verifies: + - Numeric comparison is used + - as_float() is called on the metadata field + """ + filters = [] + sequence = 0 + condition = "is" + metadata_name = "year" + value = 2023 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_is_condition_with_float_value(self, retrieval): + """ + Test 'is' condition with float value. + + Verifies: + - Numeric comparison is used + - as_float() is called on the metadata field + """ + filters = [] + sequence = 0 + condition = "is" + metadata_name = "price" + value = 19.99 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_is_not_condition_with_string_value(self, retrieval): + """ + Test 'is not' (≠) condition with string value. + + Verifies: + - Filters list is populated with inequality expression + - String comparison is used + """ + filters = [] + sequence = 0 + condition = "is not" + metadata_name = "author" + value = "Unknown" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_not_equals_condition(self, retrieval): + """ + Test '≠' condition with string value. + + Verifies: + - Same behavior as 'is not' condition + - Inequality expression is used + """ + filters = [] + sequence = 0 + condition = "≠" + metadata_name = "category" + value = "archived" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_is_not_condition_with_numeric_value(self, retrieval): + """ + Test 'is not' condition with numeric value. 
+ + Verifies: + - Numeric inequality comparison is used + - as_float() is called on the metadata field + """ + filters = [] + sequence = 0 + condition = "is not" + metadata_name = "year" + value = 2000 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + # ==================== Null Condition Tests ==================== + + def test_empty_condition(self, retrieval): + """ + Test 'empty' condition (null check). + + Verifies: + - Filters list is populated with IS NULL expression + - Value can be None for this condition + """ + filters = [] + sequence = 0 + condition = "empty" + metadata_name = "author" + value = None + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_not_empty_condition(self, retrieval): + """ + Test 'not empty' condition (not null check). + + Verifies: + - Filters list is populated with IS NOT NULL expression + - Value can be None for this condition + """ + filters = [] + sequence = 0 + condition = "not empty" + metadata_name = "description" + value = None + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + # ==================== Numeric Comparison Tests ==================== + + def test_before_condition(self, retrieval): + """ + Test 'before' (<) condition. + + Verifies: + - Filters list is populated with less than expression + - Numeric comparison is used + """ + filters = [] + sequence = 0 + condition = "before" + metadata_name = "year" + value = 2020 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_less_than_condition(self, retrieval): + """ + Test '<' condition. + + Verifies: + - Same behavior as 'before' condition + - Less than expression is used + """ + filters = [] + sequence = 0 + condition = "<" + metadata_name = "price" + value = 100.0 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_after_condition(self, retrieval): + """ + Test 'after' (>) condition. + + Verifies: + - Filters list is populated with greater than expression + - Numeric comparison is used + """ + filters = [] + sequence = 0 + condition = "after" + metadata_name = "year" + value = 2020 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_greater_than_condition(self, retrieval): + """ + Test '>' condition. + + Verifies: + - Same behavior as 'after' condition + - Greater than expression is used + """ + filters = [] + sequence = 0 + condition = ">" + metadata_name = "rating" + value = 4.5 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_less_than_or_equal_condition_unicode(self, retrieval): + """ + Test '≤' condition. 
+ + Verifies: + - Filters list is populated with less than or equal expression + - Numeric comparison is used + """ + filters = [] + sequence = 0 + condition = "≤" + metadata_name = "price" + value = 50.0 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_less_than_or_equal_condition_ascii(self, retrieval): + """ + Test '<=' condition. + + Verifies: + - Same behavior as '≤' condition + - Less than or equal expression is used + """ + filters = [] + sequence = 0 + condition = "<=" + metadata_name = "year" + value = 2023 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_greater_than_or_equal_condition_unicode(self, retrieval): + """ + Test '≄' condition. + + Verifies: + - Filters list is populated with greater than or equal expression + - Numeric comparison is used + """ + filters = [] + sequence = 0 + condition = "≄" + metadata_name = "rating" + value = 3.5 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_greater_than_or_equal_condition_ascii(self, retrieval): + """ + Test '>=' condition. + + Verifies: + - Same behavior as '≄' condition + - Greater than or equal expression is used + """ + filters = [] + sequence = 0 + condition = ">=" + metadata_name = "year" + value = 2000 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + # ==================== List/In Condition Tests ==================== + + def test_in_condition_with_comma_separated_string(self, retrieval): + """ + Test 'in' condition with comma-separated string value. + + Verifies: + - String is split into list + - Whitespace is trimmed from each value + - IN expression is created + """ + filters = [] + sequence = 0 + condition = "in" + metadata_name = "category" + value = "tech, science, AI " + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_in_condition_with_list_value(self, retrieval): + """ + Test 'in' condition with list value. + + Verifies: + - List is processed correctly + - None values are filtered out + - IN expression is created with valid values + """ + filters = [] + sequence = 0 + condition = "in" + metadata_name = "tags" + value = ["python", "javascript", None, "golang"] + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_in_condition_with_tuple_value(self, retrieval): + """ + Test 'in' condition with tuple value. + + Verifies: + - Tuple is processed like a list + - IN expression is created + """ + filters = [] + sequence = 0 + condition = "in" + metadata_name = "category" + value = ("tech", "science", "ai") + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_in_condition_with_empty_string(self, retrieval): + """ + Test 'in' condition with empty string value. 
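+
+        literal(False) here presumably refers to sqlalchemy.literal(False), a
+        match-nothing predicate that avoids emitting an invalid empty IN list.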
+ + Verifies: + - Empty string results in literal(False) filter + - No valid values to match + """ + filters = [] + sequence = 0 + condition = "in" + metadata_name = "category" + value = "" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + # Verify it's a literal(False) expression + # This is a bit tricky to test without access to the actual expression + + def test_in_condition_with_only_whitespace(self, retrieval): + """ + Test 'in' condition with whitespace-only string value. + + Verifies: + - Whitespace-only string results in literal(False) filter + - All values are stripped and filtered out + """ + filters = [] + sequence = 0 + condition = "in" + metadata_name = "category" + value = " , , " + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_in_condition_with_single_string(self, retrieval): + """ + Test 'in' condition with single non-comma string. + + Verifies: + - Single string is treated as single-item list + - IN expression is created with one value + """ + filters = [] + sequence = 0 + condition = "in" + metadata_name = "category" + value = "technology" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + # ==================== Edge Case Tests ==================== + + def test_none_value_with_non_empty_condition(self, retrieval): + """ + Test None value with conditions that require value. + + Verifies: + - Original filters list is returned unchanged + - No filter is added for None values (except empty/not empty) + """ + filters = [] + sequence = 0 + condition = "contains" + metadata_name = "author" + value = None + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 0 # No filter added + + def test_none_value_with_equals_condition(self, retrieval): + """ + Test None value with 'is' (=) condition. + + Verifies: + - Original filters list is returned unchanged + - No filter is added for None values + """ + filters = [] + sequence = 0 + condition = "is" + metadata_name = "author" + value = None + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 0 + + def test_none_value_with_numeric_condition(self, retrieval): + """ + Test None value with numeric comparison condition. + + Verifies: + - Original filters list is returned unchanged + - No filter is added for None values + """ + filters = [] + sequence = 0 + condition = ">" + metadata_name = "year" + value = None + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 0 + + def test_existing_filters_preserved(self, retrieval): + """ + Test that existing filters are preserved. 
+ + Verifies: + - Existing filters in the list are not removed + - New filters are appended to the list + """ + existing_filter = MagicMock() + filters = [existing_filter] + sequence = 0 + condition = "contains" + metadata_name = "author" + value = "test" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 2 + assert filters[0] == existing_filter + + def test_multiple_filters_accumulated(self, retrieval): + """ + Test multiple calls to accumulate filters. + + Verifies: + - Each call adds a new filter to the list + - All filters are preserved across calls + """ + filters = [] + + # First filter + retrieval.process_metadata_filter_func(0, "contains", "author", "John", filters) + assert len(filters) == 1 + + # Second filter + retrieval.process_metadata_filter_func(1, ">", "year", 2020, filters) + assert len(filters) == 2 + + # Third filter + retrieval.process_metadata_filter_func(2, "is", "category", "tech", filters) + assert len(filters) == 3 + + def test_unknown_condition(self, retrieval): + """ + Test unknown/unsupported condition. + + Verifies: + - Original filters list is returned unchanged + - No filter is added for unknown conditions + """ + filters = [] + sequence = 0 + condition = "unknown_condition" + metadata_name = "author" + value = "test" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 0 + + def test_empty_string_value_with_contains(self, retrieval): + """ + Test empty string value with 'contains' condition. + + Verifies: + - Filter is added even with empty string + - LIKE expression is created + """ + filters = [] + sequence = 0 + condition = "contains" + metadata_name = "author" + value = "" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_special_characters_in_value(self, retrieval): + """ + Test special characters in value string. + + Verifies: + - Special characters are handled in value + - LIKE expression is created correctly + """ + filters = [] + sequence = 0 + condition = "contains" + metadata_name = "title" + value = "C++ & Python's features" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_zero_value_with_numeric_condition(self, retrieval): + """ + Test zero value with numeric comparison condition. + + Verifies: + - Zero is treated as valid value + - Numeric comparison is performed + """ + filters = [] + sequence = 0 + condition = ">" + metadata_name = "price" + value = 0 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_negative_value_with_numeric_condition(self, retrieval): + """ + Test negative value with numeric comparison condition. 
+ + Verifies: + - Negative numbers are handled correctly + - Numeric comparison is performed + """ + filters = [] + sequence = 0 + condition = "<" + metadata_name = "temperature" + value = -10.5 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_float_value_with_integer_comparison(self, retrieval): + """ + Test float value with numeric comparison condition. + + Verifies: + - Float values work correctly + - Numeric comparison is performed + """ + filters = [] + sequence = 0 + condition = ">=" + metadata_name = "rating" + value = 4.5 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 diff --git a/api/tests/unit_tests/services/test_dataset_service_get_segments.py b/api/tests/unit_tests/services/test_dataset_service_get_segments.py new file mode 100644 index 0000000000..360c8a3c7d --- /dev/null +++ b/api/tests/unit_tests/services/test_dataset_service_get_segments.py @@ -0,0 +1,472 @@ +""" +Unit tests for SegmentService.get_segments method. + +Tests the retrieval of document segments with pagination and filtering: +- Basic pagination (page, limit) +- Status filtering +- Keyword search +- Ordering by position and id (to avoid duplicate data) +""" + +from unittest.mock import Mock, create_autospec, patch + +import pytest + +from models.dataset import DocumentSegment + + +class SegmentServiceTestDataFactory: + """ + Factory class for creating test data and mock objects for segment tests. + """ + + @staticmethod + def create_segment_mock( + segment_id: str = "segment-123", + document_id: str = "doc-123", + tenant_id: str = "tenant-123", + dataset_id: str = "dataset-123", + position: int = 1, + content: str = "Test content", + status: str = "completed", + **kwargs, + ) -> Mock: + """ + Create a mock document segment. + + Args: + segment_id: Unique identifier for the segment + document_id: Parent document ID + tenant_id: Tenant ID the segment belongs to + dataset_id: Parent dataset ID + position: Position within the document + content: Segment text content + status: Indexing status + **kwargs: Additional attributes + + Returns: + Mock: DocumentSegment mock object + """ + segment = create_autospec(DocumentSegment, instance=True) + segment.id = segment_id + segment.document_id = document_id + segment.tenant_id = tenant_id + segment.dataset_id = dataset_id + segment.position = position + segment.content = content + segment.status = status + for key, value in kwargs.items(): + setattr(segment, key, value) + return segment + + +class TestSegmentServiceGetSegments: + """ + Comprehensive unit tests for SegmentService.get_segments method. + + Tests cover: + - Basic pagination functionality + - Status list filtering + - Keyword search filtering + - Ordering (position + id for uniqueness) + - Empty results + - Combined filters + """ + + @pytest.fixture + def mock_segment_service_dependencies(self): + """ + Common mock setup for segment service dependencies. + + Patches: + - db: Database operations and pagination + - select: SQLAlchemy query builder + """ + with ( + patch("services.dataset_service.db") as mock_db, + patch("services.dataset_service.select") as mock_select, + ): + yield { + "db": mock_db, + "select": mock_select, + } + + def test_get_segments_basic_pagination(self, mock_segment_service_dependencies): + """ + Test basic pagination functionality. 
+ + Verifies: + - Query is built with document_id and tenant_id filters + - Pagination uses correct page and limit parameters + - Returns segments and total count + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + page = 1 + limit = 20 + + # Create mock segments + segment1 = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-1", position=1, content="First segment" + ) + segment2 = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-2", position=2, content="Second segment" + ) + + # Mock pagination result + mock_paginated = Mock() + mock_paginated.items = [segment1, segment2] + mock_paginated.total = 2 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + # Mock select builder + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments(document_id=document_id, tenant_id=tenant_id, page=page, limit=limit) + + # Assert + assert len(items) == 2 + assert total == 2 + assert items[0].id == "seg-1" + assert items[1].id == "seg-2" + mock_segment_service_dependencies["db"].paginate.assert_called_once() + call_kwargs = mock_segment_service_dependencies["db"].paginate.call_args[1] + assert call_kwargs["page"] == page + assert call_kwargs["per_page"] == limit + assert call_kwargs["max_per_page"] == 100 + assert call_kwargs["error_out"] is False + + def test_get_segments_with_status_filter(self, mock_segment_service_dependencies): + """ + Test filtering by status list. + + Verifies: + - Status list filter is applied to query + - Only segments with matching status are returned + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + status_list = ["completed", "indexing"] + + segment1 = SegmentServiceTestDataFactory.create_segment_mock(segment_id="seg-1", status="completed") + segment2 = SegmentServiceTestDataFactory.create_segment_mock(segment_id="seg-2", status="indexing") + + mock_paginated = Mock() + mock_paginated.items = [segment1, segment2] + mock_paginated.total = 2 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments( + document_id=document_id, tenant_id=tenant_id, status_list=status_list + ) + + # Assert + assert len(items) == 2 + assert total == 2 + # Verify where was called multiple times (base filters + status filter) + assert mock_query.where.call_count >= 2 + + def test_get_segments_with_empty_status_list(self, mock_segment_service_dependencies): + """ + Test with empty status list. 
+ + Verifies: + - Empty status list is handled correctly + - No status filter is applied to avoid WHERE false condition + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + status_list = [] + + segment = SegmentServiceTestDataFactory.create_segment_mock(segment_id="seg-1") + + mock_paginated = Mock() + mock_paginated.items = [segment] + mock_paginated.total = 1 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments( + document_id=document_id, tenant_id=tenant_id, status_list=status_list + ) + + # Assert + assert len(items) == 1 + assert total == 1 + # Should only be called once (base filters, no status filter) + assert mock_query.where.call_count == 1 + + def test_get_segments_with_keyword_search(self, mock_segment_service_dependencies): + """ + Test keyword search functionality. + + Verifies: + - Keyword filter uses ilike for case-insensitive search + - Search pattern includes wildcards (%keyword%) + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + keyword = "search term" + + segment = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-1", content="This contains search term" + ) + + mock_paginated = Mock() + mock_paginated.items = [segment] + mock_paginated.total = 1 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments(document_id=document_id, tenant_id=tenant_id, keyword=keyword) + + # Assert + assert len(items) == 1 + assert total == 1 + # Verify where was called for base filters + keyword filter + assert mock_query.where.call_count == 2 + + def test_get_segments_ordering_by_position_and_id(self, mock_segment_service_dependencies): + """ + Test ordering by position and id. 
+ + Verifies: + - Results are ordered by position ASC + - Results are secondarily ordered by id ASC to ensure uniqueness + - This prevents duplicate data across pages when positions are not unique + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + + # Create segments with same position but different ids + segment1 = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-1", position=1, content="Content 1" + ) + segment2 = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-2", position=1, content="Content 2" + ) + segment3 = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-3", position=2, content="Content 3" + ) + + mock_paginated = Mock() + mock_paginated.items = [segment1, segment2, segment3] + mock_paginated.total = 3 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments(document_id=document_id, tenant_id=tenant_id) + + # Assert + assert len(items) == 3 + assert total == 3 + mock_query.order_by.assert_called_once() + + def test_get_segments_empty_results(self, mock_segment_service_dependencies): + """ + Test when no segments match the criteria. + + Verifies: + - Empty list is returned for items + - Total count is 0 + """ + # Arrange + document_id = "non-existent-doc" + tenant_id = "tenant-123" + + mock_paginated = Mock() + mock_paginated.items = [] + mock_paginated.total = 0 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments(document_id=document_id, tenant_id=tenant_id) + + # Assert + assert items == [] + assert total == 0 + + def test_get_segments_combined_filters(self, mock_segment_service_dependencies): + """ + Test with multiple filters combined. 
+ + Verifies: + - All filters work together correctly + - Status list and keyword search both applied + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + status_list = ["completed"] + keyword = "important" + page = 2 + limit = 10 + + segment = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-1", + status="completed", + content="This is important information", + ) + + mock_paginated = Mock() + mock_paginated.items = [segment] + mock_paginated.total = 1 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments( + document_id=document_id, + tenant_id=tenant_id, + status_list=status_list, + keyword=keyword, + page=page, + limit=limit, + ) + + # Assert + assert len(items) == 1 + assert total == 1 + # Verify filters: base + status + keyword + assert mock_query.where.call_count == 3 + # Verify pagination parameters + call_kwargs = mock_segment_service_dependencies["db"].paginate.call_args[1] + assert call_kwargs["page"] == page + assert call_kwargs["per_page"] == limit + + def test_get_segments_with_none_status_list(self, mock_segment_service_dependencies): + """ + Test with None status list. + + Verifies: + - None status list is handled correctly + - No status filter is applied + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + + segment = SegmentServiceTestDataFactory.create_segment_mock(segment_id="seg-1") + + mock_paginated = Mock() + mock_paginated.items = [segment] + mock_paginated.total = 1 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments( + document_id=document_id, + tenant_id=tenant_id, + status_list=None, + ) + + # Assert + assert len(items) == 1 + assert total == 1 + # Should only be called once (base filters only, no status filter) + assert mock_query.where.call_count == 1 + + def test_get_segments_pagination_max_per_page_limit(self, mock_segment_service_dependencies): + """ + Test that max_per_page is correctly set to 100. 
+ + Verifies: + - max_per_page parameter is set to 100 + - This prevents excessive page sizes + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + limit = 200 # Request more than max_per_page + + mock_paginated = Mock() + mock_paginated.items = [] + mock_paginated.total = 0 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + SegmentService.get_segments( + document_id=document_id, + tenant_id=tenant_id, + limit=limit, + ) + + # Assert + call_kwargs = mock_segment_service_dependencies["db"].paginate.call_args[1] + assert call_kwargs["max_per_page"] == 100 diff --git a/api/uv.lock b/api/uv.lock index 4c2cb3c3f1..c31b7fe445 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1368,7 +1368,7 @@ wheels = [ [[package]] name = "dify-api" -version = "1.11.1" +version = "1.11.2" source = { virtual = "." } dependencies = [ { name = "aliyun-log-python-sdk" }, diff --git a/docker/.env.example b/docker/.env.example index 16d47409f5..1ea1fb9a8e 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -399,6 +399,7 @@ CONSOLE_CORS_ALLOW_ORIGINS=* COOKIE_DOMAIN= # When the frontend and backend run on different subdomains, set NEXT_PUBLIC_COOKIE_DOMAIN=1. NEXT_PUBLIC_COOKIE_DOMAIN= +NEXT_PUBLIC_BATCH_CONCURRENCY=5 # ------------------------------ # File Storage Configuration diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml index 0de9d3e939..3c88cddf8c 100644 --- a/docker/docker-compose-template.yaml +++ b/docker/docker-compose-template.yaml @@ -21,7 +21,7 @@ services: # API service api: - image: langgenius/dify-api:1.11.1 + image: langgenius/dify-api:1.11.2 restart: always environment: # Use the shared environment variables. @@ -63,7 +63,7 @@ services: # worker service # The Celery worker for processing all queues (dataset, workflow, mail, etc.) worker: - image: langgenius/dify-api:1.11.1 + image: langgenius/dify-api:1.11.2 restart: always environment: # Use the shared environment variables. @@ -102,7 +102,7 @@ services: # worker_beat service # Celery beat for scheduling periodic tasks. worker_beat: - image: langgenius/dify-api:1.11.1 + image: langgenius/dify-api:1.11.2 restart: always environment: # Use the shared environment variables. @@ -132,7 +132,7 @@ services: # Frontend web application. web: - image: langgenius/dify-web:1.11.1 + image: langgenius/dify-web:1.11.2 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 964b9fe724..c03cb2ef9f 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -108,6 +108,7 @@ x-shared-env: &shared-api-worker-env CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*} COOKIE_DOMAIN: ${COOKIE_DOMAIN:-} NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-} + NEXT_PUBLIC_BATCH_CONCURRENCY: ${NEXT_PUBLIC_BATCH_CONCURRENCY:-5} STORAGE_TYPE: ${STORAGE_TYPE:-opendal} OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs} OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage} @@ -692,7 +693,7 @@ services: # API service api: - image: langgenius/dify-api:1.11.1 + image: langgenius/dify-api:1.11.2 restart: always environment: # Use the shared environment variables. 
@@ -734,7 +735,7 @@ services:
   # worker service
   # The Celery worker for processing all queues (dataset, workflow, mail, etc.)
   worker:
-    image: langgenius/dify-api:1.11.1
+    image: langgenius/dify-api:1.11.2
     restart: always
     environment:
       # Use the shared environment variables.
@@ -773,7 +774,7 @@ services:
   # worker_beat service
   # Celery beat for scheduling periodic tasks.
   worker_beat:
-    image: langgenius/dify-api:1.11.1
+    image: langgenius/dify-api:1.11.2
     restart: always
     environment:
       # Use the shared environment variables.
@@ -803,7 +804,7 @@ services:
   # Frontend web application.
   web:
-    image: langgenius/dify-web:1.11.1
+    image: langgenius/dify-web:1.11.2
     restart: always
     environment:
       CONSOLE_API_URL: ${CONSOLE_API_URL:-}
diff --git a/web/.env.example b/web/.env.example
index b488c31057..c06a4fba87 100644
--- a/web/.env.example
+++ b/web/.env.example
@@ -73,3 +73,6 @@ NEXT_PUBLIC_MAX_TREE_DEPTH=50

 # The API key of amplitude
 NEXT_PUBLIC_AMPLITUDE_API_KEY=
+
+# Number of concurrent requests for batch operations
+NEXT_PUBLIC_BATCH_CONCURRENCY=5
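The new `NEXT_PUBLIC_BATCH_CONCURRENCY` variable caps how many batch requests the web app runs in parallel. A minimal sketch of how such a value might be read on the client, with a safe fallback; the helper name and clamping logic below are illustrative assumptions, not code from this diff:

```typescript
// Hypothetical reader for NEXT_PUBLIC_BATCH_CONCURRENCY with a safe fallback.
const DEFAULT_BATCH_CONCURRENCY = 5

export function getBatchConcurrency(): number {
  const raw = Number.parseInt(process.env.NEXT_PUBLIC_BATCH_CONCURRENCY ?? '', 10)
  // Guard against unset, non-numeric, or non-positive values.
  return Number.isInteger(raw) && raw > 0 ? raw : DEFAULT_BATCH_CONCURRENCY
}
```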
diff --git a/web/app/components/app/configuration/dataset-config/index.tsx b/web/app/components/app/configuration/dataset-config/index.tsx
index 2fc82c82b6..f5324f40d8 100644
--- a/web/app/components/app/configuration/dataset-config/index.tsx
+++ b/web/app/components/app/configuration/dataset-config/index.tsx
@@ -176,7 +176,7 @@ const DatasetConfig: FC = () => {
     }))
   }, [setDatasetConfigs, datasetConfigsRef])

-  const handleAddCondition = useCallback(({ name, type }) => {
+  const handleAddCondition = useCallback(({ id, name, type }) => {
     let operator: ComparisonOperator = ComparisonOperator.is

     if (type === MetadataFilteringVariableType.number)
@@ -184,6 +184,7 @@ const DatasetConfig: FC = () => {

     const newCondition = {
       id: uuid4(),
+      metadata_id: id, // Save metadata.id for reliable reference
       name,
       comparison_operator: operator,
     }
diff --git a/web/app/components/app/configuration/dataset-config/select-dataset/index.spec.tsx b/web/app/components/app/configuration/dataset-config/select-dataset/index.spec.tsx
new file mode 100644
index 0000000000..e7c3d4a3c9
--- /dev/null
+++ b/web/app/components/app/configuration/dataset-config/select-dataset/index.spec.tsx
@@ -0,0 +1,141 @@
+import type { DataSet } from '@/models/datasets'
+import { act, fireEvent, render, screen } from '@testing-library/react'
+import * as React from 'react'
+
+import { describe, expect, it, vi } from 'vitest'
+import { IndexingType } from '@/app/components/datasets/create/step-two'
+import { DatasetPermission } from '@/models/datasets'
+import { RETRIEVE_METHOD } from '@/types/app'
+import SelectDataSet from './index'
+
+vi.mock('@/i18n-config/i18next-config', () => ({
+  __esModule: true,
+  default: {
+    changeLanguage: vi.fn(),
+    addResourceBundle: vi.fn(),
+    use: vi.fn().mockReturnThis(),
+    init: vi.fn(),
+    addResource: vi.fn(),
+    hasResourceBundle: vi.fn().mockReturnValue(true),
+  },
+}))
+const mockUseInfiniteScroll = vi.fn()
+vi.mock('ahooks', async (importOriginal) => {
+  const actual = await importOriginal()
+  return {
+    ...(typeof actual === 'object' && actual !== null ? actual : {}),
+    useInfiniteScroll: (...args: any[]) => mockUseInfiniteScroll(...args),
+  }
+})
+
+const mockUseInfiniteDatasets = vi.fn()
+vi.mock('@/service/knowledge/use-dataset', () => ({
+  useInfiniteDatasets: (...args: any[]) => mockUseInfiniteDatasets(...args),
+}))
+
+vi.mock('@/hooks/use-knowledge', () => ({
+  useKnowledge: () => ({
+    formatIndexingTechniqueAndMethod: (tech: string, method: string) => `${tech}:${method}`,
+  }),
+}))
+
+const baseProps = {
+  isShow: true,
+  onClose: vi.fn(),
+  selectedIds: [] as string[],
+  onSelect: vi.fn(),
+}
+
+const makeDataset = (overrides: Partial<DataSet>): DataSet => ({
+  id: 'dataset-id',
+  name: 'Dataset Name',
+  provider: 'internal',
+  icon_info: {
+    icon_type: 'emoji',
+    icon: 'šŸ’¾',
+    icon_background: '#fff',
+    icon_url: '',
+  },
+  embedding_available: true,
+  is_multimodal: false,
+  description: '',
+  permission: DatasetPermission.allTeamMembers,
+  indexing_technique: IndexingType.ECONOMICAL,
+  retrieval_model_dict: {
+    search_method: RETRIEVE_METHOD.fullText,
+    top_k: 5,
+    reranking_enable: false,
+    reranking_model: {
+      reranking_model_name: '',
+      reranking_provider_name: '',
+    },
+    score_threshold_enabled: false,
+    score_threshold: 0,
+  },
+  ...overrides,
+} as DataSet)
+
+describe('SelectDataSet', () => {
+  beforeEach(() => {
+    vi.clearAllMocks()
+  })
+
+  it('renders dataset entries, allows selection, and fires onSelect', async () => {
+    const datasetOne = makeDataset({
+      id: 'set-1',
+      name: 'Dataset One',
+      is_multimodal: true,
+      indexing_technique: IndexingType.ECONOMICAL,
+    })
+    const datasetTwo = makeDataset({
+      id: 'set-2',
+      name: 'Hidden Dataset',
+      embedding_available: false,
+      provider: 'external',
+    })
+    mockUseInfiniteDatasets.mockReturnValue({
+      data: { pages: [{ data: [datasetOne, datasetTwo] }] },
+      isLoading: false,
+      isFetchingNextPage: false,
+      fetchNextPage: vi.fn(),
+      hasNextPage: false,
+    })
+
+    const onSelect = vi.fn()
+    await act(async () => {
+      render(<SelectDataSet {...baseProps} onSelect={onSelect} />)
+    })
+
+    expect(screen.getByText('Dataset One')).toBeInTheDocument()
+    expect(screen.getByText('Hidden Dataset')).toBeInTheDocument()
+
+    await act(async () => {
+      fireEvent.click(screen.getByText('Dataset One'))
+    })
+    expect(screen.getByText('1 appDebug.feature.dataSet.selected')).toBeInTheDocument()
+
+    const addButton = screen.getByRole('button', { name: 'common.operation.add' })
+    await act(async () => {
+      fireEvent.click(addButton)
+    })
+    expect(onSelect).toHaveBeenCalledWith([datasetOne])
+  })
+
+  it('shows empty state when no datasets are available and disables add', async () => {
+    mockUseInfiniteDatasets.mockReturnValue({
+      data: { pages: [{ data: [] }] },
+      isLoading: false,
+      isFetchingNextPage: false,
+      fetchNextPage: vi.fn(),
+      hasNextPage: false,
+    })
+
+    await act(async () => {
+      render(<SelectDataSet {...baseProps} />)
+    })
+
+    expect(screen.getByText('appDebug.feature.dataSet.noDataSet')).toBeInTheDocument()
+    expect(screen.getByRole('link', { name: 'appDebug.feature.dataSet.toCreate' })).toHaveAttribute('href', '/datasets/create')
+    expect(screen.getByRole('button', { name: 'common.operation.add' })).toBeDisabled()
+  })
+})
diff --git a/web/app/components/app/configuration/index.tsx b/web/app/components/app/configuration/index.tsx
index 4b5bcafc9b..9cc9377508 100644
--- a/web/app/components/app/configuration/index.tsx
+++ b/web/app/components/app/configuration/index.tsx
@@ -679,7 +679,7 @@ const Configuration: FC = () => {
         const toolInCollectionList = collectionList.find(c => tool.provider_id === c.id)
         return {
           ...tool,
-          isDeleted: res.deleted_tools?.some((deletedTool: any) => deletedTool.id === tool.id && deletedTool.tool_name === tool.tool_name) ?? false,
+          isDeleted: res.deleted_tools?.some((deletedTool: any) => deletedTool.provider_id === tool.provider_id && deletedTool.tool_name === tool.tool_name) ?? false,
           notAuthor: toolInCollectionList?.is_team_authorization === false,
           ...(tool.provider_type === 'builtin'
             ? {
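The `isDeleted` change above stops matching deleted tools by `id` and keys on `provider_id` plus `tool_name` instead, presumably because the id of a tool entry is a less reliable identifier than the provider/tool-name pair. A minimal sketch of the new matching rule in isolation; the `ToolRef` type below is an illustrative assumption, not a type from the codebase:

```typescript
// Illustrative only: a deleted tool is identified by provider_id + tool_name.
type ToolRef = { provider_id: string, tool_name: string }

function isToolDeleted(tool: ToolRef, deletedTools: ToolRef[] = []): boolean {
  return deletedTools.some(deleted =>
    deleted.provider_id === tool.provider_id
    && deleted.tool_name === tool.tool_name)
}
```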
diff --git a/web/app/components/app/configuration/prompt-value-panel/index.spec.tsx b/web/app/components/app/configuration/prompt-value-panel/index.spec.tsx
new file mode 100644
index 0000000000..039ed078d7
--- /dev/null
+++ b/web/app/components/app/configuration/prompt-value-panel/index.spec.tsx
@@ -0,0 +1,125 @@
+import type { IPromptValuePanelProps } from './index'
+import { fireEvent, render, screen, waitFor } from '@testing-library/react'
+import * as React from 'react'
+import { beforeEach, describe, expect, it, vi } from 'vitest'
+import { useStore } from '@/app/components/app/store'
+import ConfigContext from '@/context/debug-configuration'
+import { AppModeEnum, ModelModeType, Resolution } from '@/types/app'
+import PromptValuePanel from './index'
+
+vi.mock('@/app/components/app/store', () => ({
+  useStore: vi.fn(),
+}))
+vi.mock('@/app/components/base/features/new-feature-panel/feature-bar', () => ({
+  __esModule: true,
+  default: ({ onFeatureBarClick }: { onFeatureBarClick: () => void }) => (
+    <button type="button" onClick={onFeatureBarClick}>feature bar</button>
+  ),
+}))
+
+const mockSetShowAppConfigureFeaturesModal = vi.fn()
+const mockUseStore = vi.mocked(useStore)
+const mockSetInputs = vi.fn()
+const mockOnSend = vi.fn()
+
+const promptVariables = [
+  { key: 'textVar', name: 'Text Var', type: 'string', required: true },
+  { key: 'boolVar', name: 'Boolean Var', type: 'checkbox' },
+] as const
+
+const baseContextValue: any = {
+  modelModeType: ModelModeType.completion,
+  modelConfig: {
+    configs: {
+      prompt_template: 'prompt template',
+      prompt_variables: promptVariables,
+    },
+  },
+  setInputs: mockSetInputs,
+  mode: AppModeEnum.COMPLETION,
+  isAdvancedMode: false,
+  completionPromptConfig: {
+    prompt: { text: 'completion' },
+    conversation_histories_role: { user_prefix: 'user', assistant_prefix: 'assistant' },
+  },
+  chatPromptConfig: { prompt: [] },
+} as any
+
+const defaultProps: IPromptValuePanelProps = {
+  appType: AppModeEnum.COMPLETION,
+  onSend: mockOnSend,
+  inputs: { textVar: 'initial', boolVar: false },
+  visionConfig: { enabled: false, number_limits: 0, detail: Resolution.low, transfer_methods: [] },
+  onVisionFilesChange: vi.fn(),
+}
+
+const renderPanel = (options: {
+  context?: Partial<typeof baseContextValue>
+  props?: Partial<IPromptValuePanelProps>
+} = {}) => {
+  const contextValue = { ...baseContextValue, ...options.context }
+  const props = { ...defaultProps, ...options.props }
+  return render(
+    <ConfigContext.Provider value={contextValue}>
+      <PromptValuePanel {...props} />
+    </ConfigContext.Provider>,
+  )
+}
+
+describe('PromptValuePanel', () => {
+  beforeEach(() => {
+    mockUseStore.mockImplementation(selector => selector({
+      setShowAppConfigureFeaturesModal: mockSetShowAppConfigureFeaturesModal,
+      appSidebarExpand: '',
+      currentLogModalActiveTab: 'prompt',
+      showPromptLogModal: false,
+      showAgentLogModal: false,
+      setShowPromptLogModal: vi.fn(),
+      setShowAgentLogModal: vi.fn(),
+      showMessageLogModal: false,
+      showAppConfigureFeaturesModal: false,
+    } as any))
+    mockSetInputs.mockClear()
+    mockOnSend.mockClear()
+    mockSetShowAppConfigureFeaturesModal.mockClear()
+  })
+
+  it('updates inputs, clears values, and triggers run when ready', async () => {
+    renderPanel()
+
+    const textInput = screen.getByPlaceholderText('Text Var')
+    fireEvent.change(textInput, { target: { value: 'updated' } })
expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({ textVar: 'updated' })) + + const clearButton = screen.getByRole('button', { name: 'common.operation.clear' }) + fireEvent.click(clearButton) + + expect(mockSetInputs).toHaveBeenLastCalledWith({ + textVar: '', + boolVar: '', + }) + + const runButton = screen.getByRole('button', { name: 'appDebug.inputs.run' }) + expect(runButton).not.toBeDisabled() + fireEvent.click(runButton) + await waitFor(() => expect(mockOnSend).toHaveBeenCalledTimes(1)) + }) + + it('disables run when mode is not completion', () => { + renderPanel({ + context: { + mode: AppModeEnum.CHAT, + }, + props: { + appType: AppModeEnum.CHAT, + }, + }) + + const runButton = screen.getByRole('button', { name: 'appDebug.inputs.run' }) + expect(runButton).toBeDisabled() + fireEvent.click(runButton) + expect(mockOnSend).not.toHaveBeenCalled() + }) +}) diff --git a/web/app/components/app/configuration/prompt-value-panel/utils.spec.ts b/web/app/components/app/configuration/prompt-value-panel/utils.spec.ts new file mode 100644 index 0000000000..7a7e0da9a9 --- /dev/null +++ b/web/app/components/app/configuration/prompt-value-panel/utils.spec.ts @@ -0,0 +1,29 @@ +import type { PromptVariable } from '@/models/debug' + +import { describe, expect, it } from 'vitest' +import { replaceStringWithValues } from './utils' + +const promptVariables: PromptVariable[] = [ + { key: 'user', name: 'User', type: 'string' }, + { key: 'topic', name: 'Topic', type: 'string' }, +] + +describe('replaceStringWithValues', () => { + it('should replace placeholders when inputs have values', () => { + const template = 'Hello {{user}} talking about {{topic}}' + const result = replaceStringWithValues(template, promptVariables, { user: 'Alice', topic: 'cats' }) + expect(result).toBe('Hello Alice talking about cats') + }) + + it('should use prompt variable name when value is missing', () => { + const template = 'Hi {{user}} from {{topic}}' + const result = replaceStringWithValues(template, promptVariables, {}) + expect(result).toBe('Hi {{User}} from {{Topic}}') + }) + + it('should leave placeholder untouched when no variable is defined', () => { + const template = 'Unknown {{missing}} placeholder' + const result = replaceStringWithValues(template, promptVariables, {}) + expect(result).toBe('Unknown {{missing}} placeholder') + }) +}) diff --git a/web/app/components/app/create-app-dialog/app-list/index.spec.tsx b/web/app/components/app/create-app-dialog/app-list/index.spec.tsx new file mode 100644 index 0000000000..42f510b468 --- /dev/null +++ b/web/app/components/app/create-app-dialog/app-list/index.spec.tsx @@ -0,0 +1,136 @@ +import { fireEvent, render, screen } from '@testing-library/react' +import { AppModeEnum } from '@/types/app' +import Apps from './index' + +const mockUseExploreAppList = vi.fn() + +vi.mock('ahooks', () => ({ + useDebounceFn: (fn: () => void) => ({ + run: () => setTimeout(fn, 0), + cancel: vi.fn(), + flush: () => fn(), + }), +})) +vi.mock('@/context/app-context', () => ({ + useAppContext: () => ({ isCurrentWorkspaceEditor: true }), +})) +vi.mock('use-context-selector', async () => { + const actual = await vi.importActual('use-context-selector') + return { + ...actual, + useContext: () => ({ hasEditPermission: true }), + } +}) +vi.mock('@/hooks/use-tab-searchparams', () => ({ + useTabSearchParams: () => ['Recommended', vi.fn()], +})) +vi.mock('@/service/use-explore', () => ({ + useExploreAppList: () => mockUseExploreAppList(), +})) +vi.mock('@/app/components/app/type-selector', () 
=> ({
+  __esModule: true,
+  default: ({ value, onChange }: { value: AppModeEnum[], onChange: (value: AppModeEnum[]) => void }) => (
+    <div data-testid="app-type-selector" onClick={() => onChange(value)} />
+  ),
+}))
+vi.mock('../app-card', () => ({
+  __esModule: true,
+  default: ({ app, onCreate }: { app: any, onCreate: () => void }) => (
+    <div data-testid="app-card" onClick={onCreate}>
+      {app.app.name}
+    </div>
+ ), +})) +vi.mock('@/app/components/explore/create-app-modal', () => ({ + __esModule: true, + default: () =>
<div data-testid="create-from-template-modal" />,
+}))
+vi.mock('@/app/components/base/toast', () => ({
+  default: { notify: vi.fn() },
+}))
+vi.mock('@/app/components/base/amplitude', () => ({
+  trackEvent: vi.fn(),
+}))
+vi.mock('@/service/apps', () => ({
+  importDSL: vi.fn().mockResolvedValue({ app_id: '1' }),
+}))
+vi.mock('@/service/explore', () => ({
+  fetchAppDetail: vi.fn().mockResolvedValue({
+    export_data: 'dsl',
+    mode: 'chat',
+  }),
+}))
+vi.mock('@/app/components/workflow/plugin-dependency/hooks', () => ({
+  usePluginDependencies: () => ({
+    handleCheckPluginDependencies: vi.fn(),
+  }),
+}))
+vi.mock('@/utils/app-redirection', () => ({
+  getRedirection: vi.fn(),
+}))
+vi.mock('next/navigation', () => ({
+  useRouter: () => ({ push: vi.fn() }),
+}))
+
+const createAppEntry = (name: string, category: string) => ({
+  app_id: name,
+  category,
+  app: {
+    id: name,
+    name,
+    icon_type: 'emoji',
+    icon: 'šŸ™‚',
+    icon_background: '#000',
+    icon_url: null,
+    description: 'desc',
+    mode: AppModeEnum.CHAT,
+  },
+})
+
+describe('Apps', () => {
+  const defaultData = {
+    allList: [
+      createAppEntry('Alpha', 'Cat A'),
+      createAppEntry('Bravo', 'Cat B'),
+    ],
+    categories: ['Cat A', 'Cat B'],
+  }
+
+  beforeEach(() => {
+    vi.clearAllMocks()
+    mockUseExploreAppList.mockReturnValue({
+      data: defaultData,
+      isLoading: false,
+    })
+  })
+
+  it('renders template cards when data is available', () => {
+    render(<Apps />)
+
+    expect(screen.getAllByTestId('app-card')).toHaveLength(2)
+    expect(screen.getByText('Alpha')).toBeInTheDocument()
+    expect(screen.getByText('Bravo')).toBeInTheDocument()
+  })
+
+  it('opens create modal when a template card is clicked', () => {
+    render(<Apps />)
+
+    fireEvent.click(screen.getAllByTestId('app-card')[0])
+    expect(screen.getByTestId('create-from-template-modal')).toBeInTheDocument()
+  })
+  it('shows no template message when list is empty', () => {
+    mockUseExploreAppList.mockReturnValueOnce({
+      data: { allList: [], categories: [] },
+      isLoading: false,
+    })
+
+    render(<Apps />)
+
+    expect(screen.getByText('app.newApp.noTemplateFound')).toBeInTheDocument()
+    expect(screen.getByText('app.newApp.noTemplateFoundTip')).toBeInTheDocument()
+  })
+})
diff --git a/web/app/components/app/create-app-dialog/app-list/sidebar.spec.tsx b/web/app/components/app/create-app-dialog/app-list/sidebar.spec.tsx
new file mode 100644
index 0000000000..724177a6ce
--- /dev/null
+++ b/web/app/components/app/create-app-dialog/app-list/sidebar.spec.tsx
@@ -0,0 +1,38 @@
+import { fireEvent, render, screen } from '@testing-library/react'
+import Sidebar, { AppCategories } from './sidebar'
+
+vi.mock('@remixicon/react', () => ({
+  RiStickyNoteAddLine: () => <span>sticky</span>,
+  RiThumbUpLine: () => <span>thumb</span>,
+}))
+describe('Sidebar', () => {
+  it('renders recommended and custom categories', () => {
+    render(<Sidebar current={AppCategories.RECOMMENDED} categories={['Cat A', 'Cat B']} onClick={vi.fn()} onCreateFromBlank={vi.fn()} />)
+
+    expect(screen.getByText('app.newAppFromTemplate.sidebar.Recommended')).toBeInTheDocument()
+    expect(screen.getByText('Cat A')).toBeInTheDocument()
+    expect(screen.getByText('Cat B')).toBeInTheDocument()
+  })
+
+  it('notifies callbacks when items are clicked', () => {
+    const onClick = vi.fn()
+    const onCreate = vi.fn()
+    render(
+      <Sidebar current={AppCategories.RECOMMENDED} categories={['Cat A', 'Cat B']} onClick={onClick} onCreateFromBlank={onCreate} />,
+    )
+
+    fireEvent.click(screen.getByText('app.newAppFromTemplate.sidebar.Recommended'))
+    expect(onClick).toHaveBeenCalledWith(AppCategories.RECOMMENDED)
+
+    fireEvent.click(screen.getByText('Cat A'))
+    expect(onClick).toHaveBeenCalledWith('Cat A')
+
+    fireEvent.click(screen.getByText('app.newApp.startFromBlank'))
+    expect(onCreate).toHaveBeenCalled()
+  })
+})
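These specs lean on one recurring trick: heavyweight child components are replaced by `vi.mock` stubs that expose a stable `data-testid` and forward the single callback the test cares about. A standalone sketch of the pattern; the component and prop names here are made up for illustration:

```tsx
import { fireEvent, render, screen } from '@testing-library/react'
import { expect, it, vi } from 'vitest'
import List from './list' // hypothetical component under test

// Stub the heavy child: keep only what the test needs to find and click it.
vi.mock('./heavy-card', () => ({
  __esModule: true,
  default: ({ title, onPick }: { title: string, onPick: () => void }) => (
    <div data-testid="heavy-card" onClick={onPick}>{title}</div>
  ),
}))

it('clicks through the stubbed card', () => {
  render(<List />)
  fireEvent.click(screen.getByTestId('heavy-card'))
  expect(screen.getByTestId('heavy-card')).toBeInTheDocument()
})
```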
diff --git a/web/app/components/app/create-app-modal/index.spec.tsx b/web/app/components/app/create-app-modal/index.spec.tsx
new file mode 100644
index 0000000000..02c00ed3fd
--- /dev/null
+++ b/web/app/components/app/create-app-modal/index.spec.tsx
@@ -0,0 +1,162 @@
+import { fireEvent, render, screen, waitFor } from '@testing-library/react'
+import { useRouter } from 'next/navigation'
+import { afterAll, beforeEach, describe, expect, it, vi } from 'vitest'
+import { trackEvent } from '@/app/components/base/amplitude'
+
+import { ToastContext } from '@/app/components/base/toast'
+import { NEED_REFRESH_APP_LIST_KEY } from '@/config'
+import { useAppContext } from '@/context/app-context'
+import { useProviderContext } from '@/context/provider-context'
+import { createApp } from '@/service/apps'
+import { AppModeEnum } from '@/types/app'
+import { getRedirection } from '@/utils/app-redirection'
+import CreateAppModal from './index'
+
+vi.mock('ahooks', () => ({
+  useDebounceFn: (fn: (...args: any[]) => any) => {
+    const run = (...args: any[]) => fn(...args)
+    const cancel = vi.fn()
+    const flush = vi.fn()
+    return { run, cancel, flush }
+  },
+  useKeyPress: vi.fn(),
+  useHover: () => false,
+}))
+vi.mock('next/navigation', () => ({
+  useRouter: vi.fn(),
+}))
+vi.mock('@/app/components/base/amplitude', () => ({
+  trackEvent: vi.fn(),
+}))
+vi.mock('@/service/apps', () => ({
+  createApp: vi.fn(),
+}))
+vi.mock('@/utils/app-redirection', () => ({
+  getRedirection: vi.fn(),
+}))
+vi.mock('@/context/provider-context', () => ({
+  useProviderContext: vi.fn(),
+}))
+vi.mock('@/context/app-context', () => ({
+  useAppContext: vi.fn(),
+}))
+vi.mock('@/context/i18n', () => ({
+  useDocLink: () => () => '/guides',
+}))
+vi.mock('@/hooks/use-theme', () => ({
+  __esModule: true,
+  default: () => ({ theme: 'light' }),
+}))
+
+const mockNotify = vi.fn()
+const mockUseRouter = vi.mocked(useRouter)
+const mockPush = vi.fn()
+const mockCreateApp = vi.mocked(createApp)
+const mockTrackEvent = vi.mocked(trackEvent)
+const mockGetRedirection = vi.mocked(getRedirection)
+const mockUseProviderContext = vi.mocked(useProviderContext)
+const mockUseAppContext = vi.mocked(useAppContext)
+
+const defaultPlanUsage = {
+  buildApps: 0,
+  teamMembers: 0,
+  annotatedResponse: 0,
+  documentsUploadQuota: 0,
+  apiRateLimit: 0,
+  triggerEvents: 0,
+  vectorSpace: 0,
+}
+
+const renderModal = () => {
+  const onClose = vi.fn()
+  const onSuccess = vi.fn()
+  render(
+    <ToastContext.Provider value={{ notify: mockNotify } as any}>
+      <CreateAppModal isShow onClose={onClose} onSuccess={onSuccess} />
+    </ToastContext.Provider>,
+  )
+  return { onClose, onSuccess }
+}
+
+describe('CreateAppModal', () => {
+  const mockSetItem = vi.fn()
+  const originalLocalStorage = window.localStorage
+
+  beforeEach(() => {
+    vi.clearAllMocks()
+    mockUseRouter.mockReturnValue({ push: mockPush } as any)
+    mockUseProviderContext.mockReturnValue({
+      plan: {
+        type: AppModeEnum.ADVANCED_CHAT,
+        usage: defaultPlanUsage,
+        total: { ...defaultPlanUsage, buildApps: 1 },
+        reset: {},
+      },
+      enableBilling: true,
+    } as any)
+    mockUseAppContext.mockReturnValue({
+      isCurrentWorkspaceEditor: true,
+    } as any)
+    mockSetItem.mockClear()
+    Object.defineProperty(window, 'localStorage', {
+      value: {
+        setItem: mockSetItem,
+        getItem: vi.fn(),
+        removeItem: vi.fn(),
+        clear: vi.fn(),
+        key: vi.fn(),
+        length: 0,
+      },
+      writable: true,
+    })
+  })
+
+  afterAll(() => {
+    Object.defineProperty(window, 'localStorage', {
+      value: originalLocalStorage,
+      writable: true,
+    })
+  })
+
+  it('creates an app, notifies success, and fires callbacks', async () => {
+    const mockApp = { id: 'app-1', mode: AppModeEnum.ADVANCED_CHAT }
+    mockCreateApp.mockResolvedValue(mockApp as any)
+    const { onClose, onSuccess }
= renderModal() + + const nameInput = screen.getByPlaceholderText('app.newApp.appNamePlaceholder') + fireEvent.change(nameInput, { target: { value: 'My App' } }) + fireEvent.click(screen.getByRole('button', { name: 'app.newApp.Create' })) + + await waitFor(() => expect(mockCreateApp).toHaveBeenCalledWith({ + name: 'My App', + description: '', + icon_type: 'emoji', + icon: 'šŸ¤–', + icon_background: '#FFEAD5', + mode: AppModeEnum.ADVANCED_CHAT, + })) + + expect(mockTrackEvent).toHaveBeenCalledWith('create_app', { + app_mode: AppModeEnum.ADVANCED_CHAT, + description: '', + }) + expect(mockNotify).toHaveBeenCalledWith({ type: 'success', message: 'app.newApp.appCreated' }) + expect(onSuccess).toHaveBeenCalled() + expect(onClose).toHaveBeenCalled() + await waitFor(() => expect(mockSetItem).toHaveBeenCalledWith(NEED_REFRESH_APP_LIST_KEY, '1')) + await waitFor(() => expect(mockGetRedirection).toHaveBeenCalledWith(true, mockApp, mockPush)) + }) + + it('shows error toast when creation fails', async () => { + mockCreateApp.mockRejectedValue(new Error('boom')) + const { onClose } = renderModal() + + const nameInput = screen.getByPlaceholderText('app.newApp.appNamePlaceholder') + fireEvent.change(nameInput, { target: { value: 'My App' } }) + fireEvent.click(screen.getByRole('button', { name: 'app.newApp.Create' })) + + await waitFor(() => expect(mockCreateApp).toHaveBeenCalled()) + expect(mockNotify).toHaveBeenCalledWith({ type: 'error', message: 'boom' }) + expect(onClose).not.toHaveBeenCalled() + }) +}) diff --git a/web/app/components/app/overview/embedded/index.spec.tsx b/web/app/components/app/overview/embedded/index.spec.tsx new file mode 100644 index 0000000000..36f2e980c4 --- /dev/null +++ b/web/app/components/app/overview/embedded/index.spec.tsx @@ -0,0 +1,121 @@ +import type { SiteInfo } from '@/models/share' +import { fireEvent, render, screen } from '@testing-library/react' +import copy from 'copy-to-clipboard' +import * as React from 'react' + +import { act } from 'react' +import { afterAll, afterEach, describe, expect, it, vi } from 'vitest' +import Embedded from './index' + +vi.mock('./style.module.css', () => ({ + __esModule: true, + default: { + option: 'option', + active: 'active', + iframeIcon: 'iframeIcon', + scriptsIcon: 'scriptsIcon', + chromePluginIcon: 'chromePluginIcon', + pluginInstallIcon: 'pluginInstallIcon', + }, +})) +const mockThemeBuilder = { + buildTheme: vi.fn(), + theme: { + primaryColor: '#123456', + }, +} +const mockUseAppContext = vi.fn(() => ({ + langGeniusVersionInfo: { + current_env: 'PRODUCTION', + current_version: '', + latest_version: '', + release_date: '', + release_notes: '', + version: '', + can_auto_update: false, + }, +})) + +vi.mock('copy-to-clipboard', () => ({ + __esModule: true, + default: vi.fn(), +})) +vi.mock('@/app/components/base/chat/embedded-chatbot/theme/theme-context', () => ({ + useThemeContext: () => mockThemeBuilder, +})) +vi.mock('@/context/app-context', () => ({ + useAppContext: () => mockUseAppContext(), +})) +const mockWindowOpen = vi.spyOn(window, 'open').mockImplementation(() => null) +const mockedCopy = vi.mocked(copy) + +const siteInfo: SiteInfo = { + title: 'test site', + chat_color_theme: '#000000', + chat_color_theme_inverted: false, +} + +const baseProps = { + isShow: true, + siteInfo, + onClose: vi.fn(), + appBaseUrl: 'https://app.example.com', + accessToken: 'token', + className: 'custom-modal', +} + +const getCopyButton = () => { + const buttons = screen.getAllByRole('button') + const actionButton = buttons.find(button => 
button.className.includes('action-btn'))
+  expect(actionButton).toBeDefined()
+  return actionButton!
+}
+
+describe('Embedded', () => {
+  afterEach(() => {
+    vi.clearAllMocks()
+    mockWindowOpen.mockClear()
+  })
+
+  afterAll(() => {
+    mockWindowOpen.mockRestore()
+  })
+
+  it('builds theme and copies iframe snippet', async () => {
+    await act(async () => {
+      render(<Embedded {...baseProps} />)
+    })
+
+    const actionButton = getCopyButton()
+    const innerDiv = actionButton.querySelector('div')
+    act(() => {
+      fireEvent.click(innerDiv ?? actionButton)
+    })
+
+    expect(mockThemeBuilder.buildTheme).toHaveBeenCalledWith(siteInfo.chat_color_theme, siteInfo.chat_color_theme_inverted)
+    expect(mockedCopy).toHaveBeenCalledWith(expect.stringContaining('/chatbot/token'))
+  })
+
+  it('opens chrome plugin store link when chrome option selected', async () => {
+    await act(async () => {
+      render(<Embedded {...baseProps} />)
+    })
+
+    const optionButtons = document.body.querySelectorAll('[class*="option"]')
+    expect(optionButtons.length).toBeGreaterThanOrEqual(3)
+    act(() => {
+      fireEvent.click(optionButtons[2])
+    })
+
+    const [chromeText] = screen.getAllByText('appOverview.overview.appInfo.embedded.chromePlugin')
+    act(() => {
+      fireEvent.click(chromeText)
+    })
+
+    expect(mockWindowOpen).toHaveBeenCalledWith(
+      'https://chrome.google.com/webstore/detail/dify-chatbot/ceehdapohffmjmkdcifjofadiaoeggaf',
+      '_blank',
+      'noopener,noreferrer',
+    )
+  })
+})
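This spec and the saved-items spec later in the diff stub `copy-to-clipboard` the same way: mock the module's default export, then assert through the typed handle from `vi.mocked`. A minimal standalone sketch of the pattern:

```typescript
// Standalone sketch of the clipboard-mocking pattern used in these specs.
import copy from 'copy-to-clipboard'
import { expect, it, vi } from 'vitest'

vi.mock('copy-to-clipboard', () => ({
  __esModule: true,
  default: vi.fn(),
}))

const mockedCopy = vi.mocked(copy)

it('records what was copied', () => {
  copy('hello')
  expect(mockedCopy).toHaveBeenCalledWith('hello')
})
```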
diff --git a/web/app/components/app/overview/settings/index.spec.tsx b/web/app/components/app/overview/settings/index.spec.tsx
new file mode 100644
index 0000000000..8deae7f952
--- /dev/null
+++ b/web/app/components/app/overview/settings/index.spec.tsx
@@ -0,0 +1,217 @@
+import type { ReactNode } from 'react'
+import type { ModalContextState } from '@/context/modal-context'
+import type { ProviderContextState } from '@/context/provider-context'
+import type { AppDetailResponse } from '@/models/app'
+import type { AppSSO } from '@/types/app'
+import { fireEvent, render, screen, waitFor } from '@testing-library/react'
+import { Plan } from '@/app/components/billing/type'
+import { baseProviderContextValue } from '@/context/provider-context'
+import { AppModeEnum } from '@/types/app'
+import SettingsModal from './index'
+
+vi.mock('react-i18next', async () => {
+  const actual = await vi.importActual('react-i18next')
+  return {
+    ...actual,
+    useTranslation: () => ({
+      t: (key: string, options?: Record<string, any>) => {
+        if (options?.returnObjects)
+          return [`${key}-feature-1`, `${key}-feature-2`]
+        if (options)
+          return `${key}:${JSON.stringify(options)}`
+        return key
+      },
+      i18n: {
+        language: 'en',
+        changeLanguage: vi.fn(),
+      },
+    }),
+    Trans: ({ children }: { children?: ReactNode }) => <>{children}</>,
+  }
+})
+
+const mockNotify = vi.fn()
+const mockOnClose = vi.fn()
+const mockOnSave = vi.fn()
+const mockSetShowPricingModal = vi.fn()
+const mockSetShowAccountSettingModal = vi.fn()
+const mockUseProviderContext = vi.fn<() => ProviderContextState>()
+
+const buildModalContext = (): ModalContextState => ({
+  setShowAccountSettingModal: mockSetShowAccountSettingModal,
+  setShowApiBasedExtensionModal: vi.fn(),
+  setShowModerationSettingModal: vi.fn(),
+  setShowExternalDataToolModal: vi.fn(),
+  setShowPricingModal: mockSetShowPricingModal,
+  setShowAnnotationFullModal: vi.fn(),
+  setShowModelModal: vi.fn(),
+  setShowExternalKnowledgeAPIModal: vi.fn(),
+  setShowModelLoadBalancingModal: vi.fn(),
+  setShowOpeningModal: vi.fn(),
+  setShowUpdatePluginModal: vi.fn(),
+  setShowEducationExpireNoticeModal: vi.fn(),
+  setShowTriggerEventsLimitModal: vi.fn(),
+})
+
+vi.mock('@/context/modal-context', () => ({
+  useModalContext: () => buildModalContext(),
+}))
+
+vi.mock('@/app/components/base/toast', async () => {
+  const actual = await vi.importActual('@/app/components/base/toast')
+  return {
+    ...actual,
+    useToastContext: () => ({
+      notify: mockNotify,
+      close: vi.fn(),
+    }),
+  }
+})
+
+vi.mock('@/context/i18n', async () => {
+  const actual = await vi.importActual('@/context/i18n')
+  return {
+    ...actual,
+    useDocLink: () => (path?: string) => `https://docs.example.com${path ?? ''}`,
+  }
+})
+
+vi.mock('@/context/provider-context', async () => {
+  const actual = await vi.importActual('@/context/provider-context')
+  return {
+    ...actual,
+    useProviderContext: () => mockUseProviderContext(),
+  }
+})
+
+const mockAppInfo = {
+  site: {
+    title: 'Test App',
+    icon_type: 'emoji',
+    icon: 'šŸ˜€',
+    icon_background: '#ABCDEF',
+    icon_url: 'https://example.com/icon.png',
+    description: 'A description',
+    chat_color_theme: '#123456',
+    chat_color_theme_inverted: true,
+    copyright: 'Ā© Dify',
+    privacy_policy: '',
+    custom_disclaimer: 'Disclaimer',
+    default_language: 'en-US',
+    show_workflow_steps: true,
+    use_icon_as_answer_icon: true,
+  },
+  mode: AppModeEnum.ADVANCED_CHAT,
+  enable_sso: false,
+} as unknown as AppDetailResponse & Partial<AppSSO>
+
+const renderSettingsModal = () => render(
+  <SettingsModal appInfo={mockAppInfo} isShow onClose={mockOnClose} onSave={mockOnSave} />,
+)
+
+describe('SettingsModal', () => {
+  beforeEach(() => {
+    mockNotify.mockClear()
+    mockOnClose.mockClear()
+    mockOnSave.mockClear()
+    mockSetShowPricingModal.mockClear()
+    mockSetShowAccountSettingModal.mockClear()
+    mockUseProviderContext.mockReturnValue({
+      ...baseProviderContextValue,
+      enableBilling: true,
+      plan: {
+        ...baseProviderContextValue.plan,
+        type: Plan.sandbox,
+      },
+      webappCopyrightEnabled: true,
+    })
+  })
+
+  it('should render the modal and expose the expanded settings section', async () => {
+    renderSettingsModal()
+    expect(screen.getByText('appOverview.overview.appInfo.settings.title')).toBeInTheDocument()
+
+    const showMoreEntry = screen.getByText('appOverview.overview.appInfo.settings.more.entry')
+    fireEvent.click(showMoreEntry)
+
+    await waitFor(() => {
+      expect(screen.getByPlaceholderText('appOverview.overview.appInfo.settings.more.copyRightPlaceholder')).toBeInTheDocument()
+      expect(screen.getByPlaceholderText('appOverview.overview.appInfo.settings.more.privacyPolicyPlaceholder')).toBeInTheDocument()
+    })
+  })
+
+  it('should notify the user when the name is empty', async () => {
+    renderSettingsModal()
+    const nameInput = screen.getByPlaceholderText('app.appNamePlaceholder')
+    fireEvent.change(nameInput, { target: { value: '' } })
+    fireEvent.click(screen.getByText('common.operation.save'))
+
+    await waitFor(() => {
+      expect(mockNotify).toHaveBeenCalledWith(expect.objectContaining({ message: 'app.newApp.nameNotEmpty' }))
+    })
+    expect(mockOnSave).not.toHaveBeenCalled()
+  })
+
+  it('should validate the theme color and show an error when the hex is invalid', async () => {
+    renderSettingsModal()
+    const colorInput = screen.getByPlaceholderText('E.g #A020F0')
+    fireEvent.change(colorInput, { target: { value: 'not-a-hex' } })
+
+    fireEvent.click(screen.getByText('common.operation.save'))
+    await waitFor(() => {
+      expect(mockNotify).toHaveBeenCalledWith(expect.objectContaining({
+        message: 'appOverview.overview.appInfo.settings.invalidHexMessage',
+      }))
+    })
+    expect(mockOnSave).not.toHaveBeenCalled()
+  })
+  it('should validate the privacy policy URL when advanced settings are open', async () => {
+    renderSettingsModal()
+    fireEvent.click(screen.getByText('appOverview.overview.appInfo.settings.more.entry'))
+    const privacyInput = screen.getByPlaceholderText('appOverview.overview.appInfo.settings.more.privacyPolicyPlaceholder')
+    // eslint-disable-next-line sonarjs/no-clear-text-protocols
+    fireEvent.change(privacyInput, { target: { value: 'ftp://invalid-url' } })
+
+    fireEvent.click(screen.getByText('common.operation.save'))
+    await waitFor(() => {
+      expect(mockNotify).toHaveBeenCalledWith(expect.objectContaining({
+        message: 'appOverview.overview.appInfo.settings.invalidPrivacyPolicy',
+      }))
+    })
+    expect(mockOnSave).not.toHaveBeenCalled()
+  })
+
+  it('should save valid settings and close the modal', async () => {
+    mockOnSave.mockResolvedValueOnce(undefined)
+    renderSettingsModal()
+
+    fireEvent.click(screen.getByText('common.operation.save'))
+
+    await waitFor(() => expect(mockOnSave).toHaveBeenCalled())
+    expect(mockOnSave).toHaveBeenCalledWith(expect.objectContaining({
+      title: mockAppInfo.site.title,
+      description: mockAppInfo.site.description,
+      default_language: mockAppInfo.site.default_language,
+      chat_color_theme: mockAppInfo.site.chat_color_theme,
+      chat_color_theme_inverted: mockAppInfo.site.chat_color_theme_inverted,
+      prompt_public: false,
+      copyright: mockAppInfo.site.copyright,
+      privacy_policy: mockAppInfo.site.privacy_policy,
+      custom_disclaimer: mockAppInfo.site.custom_disclaimer,
+      icon_type: 'emoji',
+      icon: mockAppInfo.site.icon,
+      icon_background: mockAppInfo.site.icon_background,
+      show_workflow_steps: mockAppInfo.site.show_workflow_steps,
+      use_icon_as_answer_icon: mockAppInfo.site.use_icon_as_answer_icon,
+      enable_sso: mockAppInfo.enable_sso,
+    }))
+    expect(mockOnClose).toHaveBeenCalled()
+  })
+})
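The two validation tests above imply the modal checks the theme color against a hex pattern and requires an `http(s)` URL for the privacy policy before calling `onSave`. A rough sketch of checks consistent with those tests; the regex, the function names, and the 6-digit-only assumption are mine, not the component's actual code:

```typescript
// Assumed validators matching the spec's expectations.
const isValidHexColor = (value: string): boolean =>
  /^#[0-9A-Fa-f]{6}$/.test(value)

const isValidPrivacyPolicyUrl = (value: string): boolean => {
  try {
    const { protocol } = new URL(value)
    return protocol === 'http:' || protocol === 'https:'
  }
  catch {
    return false
  }
}

isValidHexColor('#A020F0') // true
isValidHexColor('not-a-hex') // false
isValidPrivacyPolicyUrl('ftp://invalid-url') // false
```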
diff --git a/web/app/components/app/text-generate/saved-items/index.spec.tsx b/web/app/components/app/text-generate/saved-items/index.spec.tsx
new file mode 100644
index 0000000000..b83c812c19
--- /dev/null
+++ b/web/app/components/app/text-generate/saved-items/index.spec.tsx
@@ -0,0 +1,67 @@
+import type { ISavedItemsProps } from './index'
+import { fireEvent, render, screen } from '@testing-library/react'
+import copy from 'copy-to-clipboard'
+
+import * as React from 'react'
+import { beforeEach, describe, expect, it, vi } from 'vitest'
+import Toast from '@/app/components/base/toast'
+import SavedItems from './index'
+
+vi.mock('copy-to-clipboard', () => ({
+  __esModule: true,
+  default: vi.fn(),
+}))
+vi.mock('next/navigation', () => ({
+  useParams: () => ({}),
+  usePathname: () => '/',
+}))
+
+const mockCopy = vi.mocked(copy)
+const toastNotifySpy = vi.spyOn(Toast, 'notify')
+
+const baseProps: ISavedItemsProps = {
+  list: [
+    { id: '1', answer: 'hello world' },
+  ],
+  isShowTextToSpeech: true,
+  onRemove: vi.fn(),
+  onStartCreateContent: vi.fn(),
+}
+
+describe('SavedItems', () => {
+  beforeEach(() => {
+    vi.clearAllMocks()
+    toastNotifySpy.mockClear()
+  })
+
+  it('renders saved answers with metadata and controls', () => {
+    const { container } = render(<SavedItems {...baseProps} />)
+
+    const markdownElement = container.querySelector('.markdown-body')
+    expect(markdownElement).toBeInTheDocument()
+    expect(screen.getByText('11 common.unit.char')).toBeInTheDocument()
+
+    const actionArea = container.querySelector('[class*="bg-components-actionbar-bg"]')
+    const actionButtons = actionArea?.querySelectorAll('button') ?? []
+    expect(actionButtons.length).toBeGreaterThanOrEqual(3)
+  })
+
+  it('copies content and notifies, and triggers remove callback', () => {
+    const handleRemove = vi.fn()
+    const { container } = render(<SavedItems {...baseProps} onRemove={handleRemove} />)
+
+    const actionArea = container.querySelector('[class*="bg-components-actionbar-bg"]')
+    const actionButtons = actionArea?.querySelectorAll('button') ?? []
+    expect(actionButtons.length).toBeGreaterThanOrEqual(3)
+
+    const copyButton = actionButtons[1]
+    const deleteButton = actionButtons[2]
+
+    fireEvent.click(copyButton)
+    expect(mockCopy).toHaveBeenCalledWith('hello world')
+    expect(toastNotifySpy).toHaveBeenCalledWith({ type: 'success', message: 'common.actionMsg.copySuccessfully' })
+
+    fireEvent.click(deleteButton)
+    expect(handleRemove).toHaveBeenCalledWith('1')
+  })
+})
diff --git a/web/app/components/app/text-generate/saved-items/no-data/index.spec.tsx b/web/app/components/app/text-generate/saved-items/no-data/index.spec.tsx
new file mode 100644
index 0000000000..59b950054c
--- /dev/null
+++ b/web/app/components/app/text-generate/saved-items/no-data/index.spec.tsx
@@ -0,0 +1,22 @@
+import { fireEvent, render, screen } from '@testing-library/react'
+import { describe, expect, it, vi } from 'vitest'
+
+import NoData from './index'
+
+describe('NoData', () => {
+  it('renders title/description and calls callback when button clicked', () => {
+    const handleStart = vi.fn()
+    render(<NoData onStartCreateContent={handleStart} />)
+
+    const title = screen.getByText('share.generation.savedNoData.title')
+    const description = screen.getByText('share.generation.savedNoData.description')
+    const button = screen.getByRole('button', { name: 'share.generation.savedNoData.startCreateContent' })
+
+    expect(title).toBeInTheDocument()
+    expect(description).toBeInTheDocument()
+    expect(button).toBeInTheDocument()
+
+    fireEvent.click(button)
+    expect(handleStart).toHaveBeenCalledTimes(1)
+  })
+})
diff --git a/web/app/components/base/avatar/index.spec.tsx b/web/app/components/base/avatar/index.spec.tsx
new file mode 100644
index 0000000000..e85690880b
--- /dev/null
+++ b/web/app/components/base/avatar/index.spec.tsx
@@ -0,0 +1,308 @@
+import { fireEvent, render, screen, waitFor } from '@testing-library/react'
+import Avatar from './index'
+
+describe('Avatar', () => {
+  beforeEach(() => {
+    vi.clearAllMocks()
+  })
+
+  // Rendering tests - verify component renders correctly in different states
+  describe('Rendering', () => {
+    it('should render img element with correct alt and src when avatar URL is provided', () => {
+      const avatarUrl = 'https://example.com/avatar.jpg'
+      const props = { name: 'John Doe', avatar: avatarUrl }
+
+      render(<Avatar {...props} />)
+
+      const img = screen.getByRole('img', { name: 'John Doe' })
+      expect(img).toBeInTheDocument()
+      expect(img).toHaveAttribute('src', avatarUrl)
+    })
+
+    it('should render fallback div with uppercase initial when avatar is null', () => {
+      const props = { name: 'alice', avatar: null }
+
+      render(<Avatar {...props} />)
+
+      expect(screen.queryByRole('img')).not.toBeInTheDocument()
+      expect(screen.getByText('A')).toBeInTheDocument()
+    })
+  })
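The Rendering tests above and the State Management tests that follow pin down a fallback contract: show the image when a URL is set and it loads, fall back to the uppercase initial on error, and give a changed URL another chance to load. A minimal sketch of a component satisfying those tests; this is an assumed shape for illustration, not Dify's actual Avatar implementation:

```tsx
// Assumed sketch of the behavior under test, not the real component.
import * as React from 'react'

const AvatarSketch = ({ name, avatar }: { name: string, avatar: string | null }) => {
  const [hasError, setHasError] = React.useState(false)

  // Reset the error state whenever a new (non-null) URL arrives,
  // so a changed avatar gets another chance to load.
  React.useEffect(() => {
    if (avatar)
      setHasError(false)
  }, [avatar])

  if (avatar && !hasError)
    return <img src={avatar} alt={name} onError={() => setHasError(true)} />

  // Fallback: first letter of the name, uppercased.
  return <div>{name[0]?.toUpperCase()}</div>
}
```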
expected, + fontSize: expected, + lineHeight: expected, + }) + }) + + it('should apply size to fallback div when avatar is null', () => { + const props = { name: 'Test', avatar: null, size: 40 } + + render() + + const textElement = screen.getByText('T') + const outerDiv = textElement.parentElement as HTMLElement + expect(outerDiv).toHaveStyle({ width: '40px', height: '40px' }) + }) + }) + + describe('className prop', () => { + it('should merge className with default avatar classes on img', () => { + const props = { + name: 'Test', + avatar: 'https://example.com/avatar.jpg', + className: 'custom-class', + } + + render() + + const img = screen.getByRole('img') + expect(img).toHaveClass('custom-class') + expect(img).toHaveClass('shrink-0', 'flex', 'items-center', 'rounded-full', 'bg-primary-600') + }) + + it('should merge className with default avatar classes on fallback div', () => { + const props = { + name: 'Test', + avatar: null, + className: 'my-custom-class', + } + + render() + + const textElement = screen.getByText('T') + const outerDiv = textElement.parentElement as HTMLElement + expect(outerDiv).toHaveClass('my-custom-class') + expect(outerDiv).toHaveClass('shrink-0', 'flex', 'items-center', 'rounded-full', 'bg-primary-600') + }) + }) + + describe('textClassName prop', () => { + it('should apply textClassName to the initial text element', () => { + const props = { + name: 'Test', + avatar: null, + textClassName: 'custom-text-class', + } + + render() + + const textElement = screen.getByText('T') + expect(textElement).toHaveClass('custom-text-class') + expect(textElement).toHaveClass('scale-[0.4]', 'text-center', 'text-white') + }) + }) + }) + + // State Management tests - verify useState and useEffect behavior + describe('State Management', () => { + it('should switch to fallback when image fails to load', async () => { + const props = { name: 'John', avatar: 'https://example.com/broken.jpg' } + render() + const img = screen.getByRole('img') + + fireEvent.error(img) + + await waitFor(() => { + expect(screen.queryByRole('img')).not.toBeInTheDocument() + }) + expect(screen.getByText('J')).toBeInTheDocument() + }) + + it('should reset error state when avatar URL changes', async () => { + const initialProps = { name: 'John', avatar: 'https://example.com/broken.jpg' } + const { rerender } = render() + const img = screen.getByRole('img') + + // First, trigger error + fireEvent.error(img) + await waitFor(() => { + expect(screen.queryByRole('img')).not.toBeInTheDocument() + }) + expect(screen.getByText('J')).toBeInTheDocument() + + rerender() + + await waitFor(() => { + expect(screen.getByRole('img')).toBeInTheDocument() + }) + expect(screen.queryByText('J')).not.toBeInTheDocument() + }) + + it('should not reset error state if avatar becomes null', async () => { + const initialProps = { name: 'John', avatar: 'https://example.com/broken.jpg' } + const { rerender } = render() + + // Trigger error + fireEvent.error(screen.getByRole('img')) + await waitFor(() => { + expect(screen.getByText('J')).toBeInTheDocument() + }) + + rerender() + + await waitFor(() => { + expect(screen.queryByRole('img')).not.toBeInTheDocument() + }) + expect(screen.getByText('J')).toBeInTheDocument() + }) + }) + + // Event Handlers tests - verify onError callback behavior + describe('Event Handlers', () => { + it('should call onError with true when image fails to load', () => { + const onErrorMock = vi.fn() + const props = { + name: 'John', + avatar: 'https://example.com/broken.jpg', + onError: onErrorMock, + } + render() 
+ + fireEvent.error(screen.getByRole('img')) + + expect(onErrorMock).toHaveBeenCalledTimes(1) + expect(onErrorMock).toHaveBeenCalledWith(true) + }) + + it('should call onError with false when image loads successfully', () => { + const onErrorMock = vi.fn() + const props = { + name: 'John', + avatar: 'https://example.com/avatar.jpg', + onError: onErrorMock, + } + render() + + fireEvent.load(screen.getByRole('img')) + + expect(onErrorMock).toHaveBeenCalledTimes(1) + expect(onErrorMock).toHaveBeenCalledWith(false) + }) + + it('should not throw when onError is not provided', async () => { + const props = { name: 'John', avatar: 'https://example.com/broken.jpg' } + render() + + expect(() => fireEvent.error(screen.getByRole('img'))).not.toThrow() + await waitFor(() => { + expect(screen.getByText('J')).toBeInTheDocument() + }) + }) + }) + + // Edge Cases tests - verify handling of unusual inputs + describe('Edge Cases', () => { + it('should handle empty string name gracefully', () => { + const props = { name: '', avatar: null } + + const { container } = render() + + // Note: Using querySelector here because empty name produces no visible text, + // making semantic queries (getByRole, getByText) impossible + const textElement = container.querySelector('.text-white') as HTMLElement + expect(textElement).toBeInTheDocument() + expect(textElement.textContent).toBe('') + }) + + it.each([ + { name: 'äø­ę–‡å', expected: 'äø­', label: 'Chinese characters' }, + { name: '123User', expected: '1', label: 'number' }, + ])('should display first character when name starts with $label', ({ name, expected }) => { + const props = { name, avatar: null } + + render() + + expect(screen.getByText(expected)).toBeInTheDocument() + }) + + it('should handle empty string avatar as falsy value', () => { + const props = { name: 'Test', avatar: '' as string | null } + + render() + + expect(screen.queryByRole('img')).not.toBeInTheDocument() + expect(screen.getByText('T')).toBeInTheDocument() + }) + + it('should handle undefined className and textClassName', () => { + const props = { name: 'Test', avatar: null } + + render() + + const textElement = screen.getByText('T') + const outerDiv = textElement.parentElement as HTMLElement + expect(outerDiv).toHaveClass('shrink-0', 'flex', 'items-center', 'rounded-full', 'bg-primary-600') + }) + + it.each([ + { size: 0, expected: '0px', label: 'zero' }, + { size: 1000, expected: '1000px', label: 'very large' }, + ])('should handle $label size value', ({ size, expected }) => { + const props = { name: 'Test', avatar: null, size } + + render() + + const textElement = screen.getByText('T') + const outerDiv = textElement.parentElement as HTMLElement + expect(outerDiv).toHaveStyle({ width: expected, height: expected }) + }) + }) + + // Combined props tests - verify props work together correctly + describe('Combined Props', () => { + it('should apply all props correctly when used together', () => { + const onErrorMock = vi.fn() + const props = { + name: 'Test User', + avatar: 'https://example.com/avatar.jpg', + size: 64, + className: 'custom-avatar', + onError: onErrorMock, + } + + render() + + const img = screen.getByRole('img') + expect(img).toHaveAttribute('alt', 'Test User') + expect(img).toHaveAttribute('src', 'https://example.com/avatar.jpg') + expect(img).toHaveStyle({ width: '64px', height: '64px' }) + expect(img).toHaveClass('custom-avatar') + + // Trigger load to verify onError callback + fireEvent.load(img) + expect(onErrorMock).toHaveBeenCalledWith(false) + }) + + it('should apply 
all fallback props correctly when used together', () => { + const props = { + name: 'Fallback User', + avatar: null, + size: 48, + className: 'fallback-custom', + textClassName: 'custom-text-style', + } + + render() + + const textElement = screen.getByText('F') + const outerDiv = textElement.parentElement as HTMLElement + expect(outerDiv).toHaveClass('fallback-custom') + expect(outerDiv).toHaveStyle({ width: '48px', height: '48px' }) + expect(textElement).toHaveClass('custom-text-style') + }) + }) +}) diff --git a/web/app/components/billing/billing-page/index.spec.tsx b/web/app/components/billing/billing-page/index.spec.tsx new file mode 100644 index 0000000000..2310baa4f4 --- /dev/null +++ b/web/app/components/billing/billing-page/index.spec.tsx @@ -0,0 +1,84 @@ +import { fireEvent, render, screen, waitFor } from '@testing-library/react' +import Billing from './index' + +let currentBillingUrl: string | null = 'https://billing' +let fetching = false +let isManager = true +let enableBilling = true + +const refetchMock = vi.fn() +const openAsyncWindowMock = vi.fn() + +vi.mock('@/service/use-billing', () => ({ + useBillingUrl: () => ({ + data: currentBillingUrl, + isFetching: fetching, + refetch: refetchMock, + }), +})) + +vi.mock('@/hooks/use-async-window-open', () => ({ + useAsyncWindowOpen: () => openAsyncWindowMock, +})) + +vi.mock('@/context/app-context', () => ({ + useAppContext: () => ({ + isCurrentWorkspaceManager: isManager, + }), +})) + +vi.mock('@/context/provider-context', () => ({ + useProviderContext: () => ({ + enableBilling, + }), +})) + +vi.mock('../plan', () => ({ + __esModule: true, + default: ({ loc }: { loc: string }) =>
, +})) + +describe('Billing', () => { + beforeEach(() => { + vi.clearAllMocks() + currentBillingUrl = 'https://billing' + fetching = false + isManager = true + enableBilling = true + refetchMock.mockResolvedValue({ data: 'https://billing' }) + }) + + it('hides the billing action when user is not manager or billing is disabled', () => { + isManager = false + render() + expect(screen.queryByRole('button', { name: /billing\.viewBillingTitle/ })).not.toBeInTheDocument() + + vi.clearAllMocks() + isManager = true + enableBilling = false + render() + expect(screen.queryByRole('button', { name: /billing\.viewBillingTitle/ })).not.toBeInTheDocument() + }) + + it('opens the billing window with the immediate url when the button is clicked', async () => { + render() + + const actionButton = screen.getByRole('button', { name: /billing\.viewBillingTitle/ }) + fireEvent.click(actionButton) + + await waitFor(() => expect(openAsyncWindowMock).toHaveBeenCalled()) + const [, options] = openAsyncWindowMock.mock.calls[0] + expect(options).toMatchObject({ + immediateUrl: currentBillingUrl, + features: 'noopener,noreferrer', + }) + }) + + it('disables the button while billing url is fetching', () => { + fetching = true + render() + + const actionButton = screen.getByRole('button', { name: /billing\.viewBillingTitle/ }) + expect(actionButton).toBeDisabled() + }) +}) diff --git a/web/app/components/billing/header-billing-btn/index.spec.tsx b/web/app/components/billing/header-billing-btn/index.spec.tsx new file mode 100644 index 0000000000..b87b733353 --- /dev/null +++ b/web/app/components/billing/header-billing-btn/index.spec.tsx @@ -0,0 +1,92 @@ +import { fireEvent, render, screen } from '@testing-library/react' +import { Plan } from '../type' +import HeaderBillingBtn from './index' + +type HeaderGlobal = typeof globalThis & { + __mockProviderContext?: ReturnType +} + +function getHeaderGlobal(): HeaderGlobal { + return globalThis as HeaderGlobal +} + +const ensureProviderContextMock = () => { + const globals = getHeaderGlobal() + if (!globals.__mockProviderContext) + throw new Error('Provider context mock not set') + return globals.__mockProviderContext +} + +vi.mock('@/context/provider-context', () => { + const mock = vi.fn() + const globals = getHeaderGlobal() + globals.__mockProviderContext = mock + return { + useProviderContext: () => mock(), + } +}) + +vi.mock('../upgrade-btn', () => ({ + __esModule: true, + default: () => , +})) + +describe('HeaderBillingBtn', () => { + beforeEach(() => { + vi.clearAllMocks() + ensureProviderContextMock().mockReturnValue({ + plan: { + type: Plan.professional, + }, + enableBilling: true, + isFetchedPlan: true, + }) + }) + + it('renders nothing when billing is disabled or plan is not fetched', () => { + ensureProviderContextMock().mockReturnValueOnce({ + plan: { + type: Plan.professional, + }, + enableBilling: false, + isFetchedPlan: true, + }) + + const { container } = render() + + expect(container.firstChild).toBeNull() + }) + + it('renders upgrade button for sandbox plan', () => { + ensureProviderContextMock().mockReturnValueOnce({ + plan: { + type: Plan.sandbox, + }, + enableBilling: true, + isFetchedPlan: true, + }) + + render() + + expect(screen.getByTestId('upgrade-btn')).toBeInTheDocument() + }) + + it('renders plan badge and forwards clicks when not display-only', () => { + const onClick = vi.fn() + + const { rerender } = render() + + const badge = screen.getByText('pro').closest('div') + + expect(badge).toHaveClass('cursor-pointer') + + fireEvent.click(badge!) 
+ expect(onClick).toHaveBeenCalledTimes(1) + + rerender() + expect(screen.getByText('pro').closest('div')).toHaveClass('cursor-default') + + fireEvent.click(screen.getByText('pro').closest('div')!) + expect(onClick).toHaveBeenCalledTimes(1) + }) +}) diff --git a/web/app/components/billing/partner-stack/index.spec.tsx b/web/app/components/billing/partner-stack/index.spec.tsx new file mode 100644 index 0000000000..7b4658cf0f --- /dev/null +++ b/web/app/components/billing/partner-stack/index.spec.tsx @@ -0,0 +1,44 @@ +import { render } from '@testing-library/react' +import PartnerStack from './index' + +let isCloudEdition = true + +const saveOrUpdate = vi.fn() +const bind = vi.fn() + +vi.mock('@/config', () => ({ + get IS_CLOUD_EDITION() { + return isCloudEdition + }, +})) + +vi.mock('./use-ps-info', () => ({ + __esModule: true, + default: () => ({ + saveOrUpdate, + bind, + }), +})) + +describe('PartnerStack', () => { + beforeEach(() => { + vi.clearAllMocks() + isCloudEdition = true + }) + + it('does not call partner stack helpers when not in cloud edition', () => { + isCloudEdition = false + + render() + + expect(saveOrUpdate).not.toHaveBeenCalled() + expect(bind).not.toHaveBeenCalled() + }) + + it('calls saveOrUpdate and bind once when running in cloud edition', () => { + render() + + expect(saveOrUpdate).toHaveBeenCalledTimes(1) + expect(bind).toHaveBeenCalledTimes(1) + }) +}) diff --git a/web/app/components/billing/partner-stack/use-ps-info.spec.tsx b/web/app/components/billing/partner-stack/use-ps-info.spec.tsx new file mode 100644 index 0000000000..14215f2514 --- /dev/null +++ b/web/app/components/billing/partner-stack/use-ps-info.spec.tsx @@ -0,0 +1,197 @@ +import { act, renderHook } from '@testing-library/react' +import { PARTNER_STACK_CONFIG } from '@/config' +import usePSInfo from './use-ps-info' + +let searchParamsValues: Record = {} +const setSearchParams = (values: Record) => { + searchParamsValues = values +} + +type PartnerStackGlobal = typeof globalThis & { + __partnerStackCookieMocks?: { + get: ReturnType + set: ReturnType + remove: ReturnType + } + __partnerStackMutateAsync?: ReturnType +} + +function getPartnerStackGlobal(): PartnerStackGlobal { + return globalThis as PartnerStackGlobal +} + +const ensureCookieMocks = () => { + const globals = getPartnerStackGlobal() + if (!globals.__partnerStackCookieMocks) + throw new Error('Cookie mocks not initialized') + return globals.__partnerStackCookieMocks +} + +const ensureMutateAsync = () => { + const globals = getPartnerStackGlobal() + if (!globals.__partnerStackMutateAsync) + throw new Error('Mutate mock not initialized') + return globals.__partnerStackMutateAsync +} + +vi.mock('js-cookie', () => { + const get = vi.fn() + const set = vi.fn() + const remove = vi.fn() + const globals = getPartnerStackGlobal() + globals.__partnerStackCookieMocks = { get, set, remove } + const cookieApi = { get, set, remove } + return { + __esModule: true, + default: cookieApi, + get, + set, + remove, + } +}) +vi.mock('next/navigation', () => ({ + useSearchParams: () => ({ + get: (key: string) => searchParamsValues[key] ?? 
null, + }), +})) +vi.mock('@/service/use-billing', () => { + const mutateAsync = vi.fn() + const globals = getPartnerStackGlobal() + globals.__partnerStackMutateAsync = mutateAsync + return { + useBindPartnerStackInfo: () => ({ + mutateAsync, + }), + } +}) + +describe('usePSInfo', () => { + const originalLocationDescriptor = Object.getOwnPropertyDescriptor(globalThis, 'location') + + beforeAll(() => { + Object.defineProperty(globalThis, 'location', { + value: { hostname: 'cloud.dify.ai' }, + configurable: true, + }) + }) + + beforeEach(() => { + setSearchParams({}) + const { get, set, remove } = ensureCookieMocks() + get.mockReset() + set.mockReset() + remove.mockReset() + const mutate = ensureMutateAsync() + mutate.mockReset() + mutate.mockResolvedValue(undefined) + get.mockReturnValue('{}') + }) + + afterAll(() => { + if (originalLocationDescriptor) + Object.defineProperty(globalThis, 'location', originalLocationDescriptor) + }) + + it('saves partner info when query params change', () => { + const { get, set } = ensureCookieMocks() + get.mockReturnValue(JSON.stringify({ partnerKey: 'old', clickId: 'old-click' })) + setSearchParams({ + ps_partner_key: 'new-partner', + ps_xid: 'new-click', + }) + + const { result } = renderHook(() => usePSInfo()) + + expect(result.current.psPartnerKey).toBe('new-partner') + expect(result.current.psClickId).toBe('new-click') + + act(() => { + result.current.saveOrUpdate() + }) + + expect(set).toHaveBeenCalledWith( + PARTNER_STACK_CONFIG.cookieName, + JSON.stringify({ + partnerKey: 'new-partner', + clickId: 'new-click', + }), + { + expires: PARTNER_STACK_CONFIG.saveCookieDays, + path: '/', + domain: '.dify.ai', + }, + ) + }) + + it('does not overwrite cookie when params do not change', () => { + setSearchParams({ + ps_partner_key: 'existing', + ps_xid: 'existing-click', + }) + const { get } = ensureCookieMocks() + get.mockReturnValue(JSON.stringify({ + partnerKey: 'existing', + clickId: 'existing-click', + })) + + const { result } = renderHook(() => usePSInfo()) + + act(() => { + result.current.saveOrUpdate() + }) + + const { set } = ensureCookieMocks() + expect(set).not.toHaveBeenCalled() + }) + + it('binds partner info and clears cookie once', async () => { + setSearchParams({ + ps_partner_key: 'bind-partner', + ps_xid: 'bind-click', + }) + + const { result } = renderHook(() => usePSInfo()) + + const mutate = ensureMutateAsync() + const { remove } = ensureCookieMocks() + await act(async () => { + await result.current.bind() + }) + + expect(mutate).toHaveBeenCalledWith({ + partnerKey: 'bind-partner', + clickId: 'bind-click', + }) + expect(remove).toHaveBeenCalledWith(PARTNER_STACK_CONFIG.cookieName, { + path: '/', + domain: '.dify.ai', + }) + + await act(async () => { + await result.current.bind() + }) + + expect(mutate).toHaveBeenCalledTimes(1) + }) + + it('still removes cookie when bind fails with status 400', async () => { + const mutate = ensureMutateAsync() + mutate.mockRejectedValueOnce({ status: 400 }) + setSearchParams({ + ps_partner_key: 'bind-partner', + ps_xid: 'bind-click', + }) + + const { result } = renderHook(() => usePSInfo()) + + await act(async () => { + await result.current.bind() + }) + + const { remove } = ensureCookieMocks() + expect(remove).toHaveBeenCalledWith(PARTNER_STACK_CONFIG.cookieName, { + path: '/', + domain: '.dify.ai', + }) + }) +}) diff --git a/web/app/components/billing/plan/index.spec.tsx b/web/app/components/billing/plan/index.spec.tsx new file mode 100644 index 0000000000..bcdb83b5df --- /dev/null +++ 
b/web/app/components/billing/plan/index.spec.tsx @@ -0,0 +1,130 @@ +import { fireEvent, render, screen, waitFor } from '@testing-library/react' +import { EDUCATION_VERIFYING_LOCALSTORAGE_ITEM } from '@/app/education-apply/constants' +import { Plan } from '../type' +import PlanComp from './index' + +let currentPath = '/billing' + +const push = vi.fn() + +vi.mock('next/navigation', () => ({ + useRouter: () => ({ push }), + usePathname: () => currentPath, +})) + +const setShowAccountSettingModalMock = vi.fn() +vi.mock('@/context/modal-context', () => ({ + // eslint-disable-next-line ts/no-explicit-any + useModalContextSelector: (selector: any) => selector({ + setShowAccountSettingModal: setShowAccountSettingModalMock, + }), +})) + +const providerContextMock = vi.fn() +vi.mock('@/context/provider-context', () => ({ + useProviderContext: () => providerContextMock(), +})) + +vi.mock('@/context/app-context', () => ({ + useAppContext: () => ({ + userProfile: { email: 'user@example.com' }, + isCurrentWorkspaceManager: true, + }), +})) + +const mutateAsyncMock = vi.fn() +let isPending = false +vi.mock('@/service/use-education', () => ({ + useEducationVerify: () => ({ + mutateAsync: mutateAsyncMock, + isPending, + }), +})) + +const verifyStateModalMock = vi.fn(props => ( +
<div data-testid="verify-modal" data-is-show={String(props.isShow)}>
+  {props.isShow ? 'visible' : 'hidden'}
+</div>
+)) +vi.mock('@/app/education-apply/verify-state-modal', () => ({ + __esModule: true, + // eslint-disable-next-line ts/no-explicit-any + default: (props: any) => verifyStateModalMock(props), +})) + +vi.mock('../upgrade-btn', () => ({ + __esModule: true, + default: () => , +})) + +describe('PlanComp', () => { + const planMock = { + type: Plan.professional, + usage: { + teamMembers: 4, + documentsUploadQuota: 3, + vectorSpace: 8, + annotatedResponse: 5, + triggerEvents: 60, + apiRateLimit: 100, + }, + total: { + teamMembers: 10, + documentsUploadQuota: 20, + vectorSpace: 10, + annotatedResponse: 500, + triggerEvents: 100, + apiRateLimit: 200, + }, + reset: { + triggerEvents: 2, + apiRateLimit: 1, + }, + } + + beforeEach(() => { + vi.clearAllMocks() + currentPath = '/billing' + isPending = false + providerContextMock.mockReturnValue({ + plan: planMock, + enableEducationPlan: true, + allowRefreshEducationVerify: false, + isEducationAccount: false, + }) + mutateAsyncMock.mockReset() + mutateAsyncMock.mockResolvedValue({ token: 'token' }) + }) + + it('renders plan info and handles education verify success', async () => { + render() + + expect(screen.getByText('billing.plans.professional.name')).toBeInTheDocument() + expect(screen.getByTestId('plan-upgrade-btn')).toBeInTheDocument() + + const verifyBtn = screen.getByText('education.toVerified') + fireEvent.click(verifyBtn) + + await waitFor(() => expect(mutateAsyncMock).toHaveBeenCalled()) + await waitFor(() => expect(push).toHaveBeenCalledWith('/education-apply?token=token')) + expect(localStorage.removeItem).toHaveBeenCalledWith(EDUCATION_VERIFYING_LOCALSTORAGE_ITEM) + }) + + it('shows modal when education verify fails', async () => { + mutateAsyncMock.mockRejectedValueOnce(new Error('boom')) + render() + + const verifyBtn = screen.getByText('education.toVerified') + fireEvent.click(verifyBtn) + + await waitFor(() => expect(mutateAsyncMock).toHaveBeenCalled()) + await waitFor(() => expect(screen.getByTestId('verify-modal').getAttribute('data-is-show')).toBe('true')) + }) + + it('resets modal context when on education apply path', () => { + currentPath = '/education-apply/setup' + render() + + expect(setShowAccountSettingModalMock).toHaveBeenCalledWith(null) + }) +}) diff --git a/web/app/components/billing/progress-bar/index.spec.tsx b/web/app/components/billing/progress-bar/index.spec.tsx new file mode 100644 index 0000000000..a9c91468de --- /dev/null +++ b/web/app/components/billing/progress-bar/index.spec.tsx @@ -0,0 +1,25 @@ +import { render, screen } from '@testing-library/react' +import ProgressBar from './index' + +describe('ProgressBar', () => { + it('renders with provided percent and color', () => { + render() + + const bar = screen.getByTestId('billing-progress-bar') + expect(bar).toHaveClass('bg-test-color') + expect(bar.getAttribute('style')).toContain('width: 42%') + }) + + it('caps width at 100% when percent exceeds max', () => { + render() + + const bar = screen.getByTestId('billing-progress-bar') + expect(bar.getAttribute('style')).toContain('width: 100%') + }) + + it('uses the default color when no color prop is provided', () => { + render() + + expect(screen.getByTestId('billing-progress-bar')).toHaveClass('#2970FF') + }) +}) diff --git a/web/app/components/billing/trigger-events-limit-modal/index.spec.tsx b/web/app/components/billing/trigger-events-limit-modal/index.spec.tsx new file mode 100644 index 0000000000..a3d04c6031 --- /dev/null +++ b/web/app/components/billing/trigger-events-limit-modal/index.spec.tsx @@ -0,0 +1,70 
@@ +import { render, screen } from '@testing-library/react' +import TriggerEventsLimitModal from './index' + +const mockOnClose = vi.fn() +const mockOnUpgrade = vi.fn() + +const planUpgradeModalMock = vi.fn((props: { show: boolean, title: string, description: string, extraInfo?: React.ReactNode, onClose: () => void, onUpgrade: () => void }) => ( +
<div data-testid="plan-upgrade-modal" data-show={String(props.show)} data-title={props.title} data-description={props.description}>
+  {props.extraInfo}
+</div>
+)) + +vi.mock('@/app/components/billing/plan-upgrade-modal', () => ({ + __esModule: true, + // eslint-disable-next-line ts/no-explicit-any + default: (props: any) => planUpgradeModalMock(props), +})) + +describe('TriggerEventsLimitModal', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('passes the trigger usage props to the upgrade modal', () => { + render( + , + ) + + const modal = screen.getByTestId('plan-upgrade-modal') + expect(modal.getAttribute('data-show')).toBe('true') + expect(modal.getAttribute('data-title')).toContain('billing.triggerLimitModal.title') + expect(modal.getAttribute('data-description')).toContain('billing.triggerLimitModal.description') + expect(planUpgradeModalMock).toHaveBeenCalled() + + const passedProps = planUpgradeModalMock.mock.calls[0][0] + expect(passedProps.onClose).toBe(mockOnClose) + expect(passedProps.onUpgrade).toBe(mockOnUpgrade) + + expect(screen.getByText('billing.triggerLimitModal.usageTitle')).toBeInTheDocument() + expect(screen.getByText('12')).toBeInTheDocument() + expect(screen.getByText('20')).toBeInTheDocument() + }) + + it('renders even when trigger modal is hidden', () => { + render( + , + ) + + expect(planUpgradeModalMock).toHaveBeenCalled() + expect(screen.getByTestId('plan-upgrade-modal').getAttribute('data-show')).toBe('false') + }) +}) diff --git a/web/app/components/billing/usage-info/apps-info.spec.tsx b/web/app/components/billing/usage-info/apps-info.spec.tsx new file mode 100644 index 0000000000..7289b474e5 --- /dev/null +++ b/web/app/components/billing/usage-info/apps-info.spec.tsx @@ -0,0 +1,35 @@ +import { render, screen } from '@testing-library/react' +import { defaultPlan } from '../config' +import AppsInfo from './apps-info' + +const appsUsage = 7 +const appsTotal = 15 + +const mockPlan = { + ...defaultPlan, + usage: { + ...defaultPlan.usage, + buildApps: appsUsage, + }, + total: { + ...defaultPlan.total, + buildApps: appsTotal, + }, +} + +vi.mock('@/context/provider-context', () => ({ + useProviderContext: () => ({ + plan: mockPlan, + }), +})) + +describe('AppsInfo', () => { + it('renders build apps usage information with context data', () => { + render() + + expect(screen.getByText('billing.usagePage.buildApps')).toBeInTheDocument() + expect(screen.getByText(`${appsUsage}`)).toBeInTheDocument() + expect(screen.getByText(`${appsTotal}`)).toBeInTheDocument() + expect(screen.getByText('billing.usagePage.buildApps').closest('.apps-info-class')).toBeInTheDocument() + }) +}) diff --git a/web/app/components/billing/usage-info/index.spec.tsx b/web/app/components/billing/usage-info/index.spec.tsx new file mode 100644 index 0000000000..3137c4865f --- /dev/null +++ b/web/app/components/billing/usage-info/index.spec.tsx @@ -0,0 +1,114 @@ +import { render, screen } from '@testing-library/react' +import { NUM_INFINITE } from '../config' +import UsageInfo from './index' + +const TestIcon = () => + +describe('UsageInfo', () => { + it('renders the metric with a suffix unit and tooltip text', () => { + render( + , + ) + + expect(screen.getByTestId('usage-icon')).toBeInTheDocument() + expect(screen.getByText('Apps')).toBeInTheDocument() + expect(screen.getByText('30')).toBeInTheDocument() + expect(screen.getByText('100')).toBeInTheDocument() + expect(screen.getByText('GB')).toBeInTheDocument() + }) + + it('renders inline unit when unitPosition is inline', () => { + render( + , + ) + + expect(screen.getByText('100GB')).toBeInTheDocument() + }) + + it('shows reset hint text instead of the unit when resetHint is provided', () => 
{ + const resetHint = 'Resets in 3 days' + render( + , + ) + + expect(screen.getByText(resetHint)).toBeInTheDocument() + expect(screen.queryByText('GB')).not.toBeInTheDocument() + }) + + it('displays unlimited text when total is infinite', () => { + render( + , + ) + + expect(screen.getByText('billing.plansCommon.unlimited')).toBeInTheDocument() + }) + + it('applies warning color when usage is close to the limit', () => { + render( + , + ) + + const progressBar = screen.getByTestId('billing-progress-bar') + expect(progressBar).toHaveClass('bg-components-progress-warning-progress') + }) + + it('applies error color when usage exceeds the limit', () => { + render( + , + ) + + const progressBar = screen.getByTestId('billing-progress-bar') + expect(progressBar).toHaveClass('bg-components-progress-error-progress') + }) + + it('does not render the icon when hideIcon is true', () => { + render( + , + ) + + expect(screen.queryByTestId('usage-icon')).not.toBeInTheDocument() + }) +}) diff --git a/web/app/components/billing/vector-space-full/index.spec.tsx b/web/app/components/billing/vector-space-full/index.spec.tsx new file mode 100644 index 0000000000..de5607df41 --- /dev/null +++ b/web/app/components/billing/vector-space-full/index.spec.tsx @@ -0,0 +1,58 @@ +import { render, screen } from '@testing-library/react' +import VectorSpaceFull from './index' + +type VectorProviderGlobal = typeof globalThis & { + __vectorProviderContext?: ReturnType +} + +function getVectorGlobal(): VectorProviderGlobal { + return globalThis as VectorProviderGlobal +} + +vi.mock('@/context/provider-context', () => { + const mock = vi.fn() + getVectorGlobal().__vectorProviderContext = mock + return { + useProviderContext: () => mock(), + } +}) + +vi.mock('../upgrade-btn', () => ({ + __esModule: true, + default: () => , +})) + +describe('VectorSpaceFull', () => { + const planMock = { + type: 'team', + usage: { + vectorSpace: 8, + }, + total: { + vectorSpace: 10, + }, + } + + beforeEach(() => { + vi.clearAllMocks() + const globals = getVectorGlobal() + globals.__vectorProviderContext?.mockReturnValue({ + plan: planMock, + }) + }) + + it('renders tip text and upgrade button', () => { + render() + + expect(screen.getByText('billing.vectorSpace.fullTip')).toBeInTheDocument() + expect(screen.getByText('billing.vectorSpace.fullSolution')).toBeInTheDocument() + expect(screen.getByTestId('vector-upgrade-btn')).toBeInTheDocument() + }) + + it('shows vector usage and total', () => { + render() + + expect(screen.getByText('8')).toBeInTheDocument() + expect(screen.getByText('10MB')).toBeInTheDocument() + }) +}) diff --git a/web/app/components/custom/custom-web-app-brand/index.spec.tsx b/web/app/components/custom/custom-web-app-brand/index.spec.tsx new file mode 100644 index 0000000000..e50ca4e9b2 --- /dev/null +++ b/web/app/components/custom/custom-web-app-brand/index.spec.tsx @@ -0,0 +1,147 @@ +import { fireEvent, render, screen, waitFor } from '@testing-library/react' +import { beforeEach, describe, expect, it, vi } from 'vitest' +import { getImageUploadErrorMessage, imageUpload } from '@/app/components/base/image-uploader/utils' +import { useToastContext } from '@/app/components/base/toast' +import { Plan } from '@/app/components/billing/type' +import { useAppContext } from '@/context/app-context' +import { useGlobalPublicStore } from '@/context/global-public-context' +import { useProviderContext } from '@/context/provider-context' +import { updateCurrentWorkspace } from '@/service/common' +import CustomWebAppBrand from './index' + 
+vi.mock('@/app/components/base/toast', () => ({ + useToastContext: vi.fn(), +})) +vi.mock('@/service/common', () => ({ + updateCurrentWorkspace: vi.fn(), +})) +vi.mock('@/context/app-context', () => ({ + useAppContext: vi.fn(), +})) +vi.mock('@/context/provider-context', () => ({ + useProviderContext: vi.fn(), +})) +vi.mock('@/context/global-public-context', () => ({ + useGlobalPublicStore: vi.fn(), +})) +vi.mock('@/app/components/base/image-uploader/utils', () => ({ + imageUpload: vi.fn(), + getImageUploadErrorMessage: vi.fn(), +})) + +const mockNotify = vi.fn() +const mockUseToastContext = vi.mocked(useToastContext) +const mockUpdateCurrentWorkspace = vi.mocked(updateCurrentWorkspace) +const mockUseAppContext = vi.mocked(useAppContext) +const mockUseProviderContext = vi.mocked(useProviderContext) +const mockUseGlobalPublicStore = vi.mocked(useGlobalPublicStore) +const mockImageUpload = vi.mocked(imageUpload) +const mockGetImageUploadErrorMessage = vi.mocked(getImageUploadErrorMessage) + +const defaultPlanUsage = { + buildApps: 0, + teamMembers: 0, + annotatedResponse: 0, + documentsUploadQuota: 0, + apiRateLimit: 0, + triggerEvents: 0, + vectorSpace: 0, +} + +const renderComponent = () => render() + +describe('CustomWebAppBrand', () => { + beforeEach(() => { + vi.clearAllMocks() + mockUseToastContext.mockReturnValue({ notify: mockNotify } as any) + mockUpdateCurrentWorkspace.mockResolvedValue({} as any) + mockUseAppContext.mockReturnValue({ + currentWorkspace: { + custom_config: { + replace_webapp_logo: 'https://example.com/replace.png', + remove_webapp_brand: false, + }, + }, + mutateCurrentWorkspace: vi.fn(), + isCurrentWorkspaceManager: true, + } as any) + mockUseProviderContext.mockReturnValue({ + plan: { + type: Plan.professional, + usage: defaultPlanUsage, + total: defaultPlanUsage, + reset: {}, + }, + enableBilling: false, + } as any) + const systemFeaturesState = { + branding: { + enabled: true, + workspace_logo: 'https://example.com/workspace-logo.png', + }, + } + mockUseGlobalPublicStore.mockImplementation(selector => selector ? 
selector({ systemFeatures: systemFeaturesState } as any) : { systemFeatures: systemFeaturesState }) + mockGetImageUploadErrorMessage.mockReturnValue('upload error') + }) + + it('disables upload controls when the user cannot manage the workspace', () => { + mockUseAppContext.mockReturnValue({ + currentWorkspace: { + custom_config: { + replace_webapp_logo: '', + remove_webapp_brand: false, + }, + }, + mutateCurrentWorkspace: vi.fn(), + isCurrentWorkspaceManager: false, + } as any) + + const { container } = renderComponent() + const fileInput = container.querySelector('input[type="file"]') as HTMLInputElement + expect(fileInput).toBeDisabled() + }) + + it('toggles remove brand switch and calls the backend + mutate', async () => { + const mutateMock = vi.fn() + mockUseAppContext.mockReturnValue({ + currentWorkspace: { + custom_config: { + replace_webapp_logo: '', + remove_webapp_brand: false, + }, + }, + mutateCurrentWorkspace: mutateMock, + isCurrentWorkspaceManager: true, + } as any) + + renderComponent() + const switchInput = screen.getByRole('switch') + fireEvent.click(switchInput) + + await waitFor(() => expect(mockUpdateCurrentWorkspace).toHaveBeenCalledWith({ + url: '/workspaces/custom-config', + body: { remove_webapp_brand: true }, + })) + await waitFor(() => expect(mutateMock).toHaveBeenCalled()) + }) + + it('shows cancel/apply buttons after successful upload and cancels properly', async () => { + mockImageUpload.mockImplementation(({ onProgressCallback, onSuccessCallback }) => { + onProgressCallback(50) + onSuccessCallback({ id: 'new-logo' }) + }) + + const { container } = renderComponent() + const fileInput = container.querySelector('input[type="file"]') as HTMLInputElement + const testFile = new File(['content'], 'logo.png', { type: 'image/png' }) + fireEvent.change(fileInput, { target: { files: [testFile] } }) + + await waitFor(() => expect(mockImageUpload).toHaveBeenCalled()) + await waitFor(() => screen.getByRole('button', { name: 'custom.apply' })) + + const cancelButton = screen.getByRole('button', { name: 'common.operation.cancel' }) + fireEvent.click(cancelButton) + + await waitFor(() => expect(screen.queryByRole('button', { name: 'custom.apply' })).toBeNull()) + }) +}) diff --git a/web/app/components/datasets/hit-testing/index.tsx b/web/app/components/datasets/hit-testing/index.tsx index e75ef48abf..d810442704 100644 --- a/web/app/components/datasets/hit-testing/index.tsx +++ b/web/app/components/datasets/hit-testing/index.tsx @@ -34,7 +34,6 @@ import Records from './components/records' import ResultItem from './components/result-item' import ResultItemExternal from './components/result-item-external' import ModifyRetrievalModal from './modify-retrieval-modal' -import s from './style.module.css' const limit = 10 @@ -115,8 +114,8 @@ const HitTestingPage: FC = ({ datasetId }: Props) => { }, [isMobile, setShowRightPanel]) return ( -
    <div className={s.container}>
-      <div>
+    <div className='relative flex h-full w-full overflow-y-auto'>
+      <div className='h-full flex-1'>
         <div className='…'>
           <div className='…'>{t('datasetHitTesting.title')}</div>
           <div className='…'>{t('datasetHitTesting.desc')}</div>
         </div>
@@ -161,7 +160,7 @@ const HitTestingPage: FC<Props> = ({ datasetId }: Props) => {
         onClose={hideRightPanel}
         footer={null}
       >
-        <div className={…}>
+        <div className='…'>
           {isRetrievalLoading ? (
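The hunk above drops the `style.module.css` import in favor of inline utility classes; the module deleted in the next diff shows which rules were inlined. A minimal sketch of the migration, assuming the `.container` rules translate one-to-one onto Tailwind utilities (the `Before`/`After` components are illustrative, not the real page):

```tsx
// Illustrative sketch of the CSS-module-to-Tailwind migration, not the actual component.
// The deleted module defined: .container { @apply flex h-full w-full relative overflow-y-auto; }
import s from './style.module.css'

// Before: the class name is resolved through the CSS module object
const Before = () => (
  <div className={s.container}>{/* page content */}</div>
)

// After: the same utilities written inline, so the module file
// (and its import) can be deleted outright
const After = () => (
  <div className='relative flex h-full w-full overflow-y-auto'>{/* page content */}</div>
)
```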
diff --git a/web/app/components/datasets/hit-testing/style.module.css b/web/app/components/datasets/hit-testing/style.module.css deleted file mode 100644 index a421962f48..0000000000 --- a/web/app/components/datasets/hit-testing/style.module.css +++ /dev/null @@ -1,43 +0,0 @@ -.container { - @apply flex h-full w-full relative overflow-y-auto; -} - -.container>div { - @apply flex-1 h-full; -} - -.commonIcon { - @apply w-3.5 h-3.5 inline-block align-middle; - background-repeat: no-repeat; - background-position: center center; - background-size: contain; -} - -.app_icon { - background-image: url(./assets/grid.svg); -} - -.hit_testing_icon { - background-image: url(../documents/assets/target.svg); -} - -.plugin_icon { - background-image: url(./assets/plugin.svg); -} - -.cardWrapper { - display: grid; - grid-template-columns: repeat(auto-fill, minmax(284px, auto)); - grid-gap: 16px; - grid-auto-rows: 216px; -} - -.clockWrapper { - border: 0.5px solid #eaecf5; - @apply rounded-lg w-11 h-11 flex justify-center items-center; -} - -.clockIcon { - mask-image: url(./assets/clock.svg); - @apply bg-gray-500; -} diff --git a/web/app/components/header/account-setting/members-page/operation/index.spec.tsx b/web/app/components/header/account-setting/members-page/operation/index.spec.tsx new file mode 100644 index 0000000000..fbe3959a0f --- /dev/null +++ b/web/app/components/header/account-setting/members-page/operation/index.spec.tsx @@ -0,0 +1,91 @@ +import type { Member } from '@/models/common' +import { fireEvent, render, screen, waitFor } from '@testing-library/react' +import { vi } from 'vitest' +import { ToastContext } from '@/app/components/base/toast' +import Operation from './index' + +const mockUpdateMemberRole = vi.fn() +const mockDeleteMemberOrCancelInvitation = vi.fn() + +vi.mock('@/service/common', () => ({ + deleteMemberOrCancelInvitation: () => mockDeleteMemberOrCancelInvitation(), + updateMemberRole: () => mockUpdateMemberRole(), +})) + +const mockUseProviderContext = vi.fn(() => ({ + datasetOperatorEnabled: false, +})) + +vi.mock('@/context/provider-context', () => ({ + useProviderContext: () => mockUseProviderContext(), +})) + +const defaultMember: Member = { + id: 'member-id', + name: 'Test Member', + email: 'test@example.com', + avatar: '', + avatar_url: null, + status: 'active', + role: 'editor', + last_login_at: '', + last_active_at: '', + created_at: '', +} + +const renderOperation = (propsOverride: Partial = {}, operatorRole = 'owner', onOperate?: () => void) => { + const mergedMember = { ...defaultMember, ...propsOverride } + return render( + + + , + ) +} + +describe('Operation', () => { + beforeEach(() => { + vi.clearAllMocks() + mockUseProviderContext.mockReturnValue({ datasetOperatorEnabled: false }) + }) + + it('renders the current role label', () => { + renderOperation() + + expect(screen.getByText('common.members.editor')).toBeInTheDocument() + }) + + it('shows dataset operator option when the feature flag is enabled', async () => { + mockUseProviderContext.mockReturnValue({ datasetOperatorEnabled: true }) + renderOperation() + + fireEvent.click(screen.getByText('common.members.editor')) + + expect(await screen.findByText('common.members.datasetOperator')).toBeInTheDocument() + }) + + it('calls updateMemberRole and onOperate when selecting another role', async () => { + const onOperate = vi.fn() + renderOperation({}, 'owner', onOperate) + + fireEvent.click(screen.getByText('common.members.editor')) + fireEvent.click(await screen.findByText('common.members.normal')) + + await 
waitFor(() => { + expect(mockUpdateMemberRole).toHaveBeenCalled() + expect(onOperate).toHaveBeenCalled() + }) + }) + + it('calls deleteMemberOrCancelInvitation when removing the member', async () => { + const onOperate = vi.fn() + renderOperation({}, 'owner', onOperate) + + fireEvent.click(screen.getByText('common.members.editor')) + fireEvent.click(await screen.findByText('common.members.removeFromTeam')) + + await waitFor(() => { + expect(mockDeleteMemberOrCancelInvitation).toHaveBeenCalled() + expect(onOperate).toHaveBeenCalled() + }) + }) +}) diff --git a/web/app/components/header/account-setting/members-page/operation/index.tsx b/web/app/components/header/account-setting/members-page/operation/index.tsx index da61746685..6effe8b058 100644 --- a/web/app/components/header/account-setting/members-page/operation/index.tsx +++ b/web/app/components/header/account-setting/members-page/operation/index.tsx @@ -1,10 +1,14 @@ 'use client' import type { Member } from '@/models/common' -import { Menu, MenuButton, MenuItem, MenuItems, Transition } from '@headlessui/react' import { CheckIcon, ChevronDownIcon } from '@heroicons/react/24/outline' -import { Fragment, useMemo } from 'react' +import { memo, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' import { useContext } from 'use-context-selector' +import { + PortalToFollowElem, + PortalToFollowElemContent, + PortalToFollowElemTrigger, +} from '@/app/components/base/portal-to-follow-elem' import { ToastContext } from '@/app/components/base/toast' import { useProviderContext } from '@/context/provider-context' import { deleteMemberOrCancelInvitation, updateMemberRole } from '@/service/common' @@ -21,6 +25,7 @@ const Operation = ({ operatorRole, onOperate, }: IOperationProps) => { + const [open, setOpen] = useState(false) const { t } = useTranslation() const { datasetOperatorEnabled } = useProviderContext() const RoleMap = { @@ -51,6 +56,7 @@ const Operation = ({ const { notify } = useContext(ToastContext) const toHump = (name: string) => name.replace(/_(\w)/g, (all, letter) => letter.toUpperCase()) const handleDeleteMemberOrCancelInvitation = async () => { + setOpen(false) try { await deleteMemberOrCancelInvitation({ url: `/workspaces/current/members/${member.id}` }) onOperate() @@ -61,6 +67,7 @@ const Operation = ({ } } const handleUpdateMemberRole = async (role: string) => { + setOpen(false) try { await updateMemberRole({ url: `/workspaces/current/members/${member.id}/update-role`, @@ -75,63 +82,50 @@ const Operation = ({ } return ( - - { - ({ open }) => ( - <> - - {RoleMap[member.role] || RoleMap.normal} - - - - -
+    <PortalToFollowElem open={open} onOpenChange={setOpen} placement='…'>
+      <PortalToFollowElemTrigger onClick={() => setOpen(prev => !prev)}>
+        <div className='…'>
+          {RoleMap[member.role] || RoleMap.normal}
+          <ChevronDownIcon className='…' />
+        </div>
+      </PortalToFollowElemTrigger>
+      <PortalToFollowElemContent className='…'>
+        <div className='…'>
+          {
+            roleList.map(role => (
+              <div key={role} className='…' onClick={() => handleUpdateMemberRole(role)}>
-        roleList.map(role => (
-          <MenuItem key={role}>
-            <div className='…' onClick={() => handleUpdateMemberRole(role)}>
-              {
-                role === member.role
-                  ? <CheckIcon className='…' />
-                  : <div className='…' />
-              }
-              <div>
-                <div className='…'>{t(`common.members.${toHump(role)}` as any)}</div>
-                <div className='…'>{t(`common.members.${toHump(role)}Tip` as any)}</div>
-              </div>
-            </div>
-          </MenuItem>
-        ))
+                {
+                  role === member.role
+                    ? <CheckIcon className='…' />
+                    : <div className='…' />
+                }
+                <div>
+                  <div className='…'>{t(`common.members.${toHump(role)}` as any)}</div>
+                  <div className='…'>{t(`common.members.${toHump(role)}Tip` as any)}</div>
+                </div>
-        <MenuItem>
-          <div className='…' onClick={handleDeleteMemberOrCancelInvitation}>
-            <div>
-              <div className='…'>{t('common.members.removeFromTeam')}</div>
-              <div className='…'>{t('common.members.removeFromTeamTip')}</div>
-            </div>
-          </div>
-        </MenuItem>
-      </MenuItems>
-    </Transition>
-  </>
-  )
-  }
-</Menu>
+              </div>
+            ))
+          }
+          <div className='…' onClick={handleDeleteMemberOrCancelInvitation}>
+            <div>
+              <div className='…'>{t('common.members.removeFromTeam')}</div>
+              <div className='…'>{t('common.members.removeFromTeamTip')}</div>
+            </div>
+          </div>
+        </div>
+      </PortalToFollowElemContent>
+ + ) } -export default Operation +export default memo(Operation) diff --git a/web/app/components/share/text-generation/index.tsx b/web/app/components/share/text-generation/index.tsx index 157ed123d1..d28af7d766 100644 --- a/web/app/components/share/text-generation/index.tsx +++ b/web/app/components/share/text-generation/index.tsx @@ -26,7 +26,7 @@ import DifyLogo from '@/app/components/base/logo/dify-logo' import Toast from '@/app/components/base/toast' import Res from '@/app/components/share/text-generation/result' import RunOnce from '@/app/components/share/text-generation/run-once' -import { appDefaultIconBackground, DEFAULT_VALUE_MAX_LEN } from '@/config' +import { appDefaultIconBackground, BATCH_CONCURRENCY, DEFAULT_VALUE_MAX_LEN } from '@/config' import { useGlobalPublicStore } from '@/context/global-public-context' import { useWebAppStore } from '@/context/web-app-context' import { useAppFavicon } from '@/hooks/use-app-favicon' @@ -43,7 +43,7 @@ import MenuDropdown from './menu-dropdown' import RunBatch from './run-batch' import ResDownload from './run-batch/res-download' -const GROUP_SIZE = 5 // to avoid RPM(Request per minute) limit. The group task finished then the next group. +const GROUP_SIZE = BATCH_CONCURRENCY // to avoid RPM(Request per minute) limit. The group task finished then the next group. enum TaskStatus { pending = 'pending', running = 'running', diff --git a/web/app/components/tools/edit-custom-collection-modal/config-credentials.spec.tsx b/web/app/components/tools/edit-custom-collection-modal/config-credentials.spec.tsx new file mode 100644 index 0000000000..870263d83c --- /dev/null +++ b/web/app/components/tools/edit-custom-collection-modal/config-credentials.spec.tsx @@ -0,0 +1,60 @@ +import type { Credential } from '@/app/components/tools/types' +import { act, fireEvent, render, screen } from '@testing-library/react' +import { AuthHeaderPrefix, AuthType } from '@/app/components/tools/types' +import ConfigCredential from './config-credentials' + +describe('ConfigCredential', () => { + const baseCredential: Credential = { + auth_type: AuthType.none, + } + const mockOnChange = vi.fn() + const mockOnHide = vi.fn() + + beforeEach(() => { + vi.clearAllMocks() + }) + + it('renders and calls onHide when cancel is pressed', async () => { + await act(async () => { + render( + , + ) + }) + + fireEvent.click(screen.getByText('common.operation.cancel')) + + expect(mockOnHide).toHaveBeenCalledTimes(1) + expect(mockOnChange).not.toHaveBeenCalled() + }) + + it('allows selecting apiKeyHeader and submits the new credential', async () => { + await act(async () => { + render( + , + ) + }) + + fireEvent.click(screen.getByText('tools.createTool.authMethod.types.api_key_header')) + const headerInput = screen.getByPlaceholderText('tools.createTool.authMethod.types.apiKeyPlaceholder') + const valueInput = screen.getByPlaceholderText('tools.createTool.authMethod.types.apiValuePlaceholder') + fireEvent.change(headerInput, { target: { value: 'X-Auth' } }) + fireEvent.change(valueInput, { target: { value: 'sEcReT' } }) + fireEvent.click(screen.getByText('common.operation.save')) + + expect(mockOnChange).toHaveBeenCalledWith({ + auth_type: AuthType.apiKeyHeader, + api_key_header: 'X-Auth', + api_key_header_prefix: AuthHeaderPrefix.custom, + api_key_value: 'sEcReT', + }) + expect(mockOnHide).toHaveBeenCalled() + }) +}) diff --git a/web/app/components/tools/edit-custom-collection-modal/get-schema.spec.tsx b/web/app/components/tools/edit-custom-collection-modal/get-schema.spec.tsx new file 
mode 100644 index 0000000000..de156ce68a --- /dev/null +++ b/web/app/components/tools/edit-custom-collection-modal/get-schema.spec.tsx @@ -0,0 +1,55 @@ +import { fireEvent, render, screen, waitFor } from '@testing-library/react' +import { importSchemaFromURL } from '@/service/tools' +import Toast from '../../base/toast' +import examples from './examples' +import GetSchema from './get-schema' + +vi.mock('@/service/tools', () => ({ + importSchemaFromURL: vi.fn(), +})) +const importSchemaFromURLMock = vi.mocked(importSchemaFromURL) + +describe('GetSchema', () => { + const notifySpy = vi.spyOn(Toast, 'notify') + const mockOnChange = vi.fn() + + beforeEach(() => { + vi.clearAllMocks() + notifySpy.mockClear() + importSchemaFromURLMock.mockReset() + render() + }) + + it('shows an error when the URL is not http', () => { + fireEvent.click(screen.getByText('tools.createTool.importFromUrl')) + const input = screen.getByPlaceholderText('tools.createTool.importFromUrlPlaceHolder') + // eslint-disable-next-line sonarjs/no-clear-text-protocols + fireEvent.change(input, { target: { value: 'ftp://invalid' } }) + fireEvent.click(screen.getByText('common.operation.ok')) + + expect(notifySpy).toHaveBeenCalledWith({ + type: 'error', + message: 'tools.createTool.urlError', + }) + }) + + it('imports schema from url when valid', async () => { + fireEvent.click(screen.getByText('tools.createTool.importFromUrl')) + const input = screen.getByPlaceholderText('tools.createTool.importFromUrlPlaceHolder') + fireEvent.change(input, { target: { value: 'https://example.com' } }) + importSchemaFromURLMock.mockResolvedValueOnce({ schema: 'result-schema' }) + + fireEvent.click(screen.getByText('common.operation.ok')) + + await waitFor(() => { + expect(mockOnChange).toHaveBeenCalledWith('result-schema') + }) + }) + + it('selects example schema when example option clicked', () => { + fireEvent.click(screen.getByText('tools.createTool.examples')) + fireEvent.click(screen.getByText(`tools.createTool.exampleOptions.${examples[0].key}`)) + + expect(mockOnChange).toHaveBeenCalledWith(examples[0].content) + }) +}) diff --git a/web/app/components/tools/edit-custom-collection-modal/index.spec.tsx b/web/app/components/tools/edit-custom-collection-modal/index.spec.tsx new file mode 100644 index 0000000000..92c9cc3df2 --- /dev/null +++ b/web/app/components/tools/edit-custom-collection-modal/index.spec.tsx @@ -0,0 +1,154 @@ +import type { ModalContextState } from '@/context/modal-context' +import type { ProviderContextState } from '@/context/provider-context' +import { act, fireEvent, render, screen, waitFor } from '@testing-library/react' +import Toast from '@/app/components/base/toast' +import { Plan } from '@/app/components/billing/type' +import { parseParamsSchema } from '@/service/tools' +import EditCustomCollectionModal from './index' + +vi.mock('ahooks', async () => { + const actual = await vi.importActual('ahooks') + return { + ...actual, + useDebounce: (value: unknown) => value, + } +}) + +vi.mock('@/service/tools', () => ({ + parseParamsSchema: vi.fn(), +})) +const parseParamsSchemaMock = vi.mocked(parseParamsSchema) + +const mockSetShowPricingModal = vi.fn() +const mockSetShowAccountSettingModal = vi.fn() +vi.mock('@/context/modal-context', () => ({ + useModalContext: (): ModalContextState => ({ + setShowAccountSettingModal: mockSetShowAccountSettingModal, + setShowApiBasedExtensionModal: vi.fn(), + setShowModerationSettingModal: vi.fn(), + setShowExternalDataToolModal: vi.fn(), + setShowPricingModal: mockSetShowPricingModal, + 
setShowAnnotationFullModal: vi.fn(), + setShowModelModal: vi.fn(), + setShowExternalKnowledgeAPIModal: vi.fn(), + setShowModelLoadBalancingModal: vi.fn(), + setShowOpeningModal: vi.fn(), + setShowUpdatePluginModal: vi.fn(), + setShowEducationExpireNoticeModal: vi.fn(), + setShowTriggerEventsLimitModal: vi.fn(), + }), +})) + +const mockUseProviderContext = vi.fn() +vi.mock('@/context/provider-context', () => ({ + useProviderContext: () => mockUseProviderContext(), +})) + +vi.mock('@/context/i18n', async () => { + const actual = await vi.importActual('@/context/i18n') + return { + ...actual, + useDocLink: () => (path?: string) => `https://docs.example.com${path ?? ''}`, + } +}) + +describe('EditCustomCollectionModal', () => { + const mockOnHide = vi.fn() + const mockOnAdd = vi.fn() + const mockOnEdit = vi.fn() + const mockOnRemove = vi.fn() + const toastNotifySpy = vi.spyOn(Toast, 'notify') + + beforeEach(() => { + vi.clearAllMocks() + toastNotifySpy.mockClear() + parseParamsSchemaMock.mockResolvedValue({ + parameters_schema: [], + schema_type: 'openapi', + }) + mockUseProviderContext.mockReturnValue({ + plan: { + type: Plan.sandbox, + }, + enableBilling: false, + webappCopyrightEnabled: true, + } as ProviderContextState) + }) + + const renderModal = () => render( + , + ) + + it('shows an error when the provider name is missing', async () => { + renderModal() + + const schemaInput = screen.getByPlaceholderText('tools.createTool.schemaPlaceHolder') + fireEvent.change(schemaInput, { target: { value: '{}' } }) + await waitFor(() => { + expect(parseParamsSchemaMock).toHaveBeenCalledWith('{}') + }) + + fireEvent.click(screen.getByText('common.operation.save')) + + await waitFor(() => { + expect(toastNotifySpy).toHaveBeenCalledWith(expect.objectContaining({ + message: 'common.errorMsg.fieldRequired:{"field":"tools.createTool.name"}', + type: 'error', + })) + }) + expect(mockOnAdd).not.toHaveBeenCalled() + }) + + it('shows an error when the schema is missing', async () => { + renderModal() + + const providerInput = screen.getByPlaceholderText('tools.createTool.toolNamePlaceHolder') + fireEvent.change(providerInput, { target: { value: 'provider' } }) + + fireEvent.click(screen.getByText('common.operation.save')) + + await waitFor(() => { + expect(toastNotifySpy).toHaveBeenCalledWith(expect.objectContaining({ + message: 'common.errorMsg.fieldRequired:{"field":"tools.createTool.schema"}', + type: 'error', + })) + }) + expect(mockOnAdd).not.toHaveBeenCalled() + }) + + it('saves a valid custom collection', async () => { + renderModal() + const providerInput = screen.getByPlaceholderText('tools.createTool.toolNamePlaceHolder') + fireEvent.change(providerInput, { target: { value: 'provider' } }) + + const schemaInput = screen.getByPlaceholderText('tools.createTool.schemaPlaceHolder') + fireEvent.change(schemaInput, { target: { value: '{}' } }) + + await waitFor(() => { + expect(parseParamsSchemaMock).toHaveBeenCalledWith('{}') + }) + + await act(async () => { + fireEvent.click(screen.getByText('common.operation.save')) + }) + + await waitFor(() => { + expect(mockOnAdd).toHaveBeenCalledWith(expect.objectContaining({ + provider: 'provider', + schema: '{}', + schema_type: 'openapi', + credentials: { + auth_type: 'none', + }, + labels: [], + })) + expect(toastNotifySpy).not.toHaveBeenCalled() + }) + }) +}) diff --git a/web/app/components/tools/edit-custom-collection-modal/index.tsx b/web/app/components/tools/edit-custom-collection-modal/index.tsx index 474c262010..a468af7257 100644 --- 
a/web/app/components/tools/edit-custom-collection-modal/index.tsx +++ b/web/app/components/tools/edit-custom-collection-modal/index.tsx @@ -48,6 +48,7 @@ const EditCustomCollectionModal: FC = ({ const [editFirst, setEditFirst] = useState(!isAdd) const [paramsSchemas, setParamsSchemas] = useState(payload?.tools || []) + const [labels, setLabels] = useState(payload?.labels || []) const [customCollection, setCustomCollection, getCustomCollection] = useGetState(isAdd ? { provider: '', @@ -67,6 +68,15 @@ const EditCustomCollectionModal: FC = ({ const originalProvider = isEdit ? payload.provider : '' + // Sync customCollection state when payload changes + useEffect(() => { + if (isEdit) { + setCustomCollection(payload) + setParamsSchemas(payload.tools || []) + setLabels(payload.labels || []) + } + }, [isEdit, payload]) + const [showEmojiPicker, setShowEmojiPicker] = useState(false) const emoji = customCollection.icon const setEmoji = (emoji: Emoji) => { @@ -124,7 +134,6 @@ const EditCustomCollectionModal: FC = ({ const [currTool, setCurrTool] = useState(null) const [isShowTestApi, setIsShowTestApi] = useState(false) - const [labels, setLabels] = useState(payload?.labels || []) const handleLabelSelect = (value: string[]) => { setLabels(value) } diff --git a/web/app/components/tools/edit-custom-collection-modal/test-api.spec.tsx b/web/app/components/tools/edit-custom-collection-modal/test-api.spec.tsx new file mode 100644 index 0000000000..2df967684a --- /dev/null +++ b/web/app/components/tools/edit-custom-collection-modal/test-api.spec.tsx @@ -0,0 +1,87 @@ +import type { CustomCollectionBackend, CustomParamSchema } from '@/app/components/tools/types' +import { fireEvent, render, screen, waitFor } from '@testing-library/react' +import { AuthType } from '@/app/components/tools/types' +import I18n from '@/context/i18n' +import { testAPIAvailable } from '@/service/tools' +import TestApi from './test-api' + +vi.mock('@/service/tools', () => ({ + testAPIAvailable: vi.fn(), +})) +const testAPIAvailableMock = vi.mocked(testAPIAvailable) + +describe('TestApi', () => { + const customCollection: CustomCollectionBackend = { + provider: 'custom', + credentials: { + auth_type: AuthType.none, + }, + schema_type: 'openapi', + schema: '{ }', + icon: { background: '', content: '' }, + privacy_policy: '', + custom_disclaimer: '', + id: 'test-id', + labels: [], + } + const tool: CustomParamSchema = { + operation_id: 'testOp', + summary: 'summary', + method: 'GET', + server_url: 'https://api.example.com', + parameters: [{ + name: 'limit', + label: { + en_US: 'Limit', + zh_Hans: '限制', + }, + // eslint-disable-next-line ts/no-explicit-any + } as any], + } + + const renderTestApi = () => { + const providerValue = { + locale: 'en-US', + i18n: {}, + setLocaleOnClient: vi.fn(), + } + return render( + + + , + ) + } + + beforeEach(() => { + vi.clearAllMocks() + }) + + it('renders parameters and runs the API test', async () => { + testAPIAvailableMock.mockResolvedValueOnce({ result: 'ok' }) + renderTestApi() + + const parameterInput = screen.getAllByRole('textbox')[0] + fireEvent.change(parameterInput, { target: { value: '5' } }) + fireEvent.click(screen.getByRole('button', { name: 'tools.test.title' })) + + await waitFor(() => { + expect(testAPIAvailableMock).toHaveBeenCalledWith({ + provider_name: customCollection.provider, + tool_name: tool.operation_id, + credentials: { + auth_type: AuthType.none, + }, + schema_type: customCollection.schema_type, + schema: customCollection.schema, + parameters: { + limit: '5', + }, + }) + 
expect(screen.getByText('ok')).toBeInTheDocument() + }) + }) +}) diff --git a/web/app/components/tools/provider/detail.tsx b/web/app/components/tools/provider/detail.tsx index c4b65f353d..e0a2281696 100644 --- a/web/app/components/tools/provider/detail.tsx +++ b/web/app/components/tools/provider/detail.tsx @@ -100,9 +100,28 @@ const ProviderDetail = ({ const [isShowEditCollectionToolModal, setIsShowEditCustomCollectionModal] = useState(false) const [showConfirmDelete, setShowConfirmDelete] = useState(false) const [deleteAction, setDeleteAction] = useState('') + + const getCustomProvider = useCallback(async () => { + setIsDetailLoading(true) + const res = await fetchCustomCollection(collection.name) + if (res.credentials.auth_type === AuthType.apiKey && !res.credentials.api_key_header_prefix) { + if (res.credentials.api_key_value) + res.credentials.api_key_header_prefix = AuthHeaderPrefix.custom + } + setCustomCollection({ + ...res, + labels: collection.labels, + provider: collection.name, + }) + setIsDetailLoading(false) + }, [collection.labels, collection.name]) + const doUpdateCustomToolCollection = async (data: CustomCollectionBackend) => { await updateCustomCollection(data) onRefreshData() + await getCustomProvider() + // Use fresh data from form submission to avoid race condition with collection.labels + setCustomCollection(prev => prev ? { ...prev, labels: data.labels } : null) Toast.notify({ type: 'success', message: t('common.api.actionSuccess'), @@ -118,20 +137,6 @@ const ProviderDetail = ({ }) setIsShowEditCustomCollectionModal(false) } - const getCustomProvider = useCallback(async () => { - setIsDetailLoading(true) - const res = await fetchCustomCollection(collection.name) - if (res.credentials.auth_type === AuthType.apiKey && !res.credentials.api_key_header_prefix) { - if (res.credentials.api_key_value) - res.credentials.api_key_header_prefix = AuthHeaderPrefix.custom - } - setCustomCollection({ - ...res, - labels: collection.labels, - provider: collection.name, - }) - setIsDetailLoading(false) - }, [collection.labels, collection.name]) // workflow provider const [isShowEditWorkflowToolModal, setIsShowEditWorkflowToolModal] = useState(false) const getWorkflowToolProvider = useCallback(async () => { diff --git a/web/app/components/workflow-app/components/workflow-header/features-trigger.tsx b/web/app/components/workflow-app/components/workflow-header/features-trigger.tsx index 1df6f10195..13fc6a5ce0 100644 --- a/web/app/components/workflow-app/components/workflow-header/features-trigger.tsx +++ b/web/app/components/workflow-app/components/workflow-header/features-trigger.tsx @@ -188,8 +188,8 @@ const FeaturesTrigger = () => { {isChatMode && (
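A closing note on the `GROUP_SIZE = BATCH_CONCURRENCY` change above: the intent of the inline comment is that one group of batch requests must finish before the next group starts, which keeps the request rate under the provider's RPM limit. A minimal sketch of that grouping pattern, assuming tasks are plain async thunks (`runInGroups` is illustrative, not the component's actual helper):

```ts
// Illustrative only: execute tasks in fixed-size groups. The next group is
// dispatched only after the previous one settles, bounding requests per minute.
async function runInGroups<T>(tasks: Array<() => Promise<T>>, groupSize: number): Promise<T[]> {
  const results: T[] = []
  for (let i = 0; i < tasks.length; i += groupSize) {
    const group = tasks.slice(i, i + groupSize)
    // Promise.all caps in-flight requests at `groupSize` for this group
    results.push(...await Promise.all(group.map(run => run())))
  }
  return results
}
```

Note this waits for the slowest task in each group before dispatching the next; a sliding-window pool would keep concurrency steadier, but the grouped form is simpler and matches the comment's description. The `provider/detail.tsx` hunk follows the same defensive spirit: `getCustomProvider` is hoisted above its first use, and the freshly submitted `data.labels` are re-applied after the refetch so a stale `collection.labels` cannot overwrite them.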