diff --git a/.claude/settings.json b/.claude/settings.json
new file mode 100644
index 0000000000..7d42234cae
--- /dev/null
+++ b/.claude/settings.json
@@ -0,0 +1,8 @@
+{
+  "enabledPlugins": {
+    "feature-dev@claude-plugins-official": true,
+    "context7@claude-plugins-official": true,
+    "typescript-lsp@claude-plugins-official": true,
+    "pyright-lsp@claude-plugins-official": true
+  }
+}
diff --git a/.claude/settings.json.example b/.claude/settings.json.example
deleted file mode 100644
index 1149895340..0000000000
--- a/.claude/settings.json.example
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "permissions": {
-    "allow": [],
-    "deny": []
-  },
-  "env": {
-    "__comment": "Environment variables for MCP servers. Override in .claude/settings.local.json with actual values.",
-    "GITHUB_PERSONAL_ACCESS_TOKEN": "ghp_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
-  },
-  "enabledMcpjsonServers": [
-    "context7",
-    "sequential-thinking",
-    "github",
-    "fetch",
-    "playwright",
-    "ide"
-  ],
-  "enableAllProjectMcpServers": true
-}
\ No newline at end of file
diff --git a/.claude/skills/component-refactoring/SKILL.md b/.claude/skills/component-refactoring/SKILL.md
new file mode 100644
index 0000000000..7006c382c8
--- /dev/null
+++ b/.claude/skills/component-refactoring/SKILL.md
@@ -0,0 +1,483 @@
+---
+name: component-refactoring
+description: Refactor high-complexity React components in Dify frontend. Use when `pnpm analyze-component --json` shows complexity > 50 or lineCount > 300, when the user asks for code splitting, hook extraction, or complexity reduction, or when `pnpm analyze-component` warns to refactor before testing; avoid for simple/well-structured components, third-party wrappers, or when the user explicitly wants testing without refactoring.
+---
+
+# Dify Component Refactoring Skill
+
+Refactor high-complexity React components in the Dify frontend codebase with the patterns and workflow below.
+
+> **Complexity Threshold**: Components with complexity > 50 (measured by `pnpm analyze-component`) should be refactored before testing.
+
+## Quick Reference
+
+### Commands (run from `web/`)
+
+Use paths relative to `web/` (e.g., `app/components/...`).
+Use `refactor-component` for refactoring prompts and `analyze-component` for testing prompts and metrics.
+
+```bash
+cd web
+
+# Generate refactoring prompt
+pnpm refactor-component <component-path>
+
+# Output refactoring analysis as JSON
+pnpm refactor-component <component-path> --json
+
+# Generate testing prompt (after refactoring)
+pnpm analyze-component <component-path>
+
+# Output testing analysis as JSON
+pnpm analyze-component <component-path> --json
+```
+
+### Complexity Analysis
+
+```bash
+# Analyze component complexity
+pnpm analyze-component <component-path> --json
+
+# Key metrics to check:
+# - complexity: normalized score 0-100 (target < 50)
+# - maxComplexity: highest single function complexity
+# - lineCount: total lines (target < 300)
+```
+
+### Complexity Score Interpretation
+
+| Score | Level | Action |
+|-------|-------|--------|
+| 0-25 | 🟢 Simple | Ready for testing |
+| 26-50 | šŸŸ” Medium | Consider minor refactoring |
+| 51-75 | 🟠 Complex | **Refactor before testing** |
+| 76-100 | šŸ”“ Very Complex | **Must refactor** |
+
+## Core Refactoring Patterns
+
+### Pattern 1: Extract Custom Hooks
+
+**When**: Component has complex state management, multiple `useState`/`useEffect`, or business logic mixed with UI.
+
+**Dify Convention**: Place hooks in a `hooks/` subdirectory or alongside the component as `use-<feature-name>.ts`.
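+
+For example, extracting two hooks from a hypothetical `configuration` component could produce a layout like this (illustrative names, not a prescribed structure):
+
+```
+configuration/
+  ā”œā”€ā”€ index.tsx                 # UI orchestration only after the refactor
+  └── hooks/
+      ā”œā”€ā”€ use-model-config.ts   # model state + related effects
+      └── use-dataset-config.ts # dataset selection state
+```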
+
+```typescript
+// āŒ Before: Complex state logic in component
+const Configuration: FC = () => {
+  const [modelConfig, setModelConfig] = useState(...)
+  const [datasetConfigs, setDatasetConfigs] = useState(...)
+  const [completionParams, setCompletionParams] = useState({})
+
+  // 50+ lines of state management logic...
+
+  return <div>
+    ...
+  </div>
+}
+
+// āœ… After: Extract to custom hook
+// hooks/use-model-config.ts
+export const useModelConfig = (appId: string) => {
+  const [modelConfig, setModelConfig] = useState(...)
+  const [completionParams, setCompletionParams] = useState({})
+
+  // Related state management logic here
+
+  return { modelConfig, setModelConfig, completionParams, setCompletionParams }
+}
+
+// Component becomes cleaner
+const Configuration: FC = () => {
+  const { modelConfig, setModelConfig } = useModelConfig(appId)
+  return <div>
+    ...
+  </div>
+}
+```
+
+**Dify Examples**:
+- `web/app/components/app/configuration/hooks/use-advanced-prompt-config.ts`
+- `web/app/components/app/configuration/debug/hooks.tsx`
+- `web/app/components/workflow/hooks/use-workflow.ts`
+
+### Pattern 2: Extract Sub-Components
+
+**When**: Single component has multiple UI sections, conditional rendering blocks, or repeated patterns.
+
+**Dify Convention**: Place sub-components in subdirectories or as separate files in the same directory.
+
+```typescript
+// āŒ Before: Monolithic JSX with multiple sections
+const AppInfo = () => {
+  return (
+    <div>
+      {/* 100 lines of header UI */}
+      {/* 100 lines of operations UI */}
+      {/* 100 lines of modals */}
+    </div>
+  )
+}
+
+// āœ… After: Split into focused components
+// app-info/
+//   ā”œā”€ā”€ index.tsx (orchestration only)
+//   ā”œā”€ā”€ app-header.tsx (header UI)
+//   ā”œā”€ā”€ app-operations.tsx (operations UI)
+//   └── app-modals.tsx (modal management)
+
+const AppInfo = () => {
+  const { showModal, setShowModal } = useAppInfoModals()
+
+  return (
+    <div>
+      <AppHeader />
+      <AppOperations />
+      <AppModals show={showModal} onClose={() => setShowModal(null)} />
+    </div>
+  )
+}
+```
+
+**Dify Examples**:
+- `web/app/components/app/configuration/` directory structure
+- `web/app/components/workflow/nodes/` per-node organization
+
+### Pattern 3: Simplify Conditional Logic
+
+**When**: Deep nesting (> 3 levels), complex ternaries, or multiple `if/else` chains.
+
+```typescript
+// āŒ Before: Deeply nested conditionals
+const Template = useMemo(() => {
+  if (appDetail?.mode === AppModeEnum.CHAT) {
+    switch (locale) {
+      case LanguagesSupported[1]:
+        return <TemplateChatZh />
+      case LanguagesSupported[7]:
+        return <TemplateChatJa />
+      default:
+        return <TemplateChatEn />
+    }
+  }
+  if (appDetail?.mode === AppModeEnum.ADVANCED_CHAT) {
+    // Another 15 lines...
+  }
+  // More conditions...
+}, [appDetail, locale])
+
+// āœ… After: Use lookup tables + early returns
+const TEMPLATE_MAP = {
+  [AppModeEnum.CHAT]: {
+    [LanguagesSupported[1]]: TemplateChatZh,
+    [LanguagesSupported[7]]: TemplateChatJa,
+    default: TemplateChatEn,
+  },
+  [AppModeEnum.ADVANCED_CHAT]: {
+    [LanguagesSupported[1]]: TemplateAdvancedChatZh,
+    // ...
+  },
+}
+
+const Template = useMemo(() => {
+  const modeTemplates = TEMPLATE_MAP[appDetail?.mode]
+  if (!modeTemplates) return null
+
+  const TemplateComponent = modeTemplates[locale] || modeTemplates.default
+  return <TemplateComponent />
+}, [appDetail, locale])
+```
+
+### Pattern 4: Extract API/Data Logic
+
+**When**: Component directly handles API calls, data transformation, or complex async operations.
+
+**Dify Convention**: Use `@tanstack/react-query` hooks from `web/service/use-*.ts` or create custom data hooks.
+
+```typescript
+// āŒ Before: API logic in component
+const MCPServiceCard = () => {
+  const [basicAppConfig, setBasicAppConfig] = useState({})
+
+  useEffect(() => {
+    if (isBasicApp && appId) {
+      (async () => {
+        const res = await fetchAppDetail({ url: '/apps', id: appId })
+        setBasicAppConfig(res?.model_config || {})
+      })()
+    }
+  }, [appId, isBasicApp])
+
+  // More API-related logic...
+}
+
+// āœ… After: Extract to data hook using React Query
+// use-app-config.ts
+import { useQuery } from '@tanstack/react-query'
+import { get } from '@/service/base'
+
+const NAME_SPACE = 'appConfig'
+
+export const useAppConfig = (appId: string, isBasicApp: boolean) => {
+  return useQuery({
+    enabled: isBasicApp && !!appId,
+    queryKey: [NAME_SPACE, 'detail', appId],
+    queryFn: () => get(`/apps/${appId}`),
+    select: data => data?.model_config || {},
+  })
+}
+
+// Component becomes cleaner
+const MCPServiceCard = () => {
+  const { data: config, isLoading } = useAppConfig(appId, isBasicApp)
+  // UI only
+}
+```
+
+**React Query Best Practices in Dify**:
+- Define `NAME_SPACE` for query key organization
+- Use `enabled` option for conditional fetching
+- Use `select` for data transformation
+- Export invalidation hooks: `useInvalidXxx`
+
+**Dify Examples**:
+- `web/service/use-workflow.ts`
+- `web/service/use-common.ts`
+- `web/service/knowledge/use-dataset.ts`
+- `web/service/knowledge/use-document.ts`
+
+### Pattern 5: Extract Modal/Dialog Management
+
+**When**: Component manages multiple modals with complex open/close states.
+
+**Dify Convention**: Modals should be extracted with their state management.
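+
+The target shape is a component that drives all of its modals from one piece of state. A minimal usage sketch (component names are illustrative; the hook itself is defined in the example below):
+
+```typescript
+const AppInfo = () => {
+  const { openModal, closeModal, isOpen } = useAppInfoModals()
+
+  return (
+    <div>
+      <Button onClick={() => openModal('edit')}>Edit</Button>
+      {isOpen('edit') && <EditAppModal onClose={closeModal} />}
+      {isOpen('duplicate') && <DuplicateAppModal onClose={closeModal} />}
+    </div>
+  )
+}
+```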
+
+```typescript
+// āŒ Before: Multiple modal states in component
+const AppInfo = () => {
+  const [showEditModal, setShowEditModal] = useState(false)
+  const [showDuplicateModal, setShowDuplicateModal] = useState(false)
+  const [showConfirmDelete, setShowConfirmDelete] = useState(false)
+  const [showSwitchModal, setShowSwitchModal] = useState(false)
+  const [showImportDSLModal, setShowImportDSLModal] = useState(false)
+  // 5+ more modal states...
+}
+
+// āœ… After: Extract to modal management hook
+type ModalType = 'edit' | 'duplicate' | 'delete' | 'switch' | 'import' | null
+
+const useAppInfoModals = () => {
+  const [activeModal, setActiveModal] = useState<ModalType>(null)
+
+  const openModal = useCallback((type: ModalType) => setActiveModal(type), [])
+  const closeModal = useCallback(() => setActiveModal(null), [])
+
+  return {
+    activeModal,
+    openModal,
+    closeModal,
+    isOpen: (type: ModalType) => activeModal === type,
+  }
+}
+```
+
+### Pattern 6: Extract Form Logic
+
+**When**: Complex form validation, submission handling, or field transformation.
+
+**Dify Convention**: Use `@tanstack/react-form` patterns from `web/app/components/base/form/`.
+
+```typescript
+// āœ… Use existing form infrastructure
+import { useAppForm } from '@/app/components/base/form'
+
+const ConfigForm = () => {
+  const form = useAppForm({
+    defaultValues: { name: '', description: '' },
+    onSubmit: handleSubmit,
+  })
+
+  return <form>...</form>
+}
+```
+
+## Dify-Specific Refactoring Guidelines
+
+### 1. Context Provider Extraction
+
+**When**: Component provides complex context values with multiple states.
+
+```typescript
+// āŒ Before: Large context value object
+const value = {
+  appId, isAPIKeySet, isTrailFinished, mode, modelModeType,
+  promptMode, isAdvancedMode, isAgent, isOpenAI, isFunctionCall,
+  // 50+ more properties...
+}
+return <ConfigContext.Provider value={value}>...</ConfigContext.Provider>
+
+// āœ… After: Split into domain-specific contexts
+<AppContext.Provider value={appValue}>
+  <ModelContext.Provider value={modelValue}>
+    <PromptContext.Provider value={promptValue}>
+      {children}
+    </PromptContext.Provider>
+  </ModelContext.Provider>
+</AppContext.Provider>
+```
+
+**Dify Reference**: `web/context/` directory structure
+
+### 2. Workflow Node Components
+
+**When**: Refactoring workflow node components (`web/app/components/workflow/nodes/`).
+
+**Conventions**:
+- Keep node logic in `use-interactions.ts`
+- Extract panel UI to separate files
+- Use `_base` components for common patterns
+
+```
+nodes/<node-type>/
+  ā”œā”€ā”€ index.tsx           # Node registration
+  ā”œā”€ā”€ node.tsx            # Node visual component
+  ā”œā”€ā”€ panel.tsx           # Configuration panel
+  ā”œā”€ā”€ use-interactions.ts # Node-specific hooks
+  └── types.ts            # Type definitions
+```
+
+### 3. Configuration Components
+
+**When**: Refactoring app configuration components.
+
+**Conventions**:
+- Separate config sections into subdirectories
+- Use existing patterns from `web/app/components/app/configuration/`
+- Keep feature toggles in dedicated components
+
+### 4. Tool/Plugin Components
+
+**When**: Refactoring tool-related components (`web/app/components/tools/`).
+
+**Conventions**:
+- Follow existing modal patterns
+- Use service hooks from `web/service/use-tools.ts`
+- Keep provider-specific logic isolated
+
+## Refactoring Workflow
+
+### Step 1: Generate Refactoring Prompt
+
+```bash
+pnpm refactor-component <component-path>
+```
+
+This command will:
+- Analyze component complexity and features
+- Identify specific refactoring actions needed
+- Generate a prompt for the AI assistant (auto-copied to clipboard on macOS)
+- Provide detailed requirements based on detected patterns
+
+### Step 2: Analyze Details
+
+```bash
+pnpm analyze-component <component-path> --json
+```
+
+Identify:
+- Total complexity score
+- Max function complexity
+- Line count
+- Features detected (state, effects, API, etc.)
+
+### Step 3: Plan
+
+Create a refactoring plan based on detected features:
+
+| Detected Feature | Refactoring Action |
+|------------------|-------------------|
+| `hasState: true` + `hasEffects: true` | Extract custom hook |
+| `hasAPI: true` | Extract data/service hook |
+| `hasEvents: true` (many) | Extract event handlers |
+| `lineCount > 300` | Split into sub-components |
+| `maxComplexity > 50` | Simplify conditional logic |
+
+### Step 4: Execute Incrementally
+
+1. **Extract one piece at a time**
+2. **Run lint, type-check, and tests after each extraction**
+3. **Verify functionality before next step**
+
+```
+For each extraction:
+  ā”Œā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”
+  │ 1. Extract code                       │
+  │ 2. Run: pnpm lint:fix                 │
+  │ 3. Run: pnpm type-check:tsgo          │
+  │ 4. Run: pnpm test                     │
+  │ 5. Test functionality manually        │
+  │ 6. PASS? → Next extraction            │
+  │    FAIL? → Fix before continuing      │
+  ā””ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”˜
+```
+
+### Step 5: Verify
+
+After refactoring:
+
+```bash
+# Re-run refactor command to verify improvements
+pnpm refactor-component <component-path>
+
+# If complexity < 25 and lines < 200, you'll see:
+# āœ… COMPONENT IS WELL-STRUCTURED
+
+# For detailed metrics:
+pnpm analyze-component <component-path> --json
+
+# Target metrics:
+# - complexity < 50
+# - lineCount < 300
+# - maxComplexity < 30
+```
+
+## Common Mistakes to Avoid
+
+### āŒ Over-Engineering
+
+```typescript
+// āŒ Too many tiny hooks
+const useButtonText = () => useState('Click')
+const useButtonDisabled = () => useState(false)
+const useButtonLoading = () => useState(false)
+
+// āœ… Cohesive hook with related state
+const useButtonState = () => {
+  const [text, setText] = useState('Click')
+  const [disabled, setDisabled] = useState(false)
+  const [loading, setLoading] = useState(false)
+  return { text, setText, disabled, setDisabled, loading, setLoading }
+}
+```
+
+### āŒ Breaking Existing Patterns
+
+- Follow existing directory structures
+- Maintain naming conventions
+- Preserve export patterns for compatibility
+
+### āŒ Premature Abstraction
+
+- Only extract when there's clear complexity benefit
+- Don't create abstractions for single-use code
+- Keep refactored code in the same domain area
+
+## References
+
+### Dify Codebase Examples
+
+- **Hook extraction**: `web/app/components/app/configuration/hooks/`
+- **Component splitting**: `web/app/components/app/configuration/`
+- **Service hooks**: `web/service/use-*.ts`
+- **Workflow patterns**: `web/app/components/workflow/hooks/`
+- **Form patterns**: `web/app/components/base/form/`
+
+### Related Skills
+
+- `frontend-testing` - For testing refactored components
+- `web/testing/testing.md` - Testing specification
diff --git a/.claude/skills/component-refactoring/references/complexity-patterns.md b/.claude/skills/component-refactoring/references/complexity-patterns.md
new file mode 100644
index 0000000000..5a0a268f38
--- /dev/null
+++ b/.claude/skills/component-refactoring/references/complexity-patterns.md
@@ -0,0 +1,493 @@
+# Complexity Reduction Patterns
+
+This document provides patterns for reducing cognitive complexity in Dify React components.
+
+## Understanding Complexity
+
+### SonarJS Cognitive Complexity
+
+The `pnpm analyze-component` tool uses SonarJS cognitive complexity metrics:
+
+- **Total Complexity**: Sum of all functions' complexity in the file
+- **Max Complexity**: Highest single function complexity
+
+### What Increases Complexity
+
+| Pattern | Complexity Impact |
+|---------|-------------------|
+| `if/else` | +1 per branch |
+| Nested conditions | +1 per nesting level |
+| `switch/case` | +1 per case |
+| `for/while/do` | +1 per loop |
+| `&&`/`||` chains | +1 per operator |
+| Nested callbacks | +1 per nesting level |
+| `try/catch` | +1 per catch |
+| Ternary expressions | +1 per nesting |
+
+## Pattern 1: Replace Conditionals with Lookup Tables
+
+**Before** (complexity: ~15):
+
+```typescript
+const Template = useMemo(() => {
+  if (appDetail?.mode === AppModeEnum.CHAT) {
+    switch (locale) {
+      case LanguagesSupported[1]:
+        return <TemplateChatZh />
+      case LanguagesSupported[7]:
+        return <TemplateChatJa />
+      default:
+        return <TemplateChatEn />
+    }
+  }
+  if (appDetail?.mode === AppModeEnum.ADVANCED_CHAT) {
+    switch (locale) {
+      case LanguagesSupported[1]:
+        return <TemplateAdvancedChatZh />
+      case LanguagesSupported[7]:
+        return <TemplateAdvancedChatJa />
+      default:
+        return <TemplateAdvancedChatEn />
+    }
+  }
+  if (appDetail?.mode === AppModeEnum.WORKFLOW) {
+    // Similar pattern...
+  }
+  return null
+}, [appDetail, locale])
+```
+
+**After** (complexity: ~3):
+
+```typescript
+// Define lookup table outside component
+const TEMPLATE_MAP: Record<AppModeEnum, Record<string, ComponentType>> = {
+  [AppModeEnum.CHAT]: {
+    [LanguagesSupported[1]]: TemplateChatZh,
+    [LanguagesSupported[7]]: TemplateChatJa,
+    default: TemplateChatEn,
+  },
+  [AppModeEnum.ADVANCED_CHAT]: {
+    [LanguagesSupported[1]]: TemplateAdvancedChatZh,
+    [LanguagesSupported[7]]: TemplateAdvancedChatJa,
+    default: TemplateAdvancedChatEn,
+  },
+  [AppModeEnum.WORKFLOW]: {
+    [LanguagesSupported[1]]: TemplateWorkflowZh,
+    [LanguagesSupported[7]]: TemplateWorkflowJa,
+    default: TemplateWorkflowEn,
+  },
+  // ...
+}
+
+// Clean component logic
+const Template = useMemo(() => {
+  if (!appDetail?.mode) return null
+
+  const templates = TEMPLATE_MAP[appDetail.mode]
+  if (!templates) return null
+
+  const TemplateComponent = templates[locale] ?? templates.default
+  return <TemplateComponent />
+}, [appDetail, locale])
+```
+
+## Pattern 2: Use Early Returns
+
+**Before** (complexity: ~10):
+
+```typescript
+const handleSubmit = () => {
+  if (isValid) {
+    if (hasChanges) {
+      if (isConnected) {
+        submitData()
+      } else {
+        showConnectionError()
+      }
+    } else {
+      showNoChangesMessage()
+    }
+  } else {
+    showValidationError()
+  }
+}
+```
+
+**After** (complexity: ~4):
+
+```typescript
+const handleSubmit = () => {
+  if (!isValid) {
+    showValidationError()
+    return
+  }
+
+  if (!hasChanges) {
+    showNoChangesMessage()
+    return
+  }
+
+  if (!isConnected) {
+    showConnectionError()
+    return
+  }
+
+  submitData()
+}
+```
+
+## Pattern 3: Extract Complex Conditions
+
+**Before** (complexity: high):
+
+```typescript
+const canPublish = (() => {
+  if (mode !== AppModeEnum.COMPLETION) {
+    if (!isAdvancedMode)
+      return true
+
+    if (modelModeType === ModelModeType.completion) {
+      if (!hasSetBlockStatus.history || !hasSetBlockStatus.query)
+        return false
+      return true
+    }
+    return true
+  }
+  return !promptEmpty
+})()
+```
+
+**After** (complexity: lower):
+
+```typescript
+// Extract to named functions
+const canPublishInCompletionMode = () => !promptEmpty
+
+const canPublishInChatMode = () => {
+  if (!isAdvancedMode) return true
+  if (modelModeType !== ModelModeType.completion) return true
+  return hasSetBlockStatus.history && hasSetBlockStatus.query
+}
+
+// Clean main logic
+const canPublish = mode === AppModeEnum.COMPLETION
+  ? canPublishInCompletionMode()
+  : canPublishInChatMode()
+```
+
+## Pattern 4: Replace Chained Ternaries
+
+**Before** (complexity: ~5):
+
+```typescript
+const statusText = serverActivated
+  ? t('status.running')
+  : serverPublished
+    ? t('status.inactive')
+    : appUnpublished
+      ? t('status.unpublished')
+      : t('status.notConfigured')
+```
+
+**After** (complexity: ~2):
+
+```typescript
+const getStatusText = () => {
+  if (serverActivated) return t('status.running')
+  if (serverPublished) return t('status.inactive')
+  if (appUnpublished) return t('status.unpublished')
+  return t('status.notConfigured')
+}
+
+const statusText = getStatusText()
+```
+
+Or use lookup:
+
+```typescript
+const STATUS_TEXT_MAP = {
+  running: 'status.running',
+  inactive: 'status.inactive',
+  unpublished: 'status.unpublished',
+  notConfigured: 'status.notConfigured',
+} as const
+
+const getStatusKey = (): keyof typeof STATUS_TEXT_MAP => {
+  if (serverActivated) return 'running'
+  if (serverPublished) return 'inactive'
+  if (appUnpublished) return 'unpublished'
+  return 'notConfigured'
+}
+
+const statusText = t(STATUS_TEXT_MAP[getStatusKey()])
+```
+
+## Pattern 5: Flatten Nested Loops
+
+**Before** (complexity: high):
+
+```typescript
+const processData = (items: Item[]) => {
+  const results: ProcessedItem[] = []
+
+  for (const item of items) {
+    if (item.isValid) {
+      for (const child of item.children) {
+        if (child.isActive) {
+          for (const prop of child.properties) {
+            if (prop.value !== null) {
+              results.push({
+                itemId: item.id,
+                childId: child.id,
+                propValue: prop.value,
+              })
+            }
+          }
+        }
+      }
+    }
+  }
+
+  return results
+}
+```
+
+**After** (complexity: lower):
+
+```typescript
+// Use functional approach
+const processData = (items: Item[]) => {
+  return items
+    .filter(item => item.isValid)
+    .flatMap(item =>
+      item.children
+        .filter(child => child.isActive)
+        .flatMap(child =>
+          child.properties
+            .filter(prop => prop.value !== null)
+            .map(prop => ({
+              itemId: item.id,
+              childId: child.id,
+              propValue: prop.value,
+            }))
+        )
+    )
+}
+```
+
+## Pattern 6: Extract Event Handler Logic
+
+**Before** (complexity: high in component):
+
+```typescript
+const Component = () => {
+  const handleSelect = (data: DataSet[]) => {
+    if (isEqual(data.map(item => item.id), dataSets.map(item => item.id))) {
+      hideSelectDataSet()
+      return
+    }
+
+    formattingChangedDispatcher()
+    let newDatasets = data
+    if (data.find(item => !item.name)) {
+      const newSelected = produce(data, (draft) => {
+        data.forEach((item, index) => {
+          if (!item.name) {
+            const newItem = dataSets.find(i => i.id === item.id)
+            if (newItem)
+              draft[index] = newItem
+          }
+        })
+      })
+      setDataSets(newSelected)
+      newDatasets = newSelected
+    }
+    else {
+      setDataSets(data)
+    }
+    hideSelectDataSet()
+
+    // 40 more lines of logic...
+  }
+
+  return <div>
+    ...
+  </div>
+}
+```
+
+**After** (complexity: lower):
+
+```typescript
+// Extract to hook or utility
+const useDatasetSelection = (dataSets: DataSet[], setDataSets: SetState<DataSet[]>) => {
+  const normalizeSelection = (data: DataSet[]) => {
+    const hasUnloadedItem = data.some(item => !item.name)
+    if (!hasUnloadedItem) return data
+
+    return produce(data, (draft) => {
+      data.forEach((item, index) => {
+        if (!item.name) {
+          const existing = dataSets.find(i => i.id === item.id)
+          if (existing) draft[index] = existing
+        }
+      })
+    })
+  }
+
+  const hasSelectionChanged = (newData: DataSet[]) => {
+    return !isEqual(
+      newData.map(item => item.id),
+      dataSets.map(item => item.id)
+    )
+  }
+
+  return { normalizeSelection, hasSelectionChanged }
+}
+
+// Component becomes cleaner
+const Component = () => {
+  const { normalizeSelection, hasSelectionChanged } = useDatasetSelection(dataSets, setDataSets)
+
+  const handleSelect = (data: DataSet[]) => {
+    if (!hasSelectionChanged(data)) {
+      hideSelectDataSet()
+      return
+    }
+
+    formattingChangedDispatcher()
+    const normalized = normalizeSelection(data)
+    setDataSets(normalized)
+    hideSelectDataSet()
+  }
+
+  return <div>
+    ...
+  </div>
+} +``` + +## Pattern 7: Reduce Boolean Logic Complexity + +**Before** (complexity: ~8): + +```typescript +const toggleDisabled = hasInsufficientPermissions + || appUnpublished + || missingStartNode + || triggerModeDisabled + || (isAdvancedApp && !currentWorkflow?.graph) + || (isBasicApp && !basicAppConfig.updated_at) +``` + +**After** (complexity: ~3): + +```typescript +// Extract meaningful boolean functions +const isAppReady = () => { + if (isAdvancedApp) return !!currentWorkflow?.graph + return !!basicAppConfig.updated_at +} + +const hasRequiredPermissions = () => { + return isCurrentWorkspaceEditor && !hasInsufficientPermissions +} + +const canToggle = () => { + if (!hasRequiredPermissions()) return false + if (!isAppReady()) return false + if (missingStartNode) return false + if (triggerModeDisabled) return false + return true +} + +const toggleDisabled = !canToggle() +``` + +## Pattern 8: Simplify useMemo/useCallback Dependencies + +**Before** (complexity: multiple recalculations): + +```typescript +const payload = useMemo(() => { + let parameters: Parameter[] = [] + let outputParameters: OutputParameter[] = [] + + if (!published) { + parameters = (inputs || []).map((item) => ({ + name: item.variable, + description: '', + form: 'llm', + required: item.required, + type: item.type, + })) + outputParameters = (outputs || []).map((item) => ({ + name: item.variable, + description: '', + type: item.value_type, + })) + } + else if (detail && detail.tool) { + parameters = (inputs || []).map((item) => ({ + // Complex transformation... + })) + outputParameters = (outputs || []).map((item) => ({ + // Complex transformation... + })) + } + + return { + icon: detail?.icon || icon, + label: detail?.label || name, + // ...more fields + } +}, [detail, published, workflowAppId, icon, name, description, inputs, outputs]) +``` + +**After** (complexity: separated concerns): + +```typescript +// Separate transformations +const useParameterTransform = (inputs: InputVar[], detail?: ToolDetail, published?: boolean) => { + return useMemo(() => { + if (!published) { + return inputs.map(item => ({ + name: item.variable, + description: '', + form: 'llm', + required: item.required, + type: item.type, + })) + } + + if (!detail?.tool) return [] + + return inputs.map(item => ({ + name: item.variable, + required: item.required, + type: item.type === 'paragraph' ? 'string' : item.type, + description: detail.tool.parameters.find(p => p.name === item.variable)?.llm_description || '', + form: detail.tool.parameters.find(p => p.name === item.variable)?.form || 'llm', + })) + }, [inputs, detail, published]) +} + +// Component uses hook +const parameters = useParameterTransform(inputs, detail, published) +const outputParameters = useOutputTransform(outputs, detail, published) + +const payload = useMemo(() => ({ + icon: detail?.icon || icon, + label: detail?.label || name, + parameters, + outputParameters, + // ... 
+}), [detail, icon, name, parameters, outputParameters]) +``` + +## Target Metrics After Refactoring + +| Metric | Target | +|--------|--------| +| Total Complexity | < 50 | +| Max Function Complexity | < 30 | +| Function Length | < 30 lines | +| Nesting Depth | ≤ 3 levels | +| Conditional Chains | ≤ 3 conditions | diff --git a/.claude/skills/component-refactoring/references/component-splitting.md b/.claude/skills/component-refactoring/references/component-splitting.md new file mode 100644 index 0000000000..78a3389100 --- /dev/null +++ b/.claude/skills/component-refactoring/references/component-splitting.md @@ -0,0 +1,477 @@ +# Component Splitting Patterns + +This document provides detailed guidance on splitting large components into smaller, focused components in Dify. + +## When to Split Components + +Split a component when you identify: + +1. **Multiple UI sections** - Distinct visual areas with minimal coupling that can be composed independently +1. **Conditional rendering blocks** - Large `{condition && }` blocks +1. **Repeated patterns** - Similar UI structures used multiple times +1. **300+ lines** - Component exceeds manageable size +1. **Modal clusters** - Multiple modals rendered in one component + +## Splitting Strategies + +### Strategy 1: Section-Based Splitting + +Identify visual sections and extract each as a component. + +```typescript +// āŒ Before: Monolithic component (500+ lines) +const ConfigurationPage = () => { + return ( +
+    <div>
+      {/* Header Section - 50 lines */}
+      <div>
+        <div>
+          <div>{t('configuration.title')}</div>
+        </div>
+        <div>
+          {isAdvancedMode && <Badge>Advanced</Badge>}
+          <PublishButton />
+          <MoreActionsButton />
+        </div>
+      </div>
+
+      {/* Config Section - 200 lines */}
+      <div>
+        <Config />
+      </div>
+
+      {/* Debug Section - 150 lines */}
+      <div>
+        <Debug />
+      </div>
+
+      {/* Modals Section - 100 lines */}
+      {showSelectDataSet && <SelectDataSet />}
+      {showHistoryModal && <HistoryModal />}
+      {showUseGPT4Confirm && <UseGPT4Confirm />}
+    </div>
+  )
+}
+
+// āœ… After: Split into focused components
+// configuration/
+//   ā”œā”€ā”€ index.tsx (orchestration)
+//   ā”œā”€ā”€ configuration-header.tsx
+//   ā”œā”€ā”€ configuration-content.tsx
+//   ā”œā”€ā”€ configuration-debug.tsx
+//   └── configuration-modals.tsx
+
+// configuration-header.tsx
+interface ConfigurationHeaderProps {
+  isAdvancedMode: boolean
+  onPublish: () => void
+}
+
+const ConfigurationHeader: FC<ConfigurationHeaderProps> = ({
+  isAdvancedMode,
+  onPublish,
+}) => {
+  const { t } = useTranslation()
+
+  return (
+    <div>
+      <div>{t('configuration.title')}</div>
+      <div>
+        {isAdvancedMode && <Badge>Advanced</Badge>}
+        <PublishButton onClick={onPublish} />
+        <MoreActionsButton />
+      </div>
+    </div>
+  )
+}
+
+// index.tsx (orchestration only)
+const ConfigurationPage = () => {
+  const { modelConfig, setModelConfig } = useModelConfig()
+  const { activeModal, openModal, closeModal } = useModalState()
+
+  return (
+    <div>
+      <ConfigurationHeader isAdvancedMode={isAdvancedMode} onPublish={handlePublish} />
+      <ConfigurationContent modelConfig={modelConfig} onChange={setModelConfig} />
+      {!isMobile && (
+        <ConfigurationDebug />
+      )}
+      <ConfigurationModals activeModal={activeModal} onClose={closeModal} />
+    </div>
+ ) +} +``` + +### Strategy 2: Conditional Block Extraction + +Extract large conditional rendering blocks. + +```typescript +// āŒ Before: Large conditional blocks +const AppInfo = () => { + return ( +
+    <div>
+      {expand ? (
+        <div>
+          {/* 100 lines of expanded view */}
+        </div>
+      ) : (
+        <div>
+          {/* 50 lines of collapsed view */}
+        </div>
+      )}
+    </div>
+  )
+}
+
+// āœ… After: Separate view components
+const AppInfoExpanded: FC = ({ appDetail, onAction }) => {
+  return (
+    <div>
+      {/* Clean, focused expanded view */}
+    </div>
+  )
+}
+
+const AppInfoCollapsed: FC = ({ appDetail, onAction }) => {
+  return (
+    <div>
+      {/* Clean, focused collapsed view */}
+    </div>
+  )
+}
+
+const AppInfo = () => {
+  return (
+    <div>
+      {expand
+        ? <AppInfoExpanded appDetail={appDetail} onAction={handleAction} />
+        : <AppInfoCollapsed appDetail={appDetail} onAction={handleAction} />
+      }
+    </div>
+ ) +} +``` + +### Strategy 3: Modal Extraction + +Extract modals with their trigger logic. + +```typescript +// āŒ Before: Multiple modals in one component +const AppInfo = () => { + const [showEdit, setShowEdit] = useState(false) + const [showDuplicate, setShowDuplicate] = useState(false) + const [showDelete, setShowDelete] = useState(false) + const [showSwitch, setShowSwitch] = useState(false) + + const onEdit = async (data) => { /* 20 lines */ } + const onDuplicate = async (data) => { /* 20 lines */ } + const onDelete = async () => { /* 15 lines */ } + + return ( +
+    <div>
+      {/* Main content */}
+
+      {showEdit && <EditAppModal onClose={() => setShowEdit(false)} />}
+      {showDuplicate && <DuplicateAppModal onClose={() => setShowDuplicate(false)} />}
+      {showDelete && <ConfirmDelete onClose={() => setShowDelete(false)} />}
+      {showSwitch && <SwitchAppModal />}
+    </div>
+  )
+}
+
+// āœ… After: Modal manager component
+// app-info-modals.tsx
+type ModalType = 'edit' | 'duplicate' | 'delete' | 'switch' | null
+
+interface AppInfoModalsProps {
+  appDetail: AppDetail
+  activeModal: ModalType
+  onClose: () => void
+  onSuccess: () => void
+}
+
+const AppInfoModals: FC<AppInfoModalsProps> = ({
+  appDetail,
+  activeModal,
+  onClose,
+  onSuccess,
+}) => {
+  const handleEdit = async (data) => { /* logic */ }
+  const handleDuplicate = async (data) => { /* logic */ }
+  const handleDelete = async () => { /* logic */ }
+
+  return (
+    <>
+      {activeModal === 'edit' && (
+        <EditAppModal appDetail={appDetail} onClose={onClose} onSuccess={onSuccess} />
+      )}
+      {activeModal === 'duplicate' && (
+        <DuplicateAppModal appDetail={appDetail} onClose={onClose} onSuccess={onSuccess} />
+      )}
+      {activeModal === 'delete' && (
+        <ConfirmDelete appDetail={appDetail} onClose={onClose} onSuccess={onSuccess} />
+      )}
+      {activeModal === 'switch' && (
+        <SwitchAppModal appDetail={appDetail} onClose={onClose} onSuccess={onSuccess} />
+      )}
+    </>
+  )
+}
+
+// Parent component
+const AppInfo = () => {
+  const { activeModal, openModal, closeModal } = useModalState()
+
+  return (
+    <div>
+      {/* Main content with openModal triggers */}
+
+      <AppInfoModals appDetail={appDetail} activeModal={activeModal} onClose={closeModal} onSuccess={handleSuccess} />
+    </div>
+ ) +} +``` + +### Strategy 4: List Item Extraction + +Extract repeated item rendering. + +```typescript +// āŒ Before: Inline item rendering +const OperationsList = () => { + return ( +
+    <div>
+      {operations.map(op => (
+        <div key={op.id}>
+          <span>{op.icon}</span>
+          <span>{op.title}</span>
+          <span>{op.description}</span>
+          <button onClick={op.onClick}>...</button>
+          {op.badge && <Badge>{op.badge}</Badge>}
+          {/* More complex rendering... */}
+        </div>
+      ))}
+    </div>
+  )
+}
+
+// āœ… After: Extracted item component
+interface OperationItemProps {
+  operation: Operation
+  onAction: (id: string) => void
+}
+
+const OperationItem: FC<OperationItemProps> = ({ operation, onAction }) => {
+  return (
+    <div>
+      <span>{operation.icon}</span>
+      <span>{operation.title}</span>
+      <span>{operation.description}</span>
+      <button onClick={() => onAction(operation.id)}>...</button>
+      {operation.badge && <Badge>{operation.badge}</Badge>}
+    </div>
+  )
+}
+
+const OperationsList = () => {
+  const handleAction = useCallback((id: string) => {
+    const op = operations.find(o => o.id === id)
+    op?.onClick()
+  }, [operations])
+
+  return (
+    <div>
+      {operations.map(op => (
+        <OperationItem key={op.id} operation={op} onAction={handleAction} />
+      ))}
+    </div>
+  )
+}
+```
+
+## Directory Structure Patterns
+
+### Pattern A: Flat Structure (Simple Components)
+
+For components with 2-3 sub-components:
+
+```
+component-name/
+  ā”œā”€ā”€ index.tsx            # Main component
+  ā”œā”€ā”€ sub-component-a.tsx
+  ā”œā”€ā”€ sub-component-b.tsx
+  └── types.ts             # Shared types
+```
+
+### Pattern B: Nested Structure (Complex Components)
+
+For components with many sub-components:
+
+```
+component-name/
+  ā”œā”€ā”€ index.tsx            # Main orchestration
+  ā”œā”€ā”€ types.ts             # Shared types
+  ā”œā”€ā”€ hooks/
+  │   ā”œā”€ā”€ use-feature-a.ts
+  │   └── use-feature-b.ts
+  ā”œā”€ā”€ components/
+  │   ā”œā”€ā”€ header/
+  │   │   └── index.tsx
+  │   ā”œā”€ā”€ content/
+  │   │   └── index.tsx
+  │   └── modals/
+  │       └── index.tsx
+  └── utils/
+      └── helpers.ts
+```
+
+### Pattern C: Feature-Based Structure (Dify Standard)
+
+Following Dify's existing patterns:
+
+```
+configuration/
+  ā”œā”€ā”€ index.tsx            # Main page component
+  ā”œā”€ā”€ base/                # Base/shared components
+  │   ā”œā”€ā”€ feature-panel/
+  │   ā”œā”€ā”€ group-name/
+  │   └── operation-btn/
+  ā”œā”€ā”€ config/              # Config section
+  │   ā”œā”€ā”€ index.tsx
+  │   ā”œā”€ā”€ agent/
+  │   └── automatic/
+  ā”œā”€ā”€ dataset-config/      # Dataset section
+  │   ā”œā”€ā”€ index.tsx
+  │   ā”œā”€ā”€ card-item/
+  │   └── params-config/
+  ā”œā”€ā”€ debug/               # Debug section
+  │   ā”œā”€ā”€ index.tsx
+  │   └── hooks.tsx
+  └── hooks/               # Shared hooks
+      └── use-advanced-prompt-config.ts
+```
+
+## Props Design
+
+### Minimal Props Principle
+
+Pass only what's needed:
+
+```typescript
+// āŒ Bad: Passing entire objects when only some fields needed
+<AppCard appDetail={appDetail} userProfile={userProfile} />
+
+// āœ… Good: Destructure to minimum required
+<AppCard name={appDetail.name} icon={appDetail.icon} />
+```
+
+### Callback Props Pattern
+
+Use callbacks for child-to-parent communication:
+
+```typescript
+// Parent
+const Parent = () => {
+  const [value, setValue] = useState('')
+
+  return (
+    <Child value={value} onChange={setValue} onSubmit={handleSubmit} />
+  )
+}
+
+// Child
+interface ChildProps {
+  value: string
+  onChange: (value: string) => void
+  onSubmit: () => void
+}
+
+const Child: FC<ChildProps> = ({ value, onChange, onSubmit }) => {
+  return (
+    <div>
+      <input value={value} onChange={e => onChange(e.target.value)} />
+      <button onClick={onSubmit}>Submit</button>
+    </div>
+  )
+}
+```
+
+### Render Props for Flexibility
+
+When sub-components need parent context:

+```typescript
+interface ListProps<T> {
+  items: T[]
+  renderItem: (item: T, index: number) => React.ReactNode
+  renderEmpty?: () => React.ReactNode
+}
+
+function List<T>({ items, renderItem, renderEmpty }: ListProps<T>) {
+  if (items.length === 0 && renderEmpty) {
+    return <>{renderEmpty()}</>
+  }
+
+  return (
+    <div>
+      {items.map((item, index) => renderItem(item, index))}
+    </div>
+  )
+}
+
+// Usage
+<List
+  items={operations}
+  renderItem={(op, index) => <OperationItem key={index} operation={op} />}
+  renderEmpty={() => <EmptyState />}
+/>
+```
diff --git a/.claude/skills/component-refactoring/references/hook-extraction.md b/.claude/skills/component-refactoring/references/hook-extraction.md
new file mode 100644
index 0000000000..a8d75deffd
--- /dev/null
+++ b/.claude/skills/component-refactoring/references/hook-extraction.md
@@ -0,0 +1,317 @@
+# Hook Extraction Patterns
+
+This document provides detailed guidance on extracting custom hooks from complex components in Dify.
+
+## When to Extract Hooks
+
+Extract a custom hook when you identify:
+
+1. **Coupled state groups** - Multiple `useState` hooks that are always used together
+1. **Complex effects** - `useEffect` with multiple dependencies or cleanup logic
+1. **Business logic** - Data transformations, validations, or calculations
+1. **Reusable patterns** - Logic that appears in multiple components
+
+## Extraction Process
+
+### Step 1: Identify State Groups
+
+Look for state variables that are logically related:
+
+```typescript
+// āŒ These belong together - extract to hook
+const [modelConfig, setModelConfig] = useState(...)
+const [completionParams, setCompletionParams] = useState({})
+const [modelModeType, setModelModeType] = useState(...)
+
+// These are model-related state that should be in useModelConfig()
+```
+
+### Step 2: Identify Related Effects
+
+Find effects that modify the grouped state:
+
+```typescript
+// āŒ These effects belong with the state above
+useEffect(() => {
+  if (hasFetchedDetail && !modelModeType) {
+    const mode = currModel?.model_properties.mode
+    if (mode) {
+      const newModelConfig = produce(modelConfig, (draft) => {
+        draft.mode = mode
+      })
+      setModelConfig(newModelConfig)
+    }
+  }
+}, [textGenerationModelList, hasFetchedDetail, modelModeType, currModel])
+```
+
+### Step 3: Create the Hook
+
+```typescript
+// hooks/use-model-config.ts
+import type { FormValue } from '@/app/components/header/account-setting/model-provider-page/declarations'
+import type { ModelConfig } from '@/models/debug'
+import { produce } from 'immer'
+import { useEffect, useState } from 'react'
+import { ModelModeType } from '@/types/app'
+
+interface UseModelConfigParams {
+  initialConfig?: Partial<ModelConfig>
+  currModel?: { model_properties?: { mode?: ModelModeType } }
+  hasFetchedDetail: boolean
+}
+
+interface UseModelConfigReturn {
+  modelConfig: ModelConfig
+  setModelConfig: (config: ModelConfig) => void
+  completionParams: FormValue
+  setCompletionParams: (params: FormValue) => void
+  modelModeType: ModelModeType
+}
+
+export const useModelConfig = ({
+  initialConfig,
+  currModel,
+  hasFetchedDetail,
+}: UseModelConfigParams): UseModelConfigReturn => {
+  const [modelConfig, setModelConfig] = useState<ModelConfig>({
+    provider: 'langgenius/openai/openai',
+    model_id: 'gpt-3.5-turbo',
+    mode: ModelModeType.unset,
+    // ...
default values + ...initialConfig, + }) + + const [completionParams, setCompletionParams] = useState({}) + + const modelModeType = modelConfig.mode + + // Fill old app data missing model mode + useEffect(() => { + if (hasFetchedDetail && !modelModeType) { + const mode = currModel?.model_properties?.mode + if (mode) { + setModelConfig(produce(modelConfig, (draft) => { + draft.mode = mode + })) + } + } + }, [hasFetchedDetail, modelModeType, currModel]) + + return { + modelConfig, + setModelConfig, + completionParams, + setCompletionParams, + modelModeType, + } +} +``` + +### Step 4: Update Component + +```typescript +// Before: 50+ lines of state management +const Configuration: FC = () => { + const [modelConfig, setModelConfig] = useState(...) + // ... lots of related state and effects +} + +// After: Clean component +const Configuration: FC = () => { + const { + modelConfig, + setModelConfig, + completionParams, + setCompletionParams, + modelModeType, + } = useModelConfig({ + currModel, + hasFetchedDetail, + }) + + // Component now focuses on UI +} +``` + +## Naming Conventions + +### Hook Names + +- Use `use` prefix: `useModelConfig`, `useDatasetConfig` +- Be specific: `useAdvancedPromptConfig` not `usePrompt` +- Include domain: `useWorkflowVariables`, `useMCPServer` + +### File Names + +- Kebab-case: `use-model-config.ts` +- Place in `hooks/` subdirectory when multiple hooks exist +- Place alongside component for single-use hooks + +### Return Type Names + +- Suffix with `Return`: `UseModelConfigReturn` +- Suffix params with `Params`: `UseModelConfigParams` + +## Common Hook Patterns in Dify + +### 1. Data Fetching Hook (React Query) + +```typescript +// Pattern: Use @tanstack/react-query for data fetching +import { useQuery, useQueryClient } from '@tanstack/react-query' +import { get } from '@/service/base' +import { useInvalid } from '@/service/use-base' + +const NAME_SPACE = 'appConfig' + +// Query keys for cache management +export const appConfigQueryKeys = { + detail: (appId: string) => [NAME_SPACE, 'detail', appId] as const, +} + +// Main data hook +export const useAppConfig = (appId: string) => { + return useQuery({ + enabled: !!appId, + queryKey: appConfigQueryKeys.detail(appId), + queryFn: () => get(`/apps/${appId}`), + select: data => data?.model_config || null, + }) +} + +// Invalidation hook for refreshing data +export const useInvalidAppConfig = () => { + return useInvalid([NAME_SPACE]) +} + +// Usage in component +const Component = () => { + const { data: config, isLoading, error, refetch } = useAppConfig(appId) + const invalidAppConfig = useInvalidAppConfig() + + const handleRefresh = () => { + invalidAppConfig() // Invalidates cache and triggers refetch + } + + return
+    <div>...</div>
+}
+```
+
+### 2. Form State Hook
+
+```typescript
+// Pattern: Form state + validation + submission
+export const useConfigForm = (initialValues: ConfigFormValues) => {
+  const [values, setValues] = useState(initialValues)
+  const [errors, setErrors] = useState<Record<string, string>>({})
+  const [isSubmitting, setIsSubmitting] = useState(false)
+
+  const validate = useCallback(() => {
+    const newErrors: Record<string, string> = {}
+    if (!values.name) newErrors.name = 'Name is required'
+    setErrors(newErrors)
+    return Object.keys(newErrors).length === 0
+  }, [values])
+
+  const handleChange = useCallback((field: string, value: any) => {
+    setValues(prev => ({ ...prev, [field]: value }))
+  }, [])
+
+  const handleSubmit = useCallback(async (onSubmit: (values: ConfigFormValues) => Promise<void>) => {
+    if (!validate()) return
+    setIsSubmitting(true)
+    try {
+      await onSubmit(values)
+    } finally {
+      setIsSubmitting(false)
+    }
+  }, [values, validate])
+
+  return { values, errors, isSubmitting, handleChange, handleSubmit }
+}
+```
+
+### 3. Modal State Hook
+
+```typescript
+// Pattern: Multiple modal management
+type ModalType = 'edit' | 'delete' | 'duplicate' | null
+
+export const useModalState = () => {
+  const [activeModal, setActiveModal] = useState<ModalType>(null)
+  const [modalData, setModalData] = useState<any>(null)
+
+  const openModal = useCallback((type: ModalType, data?: any) => {
+    setActiveModal(type)
+    setModalData(data)
+  }, [])
+
+  const closeModal = useCallback(() => {
+    setActiveModal(null)
+    setModalData(null)
+  }, [])
+
+  return {
+    activeModal,
+    modalData,
+    openModal,
+    closeModal,
+    isOpen: useCallback((type: ModalType) => activeModal === type, [activeModal]),
+  }
+}
+```
+
+### 4. Toggle/Boolean Hook
+
+```typescript
+// Pattern: Boolean state with convenience methods
+export const useToggle = (initialValue = false) => {
+  const [value, setValue] = useState(initialValue)
+
+  const toggle = useCallback(() => setValue(v => !v), [])
+  const setTrue = useCallback(() => setValue(true), [])
+  const setFalse = useCallback(() => setValue(false), [])
+
+  return [value, { toggle, setTrue, setFalse, set: setValue }] as const
+}
+
+// Usage
+const [isExpanded, { toggle, setTrue: expand, setFalse: collapse }] = useToggle()
+```
+
+## Testing Extracted Hooks
+
+After extraction, test hooks in isolation:
+
+```typescript
+// use-model-config.spec.ts
+import { renderHook, act } from '@testing-library/react'
+import { useModelConfig } from './use-model-config'
+
+describe('useModelConfig', () => {
+  it('should initialize with default values', () => {
+    const { result } = renderHook(() => useModelConfig({
+      hasFetchedDetail: false,
+    }))
+
+    expect(result.current.modelConfig.provider).toBe('langgenius/openai/openai')
+    expect(result.current.modelModeType).toBe(ModelModeType.unset)
+  })
+
+  it('should update model config', () => {
+    const { result } = renderHook(() => useModelConfig({
+      hasFetchedDetail: true,
+    }))
+
+    act(() => {
+      result.current.setModelConfig({
+        ...result.current.modelConfig,
+        model_id: 'gpt-4',
+      })
+    })
+
+    expect(result.current.modelConfig.model_id).toBe('gpt-4')
+  })
+})
+```
diff --git a/.claude/skills/frontend-testing/SKILL.md b/.claude/skills/frontend-testing/SKILL.md
index 7475513ba0..dd9677a78e 100644
--- a/.claude/skills/frontend-testing/SKILL.md
+++ b/.claude/skills/frontend-testing/SKILL.md
@@ -49,10 +49,10 @@ pnpm test
 pnpm test:watch
 
 # Run specific file
-pnpm test -- path/to/file.spec.tsx
+pnpm test path/to/file.spec.tsx
 
 # Generate coverage report
-pnpm test -- --coverage
+pnpm test:coverage
 
 # Analyze
component complexity pnpm analyze-component @@ -155,7 +155,7 @@ describe('ComponentName', () => { For each file: ā”Œā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā” │ 1. Write test │ - │ 2. Run: pnpm test -- .spec.tsx │ + │ 2. Run: pnpm test .spec.tsx │ │ 3. PASS? → Mark complete, next file │ │ FAIL? → Fix first, then continue │ ā””ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”˜ @@ -318,5 +318,5 @@ For more detailed information, refer to: - `web/vitest.config.ts` - Vitest configuration - `web/vitest.setup.ts` - Test environment setup -- `web/testing/analyze-component.js` - Component analysis tool +- `web/scripts/analyze-component.js` - Component analysis tool - Modules are not mocked automatically. Global mocks live in `web/vitest.setup.ts` (for example `react-i18next`, `next/image`); mock other modules like `ky` or `mime` locally in test files. diff --git a/.claude/skills/frontend-testing/assets/component-test.template.tsx b/.claude/skills/frontend-testing/assets/component-test.template.tsx index 92dd797c83..c39baff916 100644 --- a/.claude/skills/frontend-testing/assets/component-test.template.tsx +++ b/.claude/skills/frontend-testing/assets/component-test.template.tsx @@ -198,7 +198,7 @@ describe('ComponentName', () => { }) // -------------------------------------------------------------------------- - // Async Operations (if component fetches data - useSWR, useQuery, fetch) + // Async Operations (if component fetches data - useQuery, fetch) // -------------------------------------------------------------------------- // WHY: Async operations have 3 states users experience: loading, success, error describe('Async Operations', () => { diff --git a/.claude/skills/frontend-testing/references/checklist.md b/.claude/skills/frontend-testing/references/checklist.md index aad80b120e..1ff2b27bbb 100644 --- a/.claude/skills/frontend-testing/references/checklist.md +++ b/.claude/skills/frontend-testing/references/checklist.md @@ -114,15 +114,15 @@ For the current file being tested: **Run these checks after EACH test file, not just at the end:** -- [ ] Run `pnpm test -- path/to/file.spec.tsx` - **MUST PASS before next file** +- [ ] Run `pnpm test path/to/file.spec.tsx` - **MUST PASS before next file** - [ ] Fix any failures immediately - [ ] Mark file as complete in todo list - [ ] Only then proceed to next file ### After All Files Complete -- [ ] Run full directory test: `pnpm test -- path/to/directory/` -- [ ] Check coverage report: `pnpm test -- --coverage` +- [ ] Run full directory test: `pnpm test path/to/directory/` +- [ ] Check coverage report: `pnpm test:coverage` - [ ] Run `pnpm lint:fix` on all test files - [ ] Run `pnpm type-check:tsgo` @@ -186,16 +186,16 @@ Always test these scenarios: ```bash # Run specific test -pnpm test -- path/to/file.spec.tsx +pnpm test path/to/file.spec.tsx # Run with coverage -pnpm test -- --coverage path/to/file.spec.tsx +pnpm test:coverage path/to/file.spec.tsx # Watch mode -pnpm test:watch -- path/to/file.spec.tsx +pnpm test:watch path/to/file.spec.tsx # Update snapshots (use sparingly) -pnpm test -- -u path/to/file.spec.tsx +pnpm test -u path/to/file.spec.tsx # Analyze component pnpm analyze-component path/to/component.tsx diff --git a/.claude/skills/frontend-testing/references/mocking.md b/.claude/skills/frontend-testing/references/mocking.md index 51920ebc64..23889c8d3d 100644 --- 
a/.claude/skills/frontend-testing/references/mocking.md +++ b/.claude/skills/frontend-testing/references/mocking.md @@ -242,32 +242,9 @@ describe('Component with Context', () => { }) ``` -### 7. SWR / React Query +### 7. React Query ```typescript -// SWR -vi.mock('swr', () => ({ - __esModule: true, - default: vi.fn(), -})) - -import useSWR from 'swr' -const mockedUseSWR = vi.mocked(useSWR) - -describe('Component with SWR', () => { - it('should show loading state', () => { - mockedUseSWR.mockReturnValue({ - data: undefined, - error: undefined, - isLoading: true, - }) - - render() - expect(screen.getByText(/loading/i)).toBeInTheDocument() - }) -}) - -// React Query import { QueryClient, QueryClientProvider } from '@tanstack/react-query' const createTestQueryClient = () => new QueryClient({ diff --git a/.claude/skills/frontend-testing/references/workflow.md b/.claude/skills/frontend-testing/references/workflow.md index b0f2994bde..009c3e013b 100644 --- a/.claude/skills/frontend-testing/references/workflow.md +++ b/.claude/skills/frontend-testing/references/workflow.md @@ -35,7 +35,7 @@ When testing a **single component, hook, or utility**: 2. Run `pnpm analyze-component ` (if available) 3. Check complexity score and features detected 4. Write the test file -5. Run test: `pnpm test -- .spec.tsx` +5. Run test: `pnpm test .spec.tsx` 6. Fix any failures 7. Verify coverage meets goals (100% function, >95% branch) ``` @@ -80,7 +80,7 @@ Process files in this recommended order: ``` ā”Œā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā”€ā” │ 1. Write test file │ -│ 2. Run: pnpm test -- .spec.tsx │ +│ 2. Run: pnpm test .spec.tsx │ │ 3. If FAIL → Fix immediately, re-run │ │ 4. If PASS → Mark complete in todo list │ │ 5. ONLY THEN proceed to next file │ @@ -95,10 +95,10 @@ After all individual tests pass: ```bash # Run all tests in the directory together -pnpm test -- path/to/directory/ +pnpm test path/to/directory/ # Check coverage -pnpm test -- --coverage path/to/directory/ +pnpm test:coverage path/to/directory/ ``` ## Component Complexity Guidelines @@ -201,9 +201,9 @@ Run pnpm test ← Multiple failures, hard to debug ``` # GOOD: Incremental with verification Write component-a.spec.tsx -Run pnpm test -- component-a.spec.tsx āœ… +Run pnpm test component-a.spec.tsx āœ… Write component-b.spec.tsx -Run pnpm test -- component-b.spec.tsx āœ… +Run pnpm test component-b.spec.tsx āœ… ...continue... 
``` diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml index 76cbf64fca..152a9caee8 100644 --- a/.github/workflows/api-tests.yml +++ b/.github/workflows/api-tests.yml @@ -22,12 +22,12 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: persist-credentials: false - name: Setup UV and Python - uses: astral-sh/setup-uv@v6 + uses: astral-sh/setup-uv@v7 with: enable-cache: true python-version: ${{ matrix.python-version }} @@ -57,7 +57,7 @@ jobs: run: sh .github/workflows/expose_service_ports.sh - name: Set up Sandbox - uses: hoverkraft-tech/compose-action@v2.0.2 + uses: hoverkraft-tech/compose-action@v2 with: compose-file: | docker/docker-compose.middleware.yaml diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index dbced47988..5413f83c27 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -12,12 +12,28 @@ jobs: if: github.repository == 'langgenius/dify' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 + + - name: Check Docker Compose inputs + id: docker-compose-changes + uses: tj-actions/changed-files@v46 + with: + files: | + docker/generate_docker_compose + docker/.env.example + docker/docker-compose-template.yaml + docker/docker-compose.yaml - uses: actions/setup-python@v5 with: python-version: "3.11" - - uses: astral-sh/setup-uv@v6 + - uses: astral-sh/setup-uv@v7 + + - name: Generate Docker Compose + if: steps.docker-compose-changes.outputs.any_changed == 'true' + run: | + cd docker + ./generate_docker_compose - run: | cd api diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml index f7f464a601..bbf89236de 100644 --- a/.github/workflows/build-push.yml +++ b/.github/workflows/build-push.yml @@ -90,7 +90,7 @@ jobs: touch "/tmp/digests/${sanitized_digest}" - name: Upload digest - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: digests-${{ matrix.context }}-${{ env.PLATFORM_PAIR }} path: /tmp/digests/* diff --git a/.github/workflows/db-migration-test.yml b/.github/workflows/db-migration-test.yml index 101d973466..e20cf9850b 100644 --- a/.github/workflows/db-migration-test.yml +++ b/.github/workflows/db-migration-test.yml @@ -13,13 +13,13 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 persist-credentials: false - name: Setup UV and Python - uses: astral-sh/setup-uv@v6 + uses: astral-sh/setup-uv@v7 with: enable-cache: true python-version: "3.12" @@ -63,13 +63,13 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 persist-credentials: false - name: Setup UV and Python - uses: astral-sh/setup-uv@v6 + uses: astral-sh/setup-uv@v7 with: enable-cache: true python-version: "3.12" diff --git a/.github/workflows/main-ci.yml b/.github/workflows/main-ci.yml index 876ec23a3d..d6653de950 100644 --- a/.github/workflows/main-ci.yml +++ b/.github/workflows/main-ci.yml @@ -27,7 +27,7 @@ jobs: vdb-changed: ${{ steps.changes.outputs.vdb }} migration-changed: ${{ steps.changes.outputs.migration }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 - uses: dorny/paths-filter@v3 id: changes with: @@ -38,6 +38,7 @@ jobs: - '.github/workflows/api-tests.yml' web: - 'web/**' + - '.github/workflows/web-tests.yml' vdb: - 'api/core/rag/datasource/**' - 'docker/**' diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index 
2fb8121f74..d463349686 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -19,13 +19,13 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: persist-credentials: false - name: Check changed files id: changed-files - uses: tj-actions/changed-files@v46 + uses: tj-actions/changed-files@v47 with: files: | api/** @@ -33,7 +33,7 @@ jobs: - name: Setup UV and Python if: steps.changed-files.outputs.any_changed == 'true' - uses: astral-sh/setup-uv@v6 + uses: astral-sh/setup-uv@v7 with: enable-cache: false python-version: "3.12" @@ -68,15 +68,17 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: persist-credentials: false - name: Check changed files id: changed-files - uses: tj-actions/changed-files@v46 + uses: tj-actions/changed-files@v47 with: - files: web/** + files: | + web/** + .github/workflows/style.yml - name: Install pnpm uses: pnpm/action-setup@v4 @@ -85,7 +87,7 @@ jobs: run_install: false - name: Setup NodeJS - uses: actions/setup-node@v4 + uses: actions/setup-node@v6 if: steps.changed-files.outputs.any_changed == 'true' with: node-version: 22 @@ -108,50 +110,20 @@ jobs: working-directory: ./web run: pnpm run type-check:tsgo - docker-compose-template: - name: Docker Compose Template - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Check changed files - id: changed-files - uses: tj-actions/changed-files@v46 - with: - files: | - docker/generate_docker_compose - docker/.env.example - docker/docker-compose-template.yaml - docker/docker-compose.yaml - - - name: Generate Docker Compose - if: steps.changed-files.outputs.any_changed == 'true' - run: | - cd docker - ./generate_docker_compose - - - name: Check for changes - if: steps.changed-files.outputs.any_changed == 'true' - run: git diff --exit-code - superlinter: name: SuperLinter runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 persist-credentials: false - name: Check changed files id: changed-files - uses: tj-actions/changed-files@v46 + uses: tj-actions/changed-files@v47 with: files: | **.sh diff --git a/.github/workflows/tool-test-sdks.yaml b/.github/workflows/tool-test-sdks.yaml index b1ccd7417a..0259ef2232 100644 --- a/.github/workflows/tool-test-sdks.yaml +++ b/.github/workflows/tool-test-sdks.yaml @@ -25,12 +25,12 @@ jobs: working-directory: sdks/nodejs-client steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: persist-credentials: false - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v4 + uses: actions/setup-node@v6 with: node-version: ${{ matrix.node-version }} cache: '' diff --git a/.github/workflows/translate-i18n-base-on-english.yml b/.github/workflows/translate-i18n-base-on-english.yml index 8bb82d5d44..a51350f630 100644 --- a/.github/workflows/translate-i18n-base-on-english.yml +++ b/.github/workflows/translate-i18n-base-on-english.yml @@ -1,10 +1,10 @@ -name: Check i18n Files and Create PR +name: Translate i18n Files Based on English on: push: branches: [main] paths: - - 'web/i18n/en-US/*.ts' + - 'web/i18n/en-US/*.json' permissions: contents: write @@ -18,7 +18,7 @@ jobs: run: working-directory: web steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} @@ -28,13 +28,13 @@ jobs: run: | git fetch origin "${{ github.event.before 
}}" || true git fetch origin "${{ github.sha }}" || true - changed_files=$(git diff --name-only "${{ github.event.before }}" "${{ github.sha }}" -- 'i18n/en-US/*.ts') + changed_files=$(git diff --name-only "${{ github.event.before }}" "${{ github.sha }}" -- 'i18n/en-US/*.json') echo "Changed files: $changed_files" if [ -n "$changed_files" ]; then echo "FILES_CHANGED=true" >> $GITHUB_ENV file_args="" for file in $changed_files; do - filename=$(basename "$file" .ts) + filename=$(basename "$file" .json) file_args="$file_args --file $filename" done echo "FILE_ARGS=$file_args" >> $GITHUB_ENV @@ -51,7 +51,7 @@ jobs: - name: Set up Node.js if: env.FILES_CHANGED == 'true' - uses: actions/setup-node@v4 + uses: actions/setup-node@v6 with: node-version: 'lts/*' cache: pnpm @@ -65,12 +65,7 @@ jobs: - name: Generate i18n translations if: env.FILES_CHANGED == 'true' working-directory: ./web - run: pnpm run auto-gen-i18n ${{ env.FILE_ARGS }} - - - name: Generate i18n type definitions - if: env.FILES_CHANGED == 'true' - working-directory: ./web - run: pnpm run gen:i18n-types + run: pnpm run i18n:gen ${{ env.FILE_ARGS }} - name: Create Pull Request if: env.FILES_CHANGED == 'true' @@ -78,14 +73,13 @@ jobs: with: token: ${{ secrets.GITHUB_TOKEN }} commit-message: 'chore(i18n): update translations based on en-US changes' - title: 'chore(i18n): translate i18n files and update type definitions' + title: 'chore(i18n): translate i18n files based on en-US changes' body: | - This PR was automatically created to update i18n files and TypeScript type definitions based on changes in en-US locale. + This PR was automatically created to update i18n translation files based on changes in en-US locale. **Triggered by:** ${{ github.sha }} **Changes included:** - Updated translation files for all locales - - Regenerated TypeScript type definitions for type safety branch: chore/automated-i18n-updates-${{ github.sha }} delete-branch: true diff --git a/.github/workflows/vdb-tests.yml b/.github/workflows/vdb-tests.yml index 291171e5c7..7735afdaca 100644 --- a/.github/workflows/vdb-tests.yml +++ b/.github/workflows/vdb-tests.yml @@ -19,19 +19,19 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: persist-credentials: false - name: Free Disk Space - uses: endersonmenezes/free-disk-space@v2 + uses: endersonmenezes/free-disk-space@v3 with: remove_dotnet: true remove_haskell: true remove_tool_cache: true - name: Setup UV and Python - uses: astral-sh/setup-uv@v6 + uses: astral-sh/setup-uv@v7 with: enable-cache: true python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/web-tests.yml b/.github/workflows/web-tests.yml index 8eba0f084b..0fd1d5d22b 100644 --- a/.github/workflows/web-tests.yml +++ b/.github/workflows/web-tests.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: persist-credentials: false @@ -29,7 +29,7 @@ jobs: run_install: false - name: Setup Node.js - uses: actions/setup-node@v4 + uses: actions/setup-node@v6 with: node-version: 22 cache: pnpm @@ -38,11 +38,8 @@ jobs: - name: Install dependencies run: pnpm install --frozen-lockfile - - name: Check i18n types synchronization - run: pnpm run check:i18n-types - - name: Run tests - run: pnpm test --coverage + run: pnpm test:coverage - name: Coverage Summary if: always() @@ -363,7 +360,7 @@ jobs: - name: Upload Coverage Artifact if: steps.coverage-summary.outputs.has_coverage == 'true' - uses: actions/upload-artifact@v4 + uses: 
actions/upload-artifact@v6 with: name: web-coverage-report path: web/coverage diff --git a/.gitignore b/.gitignore index 48fd921162..a2a6089a66 100644 --- a/.gitignore +++ b/.gitignore @@ -195,6 +195,7 @@ docker/nginx/ssl/* !docker/nginx/ssl/.gitkeep docker/middleware.env docker/docker-compose.override.yaml +docker/env-backup/* sdks/python-client/build sdks/python-client/dist diff --git a/.mcp.json b/.mcp.json deleted file mode 100644 index 8eceaf9ead..0000000000 --- a/.mcp.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "mcpServers": { - "context7": { - "type": "http", - "url": "https://mcp.context7.com/mcp" - }, - "sequential-thinking": { - "type": "stdio", - "command": "npx", - "args": ["-y", "@modelcontextprotocol/server-sequential-thinking"], - "env": {} - }, - "github": { - "type": "stdio", - "command": "npx", - "args": ["-y", "@modelcontextprotocol/server-github"], - "env": { - "GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_PERSONAL_ACCESS_TOKEN}" - } - }, - "fetch": { - "type": "stdio", - "command": "uvx", - "args": ["mcp-server-fetch"], - "env": {} - }, - "playwright": { - "type": "stdio", - "command": "npx", - "args": ["-y", "@playwright/mcp@latest"], - "env": {} - } - } - } \ No newline at end of file diff --git a/api/.env.example b/api/.env.example index 9cbb111d31..99cd2ba558 100644 --- a/api/.env.example +++ b/api/.env.example @@ -128,6 +128,7 @@ TENCENT_COS_SECRET_KEY=your-secret-key TENCENT_COS_SECRET_ID=your-secret-id TENCENT_COS_REGION=your-region TENCENT_COS_SCHEME=your-scheme +TENCENT_COS_CUSTOM_DOMAIN=your-custom-domain # Huawei OBS Storage Configuration HUAWEI_OBS_BUCKET_NAME=your-bucket-name diff --git a/api/configs/middleware/storage/tencent_cos_storage_config.py b/api/configs/middleware/storage/tencent_cos_storage_config.py index e297e748e9..cdd10740f8 100644 --- a/api/configs/middleware/storage/tencent_cos_storage_config.py +++ b/api/configs/middleware/storage/tencent_cos_storage_config.py @@ -31,3 +31,8 @@ class TencentCloudCOSStorageConfig(BaseSettings): description="Protocol scheme for COS requests: 'https' (recommended) or 'http'", default=None, ) + + TENCENT_COS_CUSTOM_DOMAIN: str | None = Field( + description="Tencent Cloud COS custom domain setting", + default=None, + ) diff --git a/api/controllers/common/file_response.py b/api/controllers/common/file_response.py new file mode 100644 index 0000000000..ca8ea3d52e --- /dev/null +++ b/api/controllers/common/file_response.py @@ -0,0 +1,57 @@ +import os +from email.message import Message +from urllib.parse import quote + +from flask import Response + +HTML_MIME_TYPES = frozenset({"text/html", "application/xhtml+xml"}) +HTML_EXTENSIONS = frozenset({"html", "htm"}) + + +def _normalize_mime_type(mime_type: str | None) -> str: + if not mime_type: + return "" + message = Message() + message["Content-Type"] = mime_type + return message.get_content_type().strip().lower() + + +def _is_html_extension(extension: str | None) -> bool: + if not extension: + return False + return extension.lstrip(".").lower() in HTML_EXTENSIONS + + +def is_html_content(mime_type: str | None, filename: str | None, extension: str | None = None) -> bool: + normalized_mime_type = _normalize_mime_type(mime_type) + if normalized_mime_type in HTML_MIME_TYPES: + return True + + if _is_html_extension(extension): + return True + + if filename: + return _is_html_extension(os.path.splitext(filename)[1]) + + return False + + +def enforce_download_for_html( + response: Response, + *, + mime_type: str | None, + filename: str | None, + extension: str | None = None, +) 
-> bool: + if not is_html_content(mime_type, filename, extension): + return False + + if filename: + encoded_filename = quote(filename) + response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{encoded_filename}" + else: + response.headers["Content-Disposition"] = "attachment" + + response.headers["Content-Type"] = "application/octet-stream" + response.headers["X-Content-Type-Options"] = "nosniff" + return True diff --git a/api/controllers/console/billing/billing.py b/api/controllers/console/billing/billing.py index 7f907dc420..ac039f9c5d 100644 --- a/api/controllers/console/billing/billing.py +++ b/api/controllers/console/billing/billing.py @@ -1,8 +1,9 @@ import base64 +from typing import Literal from flask import request from flask_restx import Resource, fields -from pydantic import BaseModel, Field, field_validator +from pydantic import BaseModel, Field from werkzeug.exceptions import BadRequest from controllers.console import console_ns @@ -15,22 +16,8 @@ DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}" class SubscriptionQuery(BaseModel): - plan: str = Field(..., description="Subscription plan") - interval: str = Field(..., description="Billing interval") - - @field_validator("plan") - @classmethod - def validate_plan(cls, value: str) -> str: - if value not in [CloudPlan.PROFESSIONAL, CloudPlan.TEAM]: - raise ValueError("Invalid plan") - return value - - @field_validator("interval") - @classmethod - def validate_interval(cls, value: str) -> str: - if value not in {"month", "year"}: - raise ValueError("Invalid interval") - return value + plan: Literal[CloudPlan.PROFESSIONAL, CloudPlan.TEAM] = Field(..., description="Subscription plan") + interval: Literal["month", "year"] = Field(..., description="Billing interval") class PartnerTenantsPayload(BaseModel): diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index 6145da31a5..e94768f985 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -572,7 +572,7 @@ class DocumentBatchIndexingEstimateApi(DocumentResource): datasource_type=DatasourceType.NOTION, notion_info=NotionInfo.model_validate( { - "credential_id": data_source_info["credential_id"], + "credential_id": data_source_info.get("credential_id"), "notion_workspace_id": data_source_info["notion_workspace_id"], "notion_obj_id": data_source_info["notion_page_id"], "notion_page_type": data_source_info["type"], diff --git a/api/controllers/console/explore/message.py b/api/controllers/console/explore/message.py index 229b7c8865..d596d60b36 100644 --- a/api/controllers/console/explore/message.py +++ b/api/controllers/console/explore/message.py @@ -1,6 +1,5 @@ import logging from typing import Literal -from uuid import UUID from flask import request from flask_restx import marshal_with @@ -26,6 +25,7 @@ from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotIni from core.model_runtime.errors.invoke import InvokeError from fields.message_fields import message_infinite_scroll_pagination_fields from libs import helper +from libs.helper import UUIDStrOrEmpty from libs.login import current_account_with_tenant from models.model import AppMode from services.app_generate_service import AppGenerateService @@ -44,8 +44,8 @@ logger = logging.getLogger(__name__) class MessageListQuery(BaseModel): - conversation_id: UUID - first_id: UUID | None = None + conversation_id: UUIDStrOrEmpty + first_id: 
UUIDStrOrEmpty | None = None limit: int = Field(default=20, ge=1, le=100) diff --git a/api/controllers/console/explore/saved_message.py b/api/controllers/console/explore/saved_message.py index 6a9e274a0e..bc7b8e7651 100644 --- a/api/controllers/console/explore/saved_message.py +++ b/api/controllers/console/explore/saved_message.py @@ -1,5 +1,3 @@ -from uuid import UUID - from flask import request from flask_restx import fields, marshal_with from pydantic import BaseModel, Field @@ -10,19 +8,19 @@ from controllers.console import console_ns from controllers.console.explore.error import NotCompletionAppError from controllers.console.explore.wraps import InstalledAppResource from fields.conversation_fields import message_file_fields -from libs.helper import TimestampField +from libs.helper import TimestampField, UUIDStrOrEmpty from libs.login import current_account_with_tenant from services.errors.message import MessageNotExistsError from services.saved_message_service import SavedMessageService class SavedMessageListQuery(BaseModel): - last_id: UUID | None = None + last_id: UUIDStrOrEmpty | None = None limit: int = Field(default=20, ge=1, le=100) class SavedMessageCreatePayload(BaseModel): - message_id: UUID + message_id: UUIDStrOrEmpty register_schema_models(console_ns, SavedMessageListQuery, SavedMessageCreatePayload) diff --git a/api/controllers/console/extension.py b/api/controllers/console/extension.py index 08f29b4655..efa46c9779 100644 --- a/api/controllers/console/extension.py +++ b/api/controllers/console/extension.py @@ -1,14 +1,32 @@ -from flask_restx import Resource, fields, marshal_with, reqparse +from flask import request +from flask_restx import Resource, fields, marshal_with +from pydantic import BaseModel, Field from constants import HIDDEN_VALUE -from controllers.console import console_ns -from controllers.console.wraps import account_initialization_required, setup_required from fields.api_based_extension_fields import api_based_extension_fields from libs.login import current_account_with_tenant, login_required from models.api_based_extension import APIBasedExtension from services.api_based_extension_service import APIBasedExtensionService from services.code_based_extension_service import CodeBasedExtensionService +from ..common.schema import register_schema_models +from . 
import console_ns +from .wraps import account_initialization_required, setup_required + + +class CodeBasedExtensionQuery(BaseModel): + module: str + + +class APIBasedExtensionPayload(BaseModel): + name: str = Field(description="Extension name") + api_endpoint: str = Field(description="API endpoint URL") + api_key: str = Field(description="API key for authentication") + + +register_schema_models(console_ns, APIBasedExtensionPayload) + + api_based_extension_model = console_ns.model("ApiBasedExtensionModel", api_based_extension_fields) api_based_extension_list_model = fields.List(fields.Nested(api_based_extension_model)) @@ -18,11 +36,7 @@ api_based_extension_list_model = fields.List(fields.Nested(api_based_extension_m class CodeBasedExtensionAPI(Resource): @console_ns.doc("get_code_based_extension") @console_ns.doc(description="Get code-based extension data by module name") - @console_ns.expect( - console_ns.parser().add_argument( - "module", type=str, required=True, location="args", help="Extension module name" - ) - ) + @console_ns.doc(params={"module": "Extension module name"}) @console_ns.response( 200, "Success", @@ -35,10 +49,9 @@ class CodeBasedExtensionAPI(Resource): @login_required @account_initialization_required def get(self): - parser = reqparse.RequestParser().add_argument("module", type=str, required=True, location="args") - args = parser.parse_args() + query = CodeBasedExtensionQuery.model_validate(request.args.to_dict(flat=True)) # type: ignore - return {"module": args["module"], "data": CodeBasedExtensionService.get_code_based_extension(args["module"])} + return {"module": query.module, "data": CodeBasedExtensionService.get_code_based_extension(query.module)} @console_ns.route("/api-based-extension") @@ -56,30 +69,21 @@ class APIBasedExtensionAPI(Resource): @console_ns.doc("create_api_based_extension") @console_ns.doc(description="Create a new API-based extension") - @console_ns.expect( - console_ns.model( - "CreateAPIBasedExtensionRequest", - { - "name": fields.String(required=True, description="Extension name"), - "api_endpoint": fields.String(required=True, description="API endpoint URL"), - "api_key": fields.String(required=True, description="API key for authentication"), - }, - ) - ) + @console_ns.expect(console_ns.models[APIBasedExtensionPayload.__name__]) @console_ns.response(201, "Extension created successfully", api_based_extension_model) @setup_required @login_required @account_initialization_required @marshal_with(api_based_extension_model) def post(self): - args = console_ns.payload + payload = APIBasedExtensionPayload.model_validate(console_ns.payload or {}) _, current_tenant_id = current_account_with_tenant() extension_data = APIBasedExtension( tenant_id=current_tenant_id, - name=args["name"], - api_endpoint=args["api_endpoint"], - api_key=args["api_key"], + name=payload.name, + api_endpoint=payload.api_endpoint, + api_key=payload.api_key, ) return APIBasedExtensionService.save(extension_data) @@ -104,16 +108,7 @@ class APIBasedExtensionDetailAPI(Resource): @console_ns.doc("update_api_based_extension") @console_ns.doc(description="Update API-based extension") @console_ns.doc(params={"id": "Extension ID"}) - @console_ns.expect( - console_ns.model( - "UpdateAPIBasedExtensionRequest", - { - "name": fields.String(required=True, description="Extension name"), - "api_endpoint": fields.String(required=True, description="API endpoint URL"), - "api_key": fields.String(required=True, description="API key for authentication"), - }, - ) - ) + 
@console_ns.expect(console_ns.models[APIBasedExtensionPayload.__name__]) @console_ns.response(200, "Extension updated successfully", api_based_extension_model) @setup_required @login_required @@ -125,13 +120,13 @@ class APIBasedExtensionDetailAPI(Resource): extension_data_from_db = APIBasedExtensionService.get_with_tenant_id(current_tenant_id, api_based_extension_id) - args = console_ns.payload + payload = APIBasedExtensionPayload.model_validate(console_ns.payload or {}) - extension_data_from_db.name = args["name"] - extension_data_from_db.api_endpoint = args["api_endpoint"] + extension_data_from_db.name = payload.name + extension_data_from_db.api_endpoint = payload.api_endpoint - if args["api_key"] != HIDDEN_VALUE: - extension_data_from_db.api_key = args["api_key"] + if payload.api_key != HIDDEN_VALUE: + extension_data_from_db.api_key = payload.api_key return APIBasedExtensionService.save(extension_data_from_db) diff --git a/api/controllers/console/workspace/load_balancing_config.py b/api/controllers/console/workspace/load_balancing_config.py index 9bf393ea2e..ccb60b1461 100644 --- a/api/controllers/console/workspace/load_balancing_config.py +++ b/api/controllers/console/workspace/load_balancing_config.py @@ -1,6 +1,8 @@ -from flask_restx import Resource, reqparse +from flask_restx import Resource +from pydantic import BaseModel from werkzeug.exceptions import Forbidden +from controllers.common.schema import register_schema_models from controllers.console import console_ns from controllers.console.wraps import account_initialization_required, setup_required from core.model_runtime.entities.model_entities import ModelType @@ -10,10 +12,20 @@ from models import TenantAccountRole from services.model_load_balancing_service import ModelLoadBalancingService +class LoadBalancingCredentialPayload(BaseModel): + model: str + model_type: ModelType + credentials: dict[str, object] + + +register_schema_models(console_ns, LoadBalancingCredentialPayload) + + @console_ns.route( "/workspaces/current/model-providers//models/load-balancing-configs/credentials-validate" ) class LoadBalancingCredentialsValidateApi(Resource): + @console_ns.expect(console_ns.models[LoadBalancingCredentialPayload.__name__]) @setup_required @login_required @account_initialization_required @@ -24,20 +36,7 @@ class LoadBalancingCredentialsValidateApi(Resource): tenant_id = current_tenant_id - parser = ( - reqparse.RequestParser() - .add_argument("model", type=str, required=True, nullable=False, location="json") - .add_argument( - "model_type", - type=str, - required=True, - nullable=False, - choices=[mt.value for mt in ModelType], - location="json", - ) - .add_argument("credentials", type=dict, required=True, nullable=False, location="json") - ) - args = parser.parse_args() + payload = LoadBalancingCredentialPayload.model_validate(console_ns.payload or {}) # validate model load balancing credentials model_load_balancing_service = ModelLoadBalancingService() @@ -49,9 +48,9 @@ class LoadBalancingCredentialsValidateApi(Resource): model_load_balancing_service.validate_load_balancing_credentials( tenant_id=tenant_id, provider=provider, - model=args["model"], - model_type=args["model_type"], - credentials=args["credentials"], + model=payload.model, + model_type=payload.model_type, + credentials=payload.credentials, ) except CredentialsValidateFailedError as ex: result = False @@ -69,6 +68,7 @@ class LoadBalancingCredentialsValidateApi(Resource): "/workspaces/current/model-providers//models/load-balancing-configs//credentials-validate" ) 
class LoadBalancingConfigCredentialsValidateApi(Resource): + @console_ns.expect(console_ns.models[LoadBalancingCredentialPayload.__name__]) @setup_required @login_required @account_initialization_required @@ -79,20 +79,7 @@ class LoadBalancingConfigCredentialsValidateApi(Resource): tenant_id = current_tenant_id - parser = ( - reqparse.RequestParser() - .add_argument("model", type=str, required=True, nullable=False, location="json") - .add_argument( - "model_type", - type=str, - required=True, - nullable=False, - choices=[mt.value for mt in ModelType], - location="json", - ) - .add_argument("credentials", type=dict, required=True, nullable=False, location="json") - ) - args = parser.parse_args() + payload = LoadBalancingCredentialPayload.model_validate(console_ns.payload or {}) # validate model load balancing config credentials model_load_balancing_service = ModelLoadBalancingService() @@ -104,9 +91,9 @@ class LoadBalancingConfigCredentialsValidateApi(Resource): model_load_balancing_service.validate_load_balancing_credentials( tenant_id=tenant_id, provider=provider, - model=args["model"], - model_type=args["model_type"], - credentials=args["credentials"], + model=payload.model, + model_type=payload.model_type, + credentials=payload.credentials, config_id=config_id, ) except CredentialsValidateFailedError as ex: diff --git a/api/controllers/console/workspace/plugin.py b/api/controllers/console/workspace/plugin.py index 805058ba5a..ea74fc0337 100644 --- a/api/controllers/console/workspace/plugin.py +++ b/api/controllers/console/workspace/plugin.py @@ -1,5 +1,6 @@ import io -from typing import Literal +from collections.abc import Mapping +from typing import Any, Literal from flask import request, send_file from flask_restx import Resource @@ -141,6 +142,15 @@ class ParserDynamicOptions(BaseModel): provider_type: Literal["tool", "trigger"] +class ParserDynamicOptionsWithCredentials(BaseModel): + plugin_id: str + provider: str + action: str + parameter: str + credential_id: str + credentials: Mapping[str, Any] + + class PluginPermissionSettingsPayload(BaseModel): install_permission: TenantPluginPermission.InstallPermission = TenantPluginPermission.InstallPermission.EVERYONE debug_permission: TenantPluginPermission.DebugPermission = TenantPluginPermission.DebugPermission.EVERYONE @@ -183,6 +193,7 @@ reg(ParserGithubUpgrade) reg(ParserUninstall) reg(ParserPermissionChange) reg(ParserDynamicOptions) +reg(ParserDynamicOptionsWithCredentials) reg(ParserPreferencesChange) reg(ParserExcludePlugin) reg(ParserReadme) @@ -657,6 +668,37 @@ class PluginFetchDynamicSelectOptionsApi(Resource): return jsonable_encoder({"options": options}) +@console_ns.route("/workspaces/current/plugin/parameters/dynamic-options-with-credentials") +class PluginFetchDynamicSelectOptionsWithCredentialsApi(Resource): + @console_ns.expect(console_ns.models[ParserDynamicOptionsWithCredentials.__name__]) + @setup_required + @login_required + @is_admin_or_owner_required + @account_initialization_required + def post(self): + """Fetch dynamic options using credentials directly (for edit mode).""" + current_user, tenant_id = current_account_with_tenant() + user_id = current_user.id + + args = ParserDynamicOptionsWithCredentials.model_validate(console_ns.payload) + + try: + options = PluginParameterService.get_dynamic_select_options_with_credentials( + tenant_id=tenant_id, + user_id=user_id, + plugin_id=args.plugin_id, + provider=args.provider, + action=args.action, + parameter=args.parameter, + credential_id=args.credential_id, + 
credentials=args.credentials, + ) + except PluginDaemonClientSideError as e: + raise ValueError(e) + + return jsonable_encoder({"options": options}) + + @console_ns.route("/workspaces/current/plugin/preferences/change") class PluginChangePreferencesApi(Resource): @console_ns.expect(console_ns.models[ParserPreferencesChange.__name__]) diff --git a/api/controllers/console/workspace/tool_providers.py b/api/controllers/console/workspace/tool_providers.py index cb711d16e4..e9e7b72718 100644 --- a/api/controllers/console/workspace/tool_providers.py +++ b/api/controllers/console/workspace/tool_providers.py @@ -1,4 +1,5 @@ import io +import logging from urllib.parse import urlparse from flask import make_response, redirect, request, send_file @@ -17,8 +18,8 @@ from controllers.console.wraps import ( is_admin_or_owner_required, setup_required, ) +from core.db.session_factory import session_factory from core.entities.mcp_provider import MCPAuthentication, MCPConfiguration -from core.helper.tool_provider_cache import ToolProviderListCache from core.mcp.auth.auth_flow import auth, handle_callback from core.mcp.error import MCPAuthError, MCPError, MCPRefreshTokenError from core.mcp.mcp_client import MCPClient @@ -40,6 +41,8 @@ from services.tools.tools_manage_service import ToolCommonService from services.tools.tools_transform_service import ToolTransformService from services.tools.workflow_tools_manage_service import WorkflowToolManageService +logger = logging.getLogger(__name__) + def is_valid_url(url: str) -> bool: if not url: @@ -945,8 +948,8 @@ class ToolProviderMCPApi(Resource): configuration = MCPConfiguration.model_validate(args["configuration"]) authentication = MCPAuthentication.model_validate(args["authentication"]) if args["authentication"] else None - # Create provider in transaction - with Session(db.engine) as session, session.begin(): + # 1) Create provider in a short transaction (no network I/O inside) + with session_factory.create_session() as session, session.begin(): service = MCPToolManageService(session=session) result = service.create_provider( tenant_id=tenant_id, @@ -962,8 +965,26 @@ class ToolProviderMCPApi(Resource): authentication=authentication, ) - # Invalidate cache AFTER transaction commits to avoid holding locks during Redis operations - ToolProviderListCache.invalidate_cache(tenant_id) + # 2) Try to fetch tools immediately after creation so they appear without a second save. + # Perform network I/O outside any DB session to avoid holding locks. 
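+            # The reconnect below performs the HTTP handshake and tool listing; only the
+            # quick DB write that stores the fetched tools runs inside the second
+            # short transaction, keeping network I/O out of any session.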
+ try: + reconnect = MCPToolManageService.reconnect_with_url( + server_url=args["server_url"], + headers=args.get("headers") or {}, + timeout=configuration.timeout, + sse_read_timeout=configuration.sse_read_timeout, + ) + # Update just-created provider with authed/tools in a new short transaction + with session_factory.create_session() as session, session.begin(): + service = MCPToolManageService(session=session) + db_provider = service.get_provider(provider_id=result.id, tenant_id=tenant_id) + db_provider.authed = reconnect.authed + db_provider.tools = reconnect.tools + + result = ToolTransformService.mcp_provider_to_user_provider(db_provider, for_list=True) + except Exception: + # Best-effort: if initial fetch fails (e.g., auth required), return created provider as-is + logger.warning("Failed to fetch MCP tools after creation", exc_info=True) return jsonable_encoder(result) @@ -1011,9 +1032,6 @@ class ToolProviderMCPApi(Resource): validation_result=validation_result, ) - # Invalidate cache AFTER transaction commits to avoid holding locks during Redis operations - ToolProviderListCache.invalidate_cache(current_tenant_id) - return {"result": "success"} @console_ns.expect(parser_mcp_delete) @@ -1028,9 +1046,6 @@ class ToolProviderMCPApi(Resource): service = MCPToolManageService(session=session) service.delete_provider(tenant_id=current_tenant_id, provider_id=args["provider_id"]) - # Invalidate cache AFTER transaction commits to avoid holding locks during Redis operations - ToolProviderListCache.invalidate_cache(current_tenant_id) - return {"result": "success"} @@ -1081,8 +1096,6 @@ class ToolMCPAuthApi(Resource): credentials=provider_entity.credentials, authed=True, ) - # Invalidate cache after updating credentials - ToolProviderListCache.invalidate_cache(tenant_id) return {"result": "success"} except MCPAuthError as e: try: @@ -1096,22 +1109,16 @@ class ToolMCPAuthApi(Resource): with Session(db.engine) as session, session.begin(): service = MCPToolManageService(session=session) response = service.execute_auth_actions(auth_result) - # Invalidate cache after auth actions may have updated provider state - ToolProviderListCache.invalidate_cache(tenant_id) return response except MCPRefreshTokenError as e: with Session(db.engine) as session, session.begin(): service = MCPToolManageService(session=session) service.clear_provider_credentials(provider_id=provider_id, tenant_id=tenant_id) - # Invalidate cache after clearing credentials - ToolProviderListCache.invalidate_cache(tenant_id) raise ValueError(f"Failed to refresh token, please try to authorize again: {e}") from e except (MCPError, ValueError) as e: with Session(db.engine) as session, session.begin(): service = MCPToolManageService(session=session) service.clear_provider_credentials(provider_id=provider_id, tenant_id=tenant_id) - # Invalidate cache after clearing credentials - ToolProviderListCache.invalidate_cache(tenant_id) raise ValueError(f"Failed to connect to MCP server: {e}") from e diff --git a/api/controllers/console/workspace/trigger_providers.py b/api/controllers/console/workspace/trigger_providers.py index 268473d6d1..497e62b790 100644 --- a/api/controllers/console/workspace/trigger_providers.py +++ b/api/controllers/console/workspace/trigger_providers.py @@ -1,11 +1,15 @@ import logging +from collections.abc import Mapping +from typing import Any from flask import make_response, redirect, request from flask_restx import Resource, reqparse +from pydantic import BaseModel, Field from sqlalchemy.orm import Session from 
werkzeug.exceptions import BadRequest, Forbidden from configs import dify_config +from constants import HIDDEN_VALUE, UNKNOWN_VALUE from controllers.web.error import NotFoundError from core.model_runtime.utils.encoders import jsonable_encoder from core.plugin.entities.plugin_daemon import CredentialType @@ -32,6 +36,32 @@ from ..wraps import ( logger = logging.getLogger(__name__) +class TriggerSubscriptionUpdateRequest(BaseModel): + """Request payload for updating a trigger subscription""" + + name: str | None = Field(default=None, description="The name for the subscription") + credentials: Mapping[str, Any] | None = Field(default=None, description="The credentials for the subscription") + parameters: Mapping[str, Any] | None = Field(default=None, description="The parameters for the subscription") + properties: Mapping[str, Any] | None = Field(default=None, description="The properties for the subscription") + + +class TriggerSubscriptionVerifyRequest(BaseModel): + """Request payload for verifying subscription credentials.""" + + credentials: Mapping[str, Any] = Field(description="The credentials to verify") + + +console_ns.schema_model( + TriggerSubscriptionUpdateRequest.__name__, + TriggerSubscriptionUpdateRequest.model_json_schema(ref_template="#/definitions/{model}"), +) + +console_ns.schema_model( + TriggerSubscriptionVerifyRequest.__name__, + TriggerSubscriptionVerifyRequest.model_json_schema(ref_template="#/definitions/{model}"), +) + + @console_ns.route("/workspaces/current/trigger-provider//icon") class TriggerProviderIconApi(Resource): @setup_required @@ -155,16 +185,16 @@ parser_api = ( @console_ns.route( - "/workspaces/current/trigger-provider//subscriptions/builder/verify/", + "/workspaces/current/trigger-provider//subscriptions/builder/verify-and-update/", ) -class TriggerSubscriptionBuilderVerifyApi(Resource): +class TriggerSubscriptionBuilderVerifyAndUpdateApi(Resource): @console_ns.expect(parser_api) @setup_required @login_required @edit_permission_required @account_initialization_required def post(self, provider, subscription_builder_id): - """Verify a subscription instance for a trigger provider""" + """Verify and update a subscription instance for a trigger provider""" user = current_user assert user.current_tenant_id is not None @@ -289,6 +319,83 @@ class TriggerSubscriptionBuilderBuildApi(Resource): raise ValueError(str(e)) from e +@console_ns.route( + "/workspaces/current/trigger-provider//subscriptions/update", +) +class TriggerSubscriptionUpdateApi(Resource): + @console_ns.expect(console_ns.models[TriggerSubscriptionUpdateRequest.__name__]) + @setup_required + @login_required + @edit_permission_required + @account_initialization_required + def post(self, subscription_id: str): + """Update a subscription instance""" + user = current_user + assert user.current_tenant_id is not None + + args = TriggerSubscriptionUpdateRequest.model_validate(console_ns.payload) + + subscription = TriggerProviderService.get_subscription_by_id( + tenant_id=user.current_tenant_id, + subscription_id=subscription_id, + ) + if not subscription: + raise NotFoundError(f"Subscription {subscription_id} not found") + + provider_id = TriggerProviderID(subscription.provider_id) + + try: + # rename only + if ( + args.name is not None + and args.credentials is None + and args.parameters is None + and args.properties is None + ): + TriggerProviderService.update_trigger_subscription( + tenant_id=user.current_tenant_id, + subscription_id=subscription_id, + name=args.name, + ) + return 200 + + # rebuild 
for create automatically by the provider + match subscription.credential_type: + case CredentialType.UNAUTHORIZED: + TriggerProviderService.update_trigger_subscription( + tenant_id=user.current_tenant_id, + subscription_id=subscription_id, + name=args.name, + properties=args.properties, + ) + return 200 + case CredentialType.API_KEY | CredentialType.OAUTH2: + if args.credentials: + new_credentials: dict[str, Any] = { + key: value if value != HIDDEN_VALUE else subscription.credentials.get(key, UNKNOWN_VALUE) + for key, value in args.credentials.items() + } + else: + new_credentials = subscription.credentials + + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=user.current_tenant_id, + name=args.name, + provider_id=provider_id, + subscription_id=subscription_id, + credentials=new_credentials, + parameters=args.parameters or subscription.parameters, + ) + return 200 + case _: + raise BadRequest("Invalid credential type") + except ValueError as e: + raise BadRequest(str(e)) + except Exception as e: + logger.exception("Error updating subscription", exc_info=e) + raise + + @console_ns.route( "/workspaces/current/trigger-provider//subscriptions/delete", ) @@ -576,3 +683,38 @@ class TriggerOAuthClientManageApi(Resource): except Exception as e: logger.exception("Error removing OAuth client", exc_info=e) raise + + +@console_ns.route( + "/workspaces/current/trigger-provider//subscriptions/verify/", +) +class TriggerSubscriptionVerifyApi(Resource): + @console_ns.expect(console_ns.models[TriggerSubscriptionVerifyRequest.__name__]) + @setup_required + @login_required + @edit_permission_required + @account_initialization_required + def post(self, provider, subscription_id): + """Verify credentials for an existing subscription (edit mode only)""" + user = current_user + assert user.current_tenant_id is not None + + verify_request: TriggerSubscriptionVerifyRequest = TriggerSubscriptionVerifyRequest.model_validate( + console_ns.payload + ) + + try: + result = TriggerProviderService.verify_subscription_credentials( + tenant_id=user.current_tenant_id, + user_id=user.id, + provider_id=TriggerProviderID(provider), + subscription_id=subscription_id, + credentials=verify_request.credentials, + ) + return result + except ValueError as e: + logger.warning("Credential verification failed", exc_info=e) + raise BadRequest(str(e)) from e + except Exception as e: + logger.exception("Error verifying subscription credentials", exc_info=e) + raise BadRequest(str(e)) from e diff --git a/api/controllers/files/image_preview.py b/api/controllers/files/image_preview.py index 64f47f426a..04db1c67cb 100644 --- a/api/controllers/files/image_preview.py +++ b/api/controllers/files/image_preview.py @@ -7,6 +7,7 @@ from werkzeug.exceptions import NotFound import services from controllers.common.errors import UnsupportedFileTypeError +from controllers.common.file_response import enforce_download_for_html from controllers.files import files_ns from extensions.ext_database import db from services.account_service import TenantService @@ -138,6 +139,13 @@ class FilePreviewApi(Resource): response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{encoded_filename}" response.headers["Content-Type"] = "application/octet-stream" + enforce_download_for_html( + response, + mime_type=upload_file.mime_type, + filename=upload_file.name, + extension=upload_file.extension, + ) + return response diff --git a/api/controllers/files/tool_files.py b/api/controllers/files/tool_files.py index c487a0a915..89aa472015 100644 --- 
a/api/controllers/files/tool_files.py +++ b/api/controllers/files/tool_files.py @@ -6,6 +6,7 @@ from pydantic import BaseModel, Field from werkzeug.exceptions import Forbidden, NotFound from controllers.common.errors import UnsupportedFileTypeError +from controllers.common.file_response import enforce_download_for_html from controllers.files import files_ns from core.tools.signature import verify_tool_file_signature from core.tools.tool_file_manager import ToolFileManager @@ -78,4 +79,11 @@ class ToolFileApi(Resource): encoded_filename = quote(tool_file.name) response.headers["Content-Disposition"] = f"attachment; filename*=UTF-8''{encoded_filename}" + enforce_download_for_html( + response, + mime_type=tool_file.mimetype, + filename=tool_file.name, + extension=extension, + ) + return response diff --git a/api/controllers/service_api/app/file_preview.py b/api/controllers/service_api/app/file_preview.py index 60f422b88e..f853a124ef 100644 --- a/api/controllers/service_api/app/file_preview.py +++ b/api/controllers/service_api/app/file_preview.py @@ -5,6 +5,7 @@ from flask import Response, request from flask_restx import Resource from pydantic import BaseModel, Field +from controllers.common.file_response import enforce_download_for_html from controllers.common.schema import register_schema_model from controllers.service_api import service_api_ns from controllers.service_api.app.error import ( @@ -183,6 +184,13 @@ class FilePreviewApi(Resource): # Override content-type for downloads to force download response.headers["Content-Type"] = "application/octet-stream" + enforce_download_for_html( + response, + mime_type=upload_file.mime_type, + filename=upload_file.name, + extension=upload_file.extension, + ) + # Add caching headers for performance response.headers["Cache-Control"] = "public, max-age=3600" # Cache for 1 hour diff --git a/api/controllers/service_api/dataset/dataset.py b/api/controllers/service_api/dataset/dataset.py index 4f91f40c55..94faf8dd42 100644 --- a/api/controllers/service_api/dataset/dataset.py +++ b/api/controllers/service_api/dataset/dataset.py @@ -13,7 +13,6 @@ from controllers.service_api.dataset.error import DatasetInUseError, DatasetName from controllers.service_api.wraps import ( DatasetApiResource, cloud_edition_billing_rate_limit_check, - validate_dataset_token, ) from core.model_runtime.entities.model_entities import ModelType from core.provider_manager import ProviderManager @@ -460,9 +459,8 @@ class DatasetTagsApi(DatasetApiResource): 401: "Unauthorized - invalid API token", } ) - @validate_dataset_token @service_api_ns.marshal_with(build_dataset_tag_fields(service_api_ns)) - def get(self, _, dataset_id): + def get(self, _): """Get all knowledge type tags.""" assert isinstance(current_user, Account) cid = current_user.current_tenant_id @@ -482,8 +480,7 @@ class DatasetTagsApi(DatasetApiResource): } ) @service_api_ns.marshal_with(build_dataset_tag_fields(service_api_ns)) - @validate_dataset_token - def post(self, _, dataset_id): + def post(self, _): """Add a knowledge type tag.""" assert isinstance(current_user, Account) if not (current_user.has_edit_permission or current_user.is_dataset_editor): @@ -506,8 +503,7 @@ class DatasetTagsApi(DatasetApiResource): } ) @service_api_ns.marshal_with(build_dataset_tag_fields(service_api_ns)) - @validate_dataset_token - def patch(self, _, dataset_id): + def patch(self, _): assert isinstance(current_user, Account) if not (current_user.has_edit_permission or current_user.is_dataset_editor): raise Forbidden() @@ -533,9 +529,8 @@ 
class DatasetTagsApi(DatasetApiResource): 403: "Forbidden - insufficient permissions", } ) - @validate_dataset_token @edit_permission_required - def delete(self, _, dataset_id): + def delete(self, _): """Delete a knowledge type tag.""" payload = TagDeletePayload.model_validate(service_api_ns.payload or {}) TagService.delete_tag(payload.tag_id) @@ -555,8 +550,7 @@ class DatasetTagBindingApi(DatasetApiResource): 403: "Forbidden - insufficient permissions", } ) - @validate_dataset_token - def post(self, _, dataset_id): + def post(self, _): # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator assert isinstance(current_user, Account) if not (current_user.has_edit_permission or current_user.is_dataset_editor): @@ -580,8 +574,7 @@ class DatasetTagUnbindingApi(DatasetApiResource): 403: "Forbidden - insufficient permissions", } ) - @validate_dataset_token - def post(self, _, dataset_id): + def post(self, _): # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator assert isinstance(current_user, Account) if not (current_user.has_edit_permission or current_user.is_dataset_editor): @@ -604,7 +597,6 @@ class DatasetTagsBindingStatusApi(DatasetApiResource): 401: "Unauthorized - invalid API token", } ) - @validate_dataset_token def get(self, _, *args, **kwargs): """Get all knowledge type tags.""" dataset_id = kwargs.get("dataset_id") diff --git a/api/controllers/web/forgot_password.py b/api/controllers/web/forgot_password.py index b9e391e049..690b76655f 100644 --- a/api/controllers/web/forgot_password.py +++ b/api/controllers/web/forgot_password.py @@ -2,10 +2,12 @@ import base64 import secrets from flask import request -from flask_restx import Resource, reqparse +from flask_restx import Resource +from pydantic import BaseModel, Field, field_validator from sqlalchemy import select from sqlalchemy.orm import Session +from controllers.common.schema import register_schema_models from controllers.console.auth.error import ( AuthenticationFailedError, EmailCodeError, @@ -18,14 +20,40 @@ from controllers.console.error import EmailSendIpLimitError from controllers.console.wraps import email_password_login_enabled, only_edition_enterprise, setup_required from controllers.web import web_ns from extensions.ext_database import db -from libs.helper import email, extract_remote_ip +from libs.helper import EmailStr, extract_remote_ip from libs.password import hash_password, valid_password from models import Account from services.account_service import AccountService +class ForgotPasswordSendPayload(BaseModel): + email: EmailStr + language: str | None = None + + +class ForgotPasswordCheckPayload(BaseModel): + email: EmailStr + code: str + token: str = Field(min_length=1) + + +class ForgotPasswordResetPayload(BaseModel): + token: str = Field(min_length=1) + new_password: str + password_confirm: str + + @field_validator("new_password", "password_confirm") + @classmethod + def validate_password(cls, value: str) -> str: + return valid_password(value) + + +register_schema_models(web_ns, ForgotPasswordSendPayload, ForgotPasswordCheckPayload, ForgotPasswordResetPayload) + + @web_ns.route("/forgot-password") class ForgotPasswordSendEmailApi(Resource): + @web_ns.expect(web_ns.models[ForgotPasswordSendPayload.__name__]) @only_edition_enterprise @setup_required @email_password_login_enabled @@ -40,35 +68,31 @@ class ForgotPasswordSendEmailApi(Resource): } ) def post(self): - parser = ( - reqparse.RequestParser() - .add_argument("email", 
type=email, required=True, location="json") - .add_argument("language", type=str, required=False, location="json") - ) - args = parser.parse_args() + payload = ForgotPasswordSendPayload.model_validate(web_ns.payload or {}) ip_address = extract_remote_ip(request) if AccountService.is_email_send_ip_limit(ip_address): raise EmailSendIpLimitError() - if args["language"] is not None and args["language"] == "zh-Hans": + if payload.language == "zh-Hans": language = "zh-Hans" else: language = "en-US" with Session(db.engine) as session: - account = session.execute(select(Account).filter_by(email=args["email"])).scalar_one_or_none() + account = session.execute(select(Account).filter_by(email=payload.email)).scalar_one_or_none() token = None if account is None: raise AuthenticationFailedError() else: - token = AccountService.send_reset_password_email(account=account, email=args["email"], language=language) + token = AccountService.send_reset_password_email(account=account, email=payload.email, language=language) return {"result": "success", "data": token} @web_ns.route("/forgot-password/validity") class ForgotPasswordCheckApi(Resource): + @web_ns.expect(web_ns.models[ForgotPasswordCheckPayload.__name__]) @only_edition_enterprise @setup_required @email_password_login_enabled @@ -78,45 +102,40 @@ class ForgotPasswordCheckApi(Resource): responses={200: "Token is valid", 400: "Bad request - invalid token format", 401: "Invalid or expired token"} ) def post(self): - parser = ( - reqparse.RequestParser() - .add_argument("email", type=str, required=True, location="json") - .add_argument("code", type=str, required=True, location="json") - .add_argument("token", type=str, required=True, nullable=False, location="json") - ) - args = parser.parse_args() + payload = ForgotPasswordCheckPayload.model_validate(web_ns.payload or {}) - user_email = args["email"] + user_email = payload.email - is_forgot_password_error_rate_limit = AccountService.is_forgot_password_error_rate_limit(args["email"]) + is_forgot_password_error_rate_limit = AccountService.is_forgot_password_error_rate_limit(payload.email) if is_forgot_password_error_rate_limit: raise EmailPasswordResetLimitError() - token_data = AccountService.get_reset_password_data(args["token"]) + token_data = AccountService.get_reset_password_data(payload.token) if token_data is None: raise InvalidTokenError() if user_email != token_data.get("email"): raise InvalidEmailError() - if args["code"] != token_data.get("code"): - AccountService.add_forgot_password_error_rate_limit(args["email"]) + if payload.code != token_data.get("code"): + AccountService.add_forgot_password_error_rate_limit(payload.email) raise EmailCodeError() # Verified, revoke the first token - AccountService.revoke_reset_password_token(args["token"]) + AccountService.revoke_reset_password_token(payload.token) # Refresh token data by generating a new token _, new_token = AccountService.generate_reset_password_token( - user_email, code=args["code"], additional_data={"phase": "reset"} + user_email, code=payload.code, additional_data={"phase": "reset"} ) - AccountService.reset_forgot_password_error_rate_limit(args["email"]) + AccountService.reset_forgot_password_error_rate_limit(payload.email) return {"is_valid": True, "email": token_data.get("email"), "token": new_token} @web_ns.route("/forgot-password/resets") class ForgotPasswordResetApi(Resource): + @web_ns.expect(web_ns.models[ForgotPasswordResetPayload.__name__]) @only_edition_enterprise @setup_required @email_password_login_enabled @@ -131,20 +150,14 
@@ class ForgotPasswordResetApi(Resource): } ) def post(self): - parser = ( - reqparse.RequestParser() - .add_argument("token", type=str, required=True, nullable=False, location="json") - .add_argument("new_password", type=valid_password, required=True, nullable=False, location="json") - .add_argument("password_confirm", type=valid_password, required=True, nullable=False, location="json") - ) - args = parser.parse_args() + payload = ForgotPasswordResetPayload.model_validate(web_ns.payload or {}) # Validate passwords match - if args["new_password"] != args["password_confirm"]: + if payload.new_password != payload.password_confirm: raise PasswordMismatchError() # Validate token and get reset data - reset_data = AccountService.get_reset_password_data(args["token"]) + reset_data = AccountService.get_reset_password_data(payload.token) if not reset_data: raise InvalidTokenError() # Must use token in reset phase @@ -152,11 +165,11 @@ class ForgotPasswordResetApi(Resource): raise InvalidTokenError() # Revoke token to prevent reuse - AccountService.revoke_reset_password_token(args["token"]) + AccountService.revoke_reset_password_token(payload.token) # Generate secure salt and hash password salt = secrets.token_bytes(16) - password_hashed = hash_password(args["new_password"], salt) + password_hashed = hash_password(payload.new_password, salt) email = reset_data.get("email", "") @@ -170,7 +183,7 @@ class ForgotPasswordResetApi(Resource): return {"result": "success"} - def _update_existing_account(self, account, password_hashed, salt, session): + def _update_existing_account(self, account: Account, password_hashed, salt, session): # Update existing account credentials account.password = base64.b64encode(password_hashed).decode() account.password_salt = base64.b64encode(salt).decode() diff --git a/api/core/app/apps/base_app_queue_manager.py b/api/core/app/apps/base_app_queue_manager.py index 698eee9894..b41bedbea4 100644 --- a/api/core/app/apps/base_app_queue_manager.py +++ b/api/core/app/apps/base_app_queue_manager.py @@ -90,6 +90,7 @@ class AppQueueManager: """ self._clear_task_belong_cache() self._q.put(None) + self._graph_runtime_state = None # Release reference to allow GC to reclaim memory def _clear_task_belong_cache(self) -> None: """ diff --git a/api/core/helper/code_executor/jinja2/jinja2_transformer.py b/api/core/helper/code_executor/jinja2/jinja2_transformer.py index 969125d2f7..5e4807401e 100644 --- a/api/core/helper/code_executor/jinja2/jinja2_transformer.py +++ b/api/core/helper/code_executor/jinja2/jinja2_transformer.py @@ -1,9 +1,14 @@ +from collections.abc import Mapping from textwrap import dedent +from typing import Any from core.helper.code_executor.template_transformer import TemplateTransformer class Jinja2TemplateTransformer(TemplateTransformer): + # Use separate placeholder for base64-encoded template to avoid confusion + _template_b64_placeholder: str = "{{template_b64}}" + @classmethod def transform_response(cls, response: str): """ @@ -13,18 +18,35 @@ class Jinja2TemplateTransformer(TemplateTransformer): """ return {"result": cls.extract_result_str_from_response(response)} + @classmethod + def assemble_runner_script(cls, code: str, inputs: Mapping[str, Any]) -> str: + """ + Override base class to use base64 encoding for template code. + This prevents issues with special characters (quotes, newlines) in templates + breaking the generated Python script. Fixes #26818. 
+ """ + script = cls.get_runner_script() + # Encode template as base64 to safely embed any content including quotes + code_b64 = cls.serialize_code(code) + script = script.replace(cls._template_b64_placeholder, code_b64) + inputs_str = cls.serialize_inputs(inputs) + script = script.replace(cls._inputs_placeholder, inputs_str) + return script + @classmethod def get_runner_script(cls) -> str: runner_script = dedent(f""" - # declare main function - def main(**inputs): - import jinja2 - template = jinja2.Template('''{cls._code_placeholder}''') - return template.render(**inputs) - + import jinja2 import json from base64 import b64decode + # declare main function + def main(**inputs): + # Decode base64-encoded template to handle special characters safely + template_code = b64decode('{cls._template_b64_placeholder}').decode('utf-8') + template = jinja2.Template(template_code) + return template.render(**inputs) + # decode and prepare input dict inputs_obj = json.loads(b64decode('{cls._inputs_placeholder}').decode('utf-8')) diff --git a/api/core/helper/code_executor/template_transformer.py b/api/core/helper/code_executor/template_transformer.py index 3965f8cb31..6fda073913 100644 --- a/api/core/helper/code_executor/template_transformer.py +++ b/api/core/helper/code_executor/template_transformer.py @@ -13,6 +13,15 @@ class TemplateTransformer(ABC): _inputs_placeholder: str = "{{inputs}}" _result_tag: str = "<>" + @classmethod + def serialize_code(cls, code: str) -> str: + """ + Serialize template code to base64 to safely embed in generated script. + This prevents issues with special characters like quotes breaking the script. + """ + code_bytes = code.encode("utf-8") + return b64encode(code_bytes).decode("utf-8") + @classmethod def transform_caller(cls, code: str, inputs: Mapping[str, Any]) -> tuple[str, str]: """ diff --git a/api/core/helper/ssrf_proxy.py b/api/core/helper/ssrf_proxy.py index f2172e4e2f..0b36969cf9 100644 --- a/api/core/helper/ssrf_proxy.py +++ b/api/core/helper/ssrf_proxy.py @@ -118,13 +118,11 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs): # Build the request manually to preserve the Host header # httpx may override the Host header when using a proxy, so we use # the request API to explicitly set headers before sending - request = client.build_request(method=method, url=url, **kwargs) - - # If user explicitly provided a Host header, ensure it's preserved + headers = {k: v for k, v in headers.items() if k.lower() != "host"} if user_provided_host is not None: - request.headers["Host"] = user_provided_host - - response = client.send(request) + headers["host"] = user_provided_host + kwargs["headers"] = headers + response = client.request(method=method, url=url, **kwargs) # Check for SSRF protection by Squid proxy if response.status_code in (401, 403): diff --git a/api/core/helper/tool_provider_cache.py b/api/core/helper/tool_provider_cache.py deleted file mode 100644 index eef5937407..0000000000 --- a/api/core/helper/tool_provider_cache.py +++ /dev/null @@ -1,56 +0,0 @@ -import json -import logging -from typing import Any - -from core.tools.entities.api_entities import ToolProviderTypeApiLiteral -from extensions.ext_redis import redis_client, redis_fallback - -logger = logging.getLogger(__name__) - - -class ToolProviderListCache: - """Cache for tool provider lists""" - - CACHE_TTL = 300 # 5 minutes - - @staticmethod - def _generate_cache_key(tenant_id: str, typ: ToolProviderTypeApiLiteral = None) -> str: - """Generate cache key for tool providers 
list""" - type_filter = typ or "all" - return f"tool_providers:tenant_id:{tenant_id}:type:{type_filter}" - - @staticmethod - @redis_fallback(default_return=None) - def get_cached_providers(tenant_id: str, typ: ToolProviderTypeApiLiteral = None) -> list[dict[str, Any]] | None: - """Get cached tool providers""" - cache_key = ToolProviderListCache._generate_cache_key(tenant_id, typ) - cached_data = redis_client.get(cache_key) - if cached_data: - try: - return json.loads(cached_data.decode("utf-8")) - except (json.JSONDecodeError, UnicodeDecodeError): - logger.warning("Failed to decode cached tool providers data") - return None - return None - - @staticmethod - @redis_fallback() - def set_cached_providers(tenant_id: str, typ: ToolProviderTypeApiLiteral, providers: list[dict[str, Any]]): - """Cache tool providers""" - cache_key = ToolProviderListCache._generate_cache_key(tenant_id, typ) - redis_client.setex(cache_key, ToolProviderListCache.CACHE_TTL, json.dumps(providers)) - - @staticmethod - @redis_fallback() - def invalidate_cache(tenant_id: str, typ: ToolProviderTypeApiLiteral = None): - """Invalidate cache for tool providers""" - if typ: - # Invalidate specific type cache - cache_key = ToolProviderListCache._generate_cache_key(tenant_id, typ) - redis_client.delete(cache_key) - else: - # Invalidate all caches for this tenant - pattern = f"tool_providers:tenant_id:{tenant_id}:*" - keys = list(redis_client.scan_iter(pattern)) - if keys: - redis_client.delete(*keys) diff --git a/api/core/indexing_runner.py b/api/core/indexing_runner.py index 59de4f403d..f1b50f360b 100644 --- a/api/core/indexing_runner.py +++ b/api/core/indexing_runner.py @@ -396,7 +396,7 @@ class IndexingRunner: datasource_type=DatasourceType.NOTION, notion_info=NotionInfo.model_validate( { - "credential_id": data_source_info["credential_id"], + "credential_id": data_source_info.get("credential_id"), "notion_workspace_id": data_source_info["notion_workspace_id"], "notion_obj_id": data_source_info["notion_page_id"], "notion_page_type": data_source_info["type"], diff --git a/api/core/mcp/client/streamable_client.py b/api/core/mcp/client/streamable_client.py index f81e7cead8..5c3cd0d8f8 100644 --- a/api/core/mcp/client/streamable_client.py +++ b/api/core/mcp/client/streamable_client.py @@ -313,17 +313,20 @@ class StreamableHTTPTransport: if is_initialization: self._maybe_extract_session_id_from_response(response) - content_type = cast(str, response.headers.get(CONTENT_TYPE, "").lower()) + # Per https://modelcontextprotocol.io/specification/2025-06-18/basic#notifications: + # The server MUST NOT send a response to notifications. 
+ if isinstance(message.root, JSONRPCRequest): + content_type = cast(str, response.headers.get(CONTENT_TYPE, "").lower()) - if content_type.startswith(JSON): - self._handle_json_response(response, ctx.server_to_client_queue) - elif content_type.startswith(SSE): - self._handle_sse_response(response, ctx) - else: - self._handle_unexpected_content_type( - content_type, - ctx.server_to_client_queue, - ) + if content_type.startswith(JSON): + self._handle_json_response(response, ctx.server_to_client_queue) + elif content_type.startswith(SSE): + self._handle_sse_response(response, ctx) + else: + self._handle_unexpected_content_type( + content_type, + ctx.server_to_client_queue, + ) def _handle_json_response( self, diff --git a/api/core/plugin/entities/parameters.py b/api/core/plugin/entities/parameters.py index 88a3a7bd43..bfa662b9f6 100644 --- a/api/core/plugin/entities/parameters.py +++ b/api/core/plugin/entities/parameters.py @@ -76,7 +76,7 @@ class PluginParameter(BaseModel): auto_generate: PluginParameterAutoGenerate | None = None template: PluginParameterTemplate | None = None required: bool = False - default: Union[float, int, str, bool] | None = None + default: Union[float, int, str, bool, list, dict] | None = None min: Union[float, int] | None = None max: Union[float, int] | None = None precision: int | None = None diff --git a/api/core/rag/datasource/retrieval_service.py b/api/core/rag/datasource/retrieval_service.py index 9807cb4e6a..43912cd75d 100644 --- a/api/core/rag/datasource/retrieval_service.py +++ b/api/core/rag/datasource/retrieval_service.py @@ -13,7 +13,7 @@ from core.model_runtime.entities.model_entities import ModelType from core.rag.data_post_processor.data_post_processor import DataPostProcessor from core.rag.datasource.keyword.keyword_factory import Keyword from core.rag.datasource.vdb.vector_factory import Vector -from core.rag.embedding.retrieval import RetrievalSegments +from core.rag.embedding.retrieval import RetrievalChildChunk, RetrievalSegments from core.rag.entities.metadata_entities import MetadataCondition from core.rag.index_processor.constant.doc_type import DocType from core.rag.index_processor.constant.index_type import IndexStructureType @@ -381,10 +381,9 @@ class RetrievalService: records = [] include_segment_ids = set() segment_child_map = {} - segment_file_map = {} valid_dataset_documents = {} - image_doc_ids = [] + image_doc_ids: list[Any] = [] child_index_node_ids = [] index_node_ids = [] doc_to_document_map = {} @@ -417,28 +416,39 @@ class RetrievalService: child_index_node_ids = [i for i in child_index_node_ids if i] index_node_ids = [i for i in index_node_ids if i] - segment_ids = [] + segment_ids: list[str] = [] index_node_segments: list[DocumentSegment] = [] segments: list[DocumentSegment] = [] - attachment_map = {} - child_chunk_map = {} - doc_segment_map = {} + attachment_map: dict[str, list[dict[str, Any]]] = {} + child_chunk_map: dict[str, list[ChildChunk]] = {} + doc_segment_map: dict[str, list[str]] = {} with session_factory.create_session() as session: attachments = cls.get_segment_attachment_infos(image_doc_ids, session) for attachment in attachments: segment_ids.append(attachment["segment_id"]) - attachment_map[attachment["segment_id"]] = attachment - doc_segment_map[attachment["segment_id"]] = attachment["attachment_id"] - + if attachment["segment_id"] in attachment_map: + attachment_map[attachment["segment_id"]].append(attachment["attachment_info"]) + else: + attachment_map[attachment["segment_id"]] = [attachment["attachment_info"]] 
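+                # A segment can own several attachments and child chunks, so ids are
+                # accumulated into per-segment lists instead of overwritten as in the
+                # previous single-value mappings.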
+ if attachment["segment_id"] in doc_segment_map: + doc_segment_map[attachment["segment_id"]].append(attachment["attachment_id"]) + else: + doc_segment_map[attachment["segment_id"]] = [attachment["attachment_id"]] child_chunk_stmt = select(ChildChunk).where(ChildChunk.index_node_id.in_(child_index_node_ids)) child_index_nodes = session.execute(child_chunk_stmt).scalars().all() for i in child_index_nodes: segment_ids.append(i.segment_id) - child_chunk_map[i.segment_id] = i - doc_segment_map[i.segment_id] = i.index_node_id + if i.segment_id in child_chunk_map: + child_chunk_map[i.segment_id].append(i) + else: + child_chunk_map[i.segment_id] = [i] + if i.segment_id in doc_segment_map: + doc_segment_map[i.segment_id].append(i.index_node_id) + else: + doc_segment_map[i.segment_id] = [i.index_node_id] if index_node_ids: document_segment_stmt = select(DocumentSegment).where( @@ -448,7 +458,7 @@ class RetrievalService: ) index_node_segments = session.execute(document_segment_stmt).scalars().all() # type: ignore for index_node_segment in index_node_segments: - doc_segment_map[index_node_segment.id] = index_node_segment.index_node_id + doc_segment_map[index_node_segment.id] = [index_node_segment.index_node_id] if segment_ids: document_segment_stmt = select(DocumentSegment).where( DocumentSegment.enabled == True, @@ -461,95 +471,86 @@ class RetrievalService: segments.extend(index_node_segments) for segment in segments: - doc_id = doc_segment_map.get(segment.id) - child_chunk = child_chunk_map.get(segment.id) - attachment_info = attachment_map.get(segment.id) + child_chunks: list[ChildChunk] = child_chunk_map.get(segment.id, []) + attachment_infos: list[dict[str, Any]] = attachment_map.get(segment.id, []) + ds_dataset_document: DatasetDocument | None = valid_dataset_documents.get(segment.document_id) - if doc_id: - document = doc_to_document_map[doc_id] - ds_dataset_document: DatasetDocument | None = valid_dataset_documents.get( - document.metadata.get("document_id") - ) - - if ds_dataset_document and ds_dataset_document.doc_form == IndexStructureType.PARENT_CHILD_INDEX: - if segment.id not in include_segment_ids: - include_segment_ids.add(segment.id) - if child_chunk: + if ds_dataset_document and ds_dataset_document.doc_form == IndexStructureType.PARENT_CHILD_INDEX: + if segment.id not in include_segment_ids: + include_segment_ids.add(segment.id) + if child_chunks or attachment_infos: + child_chunk_details = [] + max_score = 0.0 + for child_chunk in child_chunks: + document = doc_to_document_map[child_chunk.index_node_id] child_chunk_detail = { "id": child_chunk.id, "content": child_chunk.content, "position": child_chunk.position, "score": document.metadata.get("score", 0.0) if document else 0.0, } - map_detail = { - "max_score": document.metadata.get("score", 0.0) if document else 0.0, - "child_chunks": [child_chunk_detail], - } - segment_child_map[segment.id] = map_detail - record = { - "segment": segment, + child_chunk_details.append(child_chunk_detail) + max_score = max(max_score, document.metadata.get("score", 0.0) if document else 0.0) + for attachment_info in attachment_infos: + file_document = doc_to_document_map[attachment_info["id"]] + max_score = max( + max_score, file_document.metadata.get("score", 0.0) if file_document else 0.0 + ) + + map_detail = { + "max_score": max_score, + "child_chunks": child_chunk_details, } - if attachment_info: - segment_file_map[segment.id] = [attachment_info] - records.append(record) - else: - if child_chunk: - child_chunk_detail = { - "id": child_chunk.id, - 
"content": child_chunk.content, - "position": child_chunk.position, - "score": document.metadata.get("score", 0.0), - } - if segment.id in segment_child_map: - segment_child_map[segment.id]["child_chunks"].append(child_chunk_detail) # type: ignore - segment_child_map[segment.id]["max_score"] = max( - segment_child_map[segment.id]["max_score"], - document.metadata.get("score", 0.0) if document else 0.0, - ) - else: - segment_child_map[segment.id] = { - "max_score": document.metadata.get("score", 0.0) if document else 0.0, - "child_chunks": [child_chunk_detail], - } - if attachment_info: - if segment.id in segment_file_map: - segment_file_map[segment.id].append(attachment_info) - else: - segment_file_map[segment.id] = [attachment_info] - else: - if segment.id not in include_segment_ids: - include_segment_ids.add(segment.id) - record = { - "segment": segment, - "score": document.metadata.get("score", 0.0), # type: ignore - } - if attachment_info: - segment_file_map[segment.id] = [attachment_info] - records.append(record) - else: - if attachment_info: - attachment_infos = segment_file_map.get(segment.id, []) - if attachment_info not in attachment_infos: - attachment_infos.append(attachment_info) - segment_file_map[segment.id] = attachment_infos + segment_child_map[segment.id] = map_detail + record: dict[str, Any] = { + "segment": segment, + } + records.append(record) + else: + if segment.id not in include_segment_ids: + include_segment_ids.add(segment.id) + max_score = 0.0 + segment_document = doc_to_document_map.get(segment.index_node_id) + if segment_document: + max_score = max(max_score, segment_document.metadata.get("score", 0.0)) + for attachment_info in attachment_infos: + file_doc = doc_to_document_map.get(attachment_info["id"]) + if file_doc: + max_score = max(max_score, file_doc.metadata.get("score", 0.0)) + record = { + "segment": segment, + "score": max_score, + } + records.append(record) # Add child chunks information to records for record in records: if record["segment"].id in segment_child_map: record["child_chunks"] = segment_child_map[record["segment"].id].get("child_chunks") # type: ignore record["score"] = segment_child_map[record["segment"].id]["max_score"] # type: ignore - if record["segment"].id in segment_file_map: - record["files"] = segment_file_map[record["segment"].id] # type: ignore[assignment] + if record["segment"].id in attachment_map: + record["files"] = attachment_map[record["segment"].id] # type: ignore[assignment] - result = [] + result: list[RetrievalSegments] = [] for record in records: # Extract segment segment = record["segment"] # Extract child_chunks, ensuring it's a list or None - child_chunks = record.get("child_chunks") - if not isinstance(child_chunks, list): - child_chunks = None + raw_child_chunks = record.get("child_chunks") + child_chunks_list: list[RetrievalChildChunk] | None = None + if isinstance(raw_child_chunks, list): + # Sort by score descending + sorted_chunks = sorted(raw_child_chunks, key=lambda x: x.get("score", 0.0), reverse=True) + child_chunks_list = [ + RetrievalChildChunk( + id=chunk["id"], + content=chunk["content"], + score=chunk.get("score", 0.0), + position=chunk["position"], + ) + for chunk in sorted_chunks + ] # Extract files, ensuring it's a list or None files = record.get("files") @@ -566,11 +567,11 @@ class RetrievalService: # Create RetrievalSegments object retrieval_segment = RetrievalSegments( - segment=segment, child_chunks=child_chunks, score=score, files=files + segment=segment, child_chunks=child_chunks_list, 
score=score, files=files ) result.append(retrieval_segment) - return result + return sorted(result, key=lambda x: x.score if x.score is not None else 0.0, reverse=True) except Exception as e: db.session.rollback() raise e diff --git a/api/core/rag/datasource/vdb/pgvector/pgvector.py b/api/core/rag/datasource/vdb/pgvector/pgvector.py index 445a0a7f8b..0615b8312c 100644 --- a/api/core/rag/datasource/vdb/pgvector/pgvector.py +++ b/api/core/rag/datasource/vdb/pgvector/pgvector.py @@ -255,7 +255,10 @@ class PGVector(BaseVector): return with self._get_cursor() as cur: - cur.execute("CREATE EXTENSION IF NOT EXISTS vector") + cur.execute("SELECT 1 FROM pg_extension WHERE extname = 'vector'") + if not cur.fetchone(): + cur.execute("CREATE EXTENSION vector") + cur.execute(SQL_CREATE_TABLE.format(table_name=self.table_name, dimension=dimension)) # PG hnsw index only support 2000 dimension or less # ref: https://github.com/pgvector/pgvector?tab=readme-ov-file#indexing diff --git a/api/core/rag/extractor/firecrawl/firecrawl_app.py b/api/core/rag/extractor/firecrawl/firecrawl_app.py index 789ac8557d..5d6223db06 100644 --- a/api/core/rag/extractor/firecrawl/firecrawl_app.py +++ b/api/core/rag/extractor/firecrawl/firecrawl_app.py @@ -25,7 +25,7 @@ class FirecrawlApp: } if params: json_data.update(params) - response = self._post_request(f"{self.base_url}/v2/scrape", json_data, headers) + response = self._post_request(self._build_url("v2/scrape"), json_data, headers) if response.status_code == 200: response_data = response.json() data = response_data["data"] @@ -42,7 +42,7 @@ class FirecrawlApp: json_data = {"url": url} if params: json_data.update(params) - response = self._post_request(f"{self.base_url}/v2/crawl", json_data, headers) + response = self._post_request(self._build_url("v2/crawl"), json_data, headers) if response.status_code == 200: # There's also another two fields in the response: "success" (bool) and "url" (str) job_id = response.json().get("id") @@ -58,7 +58,7 @@ class FirecrawlApp: if params: # Pass through provided params, including optional "sitemap": "only" | "include" | "skip" json_data.update(params) - response = self._post_request(f"{self.base_url}/v2/map", json_data, headers) + response = self._post_request(self._build_url("v2/map"), json_data, headers) if response.status_code == 200: return cast(dict[str, Any], response.json()) elif response.status_code in {402, 409, 500, 429, 408}: @@ -69,7 +69,7 @@ class FirecrawlApp: def check_crawl_status(self, job_id) -> dict[str, Any]: headers = self._prepare_headers() - response = self._get_request(f"{self.base_url}/v2/crawl/{job_id}", headers) + response = self._get_request(self._build_url(f"v2/crawl/{job_id}"), headers) if response.status_code == 200: crawl_status_response = response.json() if crawl_status_response.get("status") == "completed": @@ -120,6 +120,10 @@ class FirecrawlApp: def _prepare_headers(self) -> dict[str, Any]: return {"Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}"} + def _build_url(self, path: str) -> str: + # ensure exactly one slash between base and path, regardless of user-provided base_url + return f"{self.base_url.rstrip('/')}/{path.lstrip('/')}" + def _post_request(self, url, data, headers, retries=3, backoff_factor=0.5) -> httpx.Response: for attempt in range(retries): response = httpx.post(url, headers=headers, json=data) @@ -139,7 +143,11 @@ class FirecrawlApp: return response def _handle_error(self, response, action): - error_message = response.json().get("error", "Unknown error 
occurred") + try: + payload = response.json() + error_message = payload.get("error") or payload.get("message") or response.text or "Unknown error occurred" + except json.JSONDecodeError: + error_message = response.text or "Unknown error occurred" raise Exception(f"Failed to {action}. Status code: {response.status_code}. Error: {error_message}") # type: ignore[return] def search(self, query: str, params: dict[str, Any] | None = None) -> dict[str, Any]: @@ -160,7 +168,7 @@ class FirecrawlApp: } if params: json_data.update(params) - response = self._post_request(f"{self.base_url}/v2/search", json_data, headers) + response = self._post_request(self._build_url("v2/search"), json_data, headers) if response.status_code == 200: response_data = response.json() if not response_data.get("success"): diff --git a/api/core/rag/extractor/notion_extractor.py b/api/core/rag/extractor/notion_extractor.py index e87ab38349..372af8fd94 100644 --- a/api/core/rag/extractor/notion_extractor.py +++ b/api/core/rag/extractor/notion_extractor.py @@ -48,13 +48,21 @@ class NotionExtractor(BaseExtractor): if notion_access_token: self._notion_access_token = notion_access_token else: - self._notion_access_token = self._get_access_token(tenant_id, self._credential_id) - if not self._notion_access_token: + try: + self._notion_access_token = self._get_access_token(tenant_id, self._credential_id) + except Exception as e: + logger.warning( + ( + "Failed to get Notion access token from datasource credentials: %s, " + "falling back to environment variable NOTION_INTEGRATION_TOKEN" + ), + e, + ) integration_token = dify_config.NOTION_INTEGRATION_TOKEN if integration_token is None: raise ValueError( "Must specify `integration_token` or set environment variable `NOTION_INTEGRATION_TOKEN`." - ) + ) from e self._notion_access_token = integration_token diff --git a/api/core/rag/retrieval/dataset_retrieval.py b/api/core/rag/retrieval/dataset_retrieval.py index baf879df95..2c3fc5ab75 100644 --- a/api/core/rag/retrieval/dataset_retrieval.py +++ b/api/core/rag/retrieval/dataset_retrieval.py @@ -7,7 +7,7 @@ from collections.abc import Generator, Mapping from typing import Any, Union, cast from flask import Flask, current_app -from sqlalchemy import and_, or_, select +from sqlalchemy import and_, literal, or_, select from sqlalchemy.orm import Session from core.app.app_config.entities import ( @@ -1036,7 +1036,7 @@ class DatasetRetrieval: if automatic_metadata_filters: conditions = [] for sequence, filter in enumerate(automatic_metadata_filters): - self._process_metadata_filter_func( + self.process_metadata_filter_func( sequence, filter.get("condition"), # type: ignore filter.get("metadata_name"), # type: ignore @@ -1072,7 +1072,7 @@ class DatasetRetrieval: value=expected_value, ) ) - filters = self._process_metadata_filter_func( + filters = self.process_metadata_filter_func( sequence, condition.comparison_operator, metadata_name, @@ -1168,8 +1168,9 @@ class DatasetRetrieval: return None return automatic_metadata_filters - def _process_metadata_filter_func( - self, sequence: int, condition: str, metadata_name: str, value: Any | None, filters: list + @classmethod + def process_metadata_filter_func( + cls, sequence: int, condition: str, metadata_name: str, value: Any | None, filters: list ): if value is None and condition not in ("empty", "not empty"): return filters @@ -1218,6 +1219,20 @@ class DatasetRetrieval: case "≄" | ">=": filters.append(DatasetDocument.doc_metadata[metadata_name].as_float() >= value) + case "in" | "not in": + if 
isinstance(value, str): + value_list = [v.strip() for v in value.split(",") if v.strip()] + elif isinstance(value, (list, tuple)): + value_list = [str(v) for v in value if v is not None] + else: + value_list = [str(value)] if value is not None else [] + + if not value_list: + # `field in []` is False, `field not in []` is True + filters.append(literal(condition == "not in")) + else: + op = json_field.in_ if condition == "in" else json_field.notin_ + filters.append(op(value_list)) case _: pass diff --git a/api/core/tools/entities/tool_entities.py b/api/core/tools/entities/tool_entities.py index 353f3a646a..583a3584f7 100644 --- a/api/core/tools/entities/tool_entities.py +++ b/api/core/tools/entities/tool_entities.py @@ -153,11 +153,11 @@ class ToolInvokeMessage(BaseModel): @classmethod def transform_variable_value(cls, values): """ - Only basic types and lists are allowed. + Only basic types, lists, and None are allowed. """ value = values.get("variable_value") - if not isinstance(value, dict | list | str | int | float | bool): - raise ValueError("Only basic types and lists are allowed.") + if value is not None and not isinstance(value, dict | list | str | int | float | bool): + raise ValueError("Only basic types, lists, and None are allowed.") # if stream is true, the value must be a string if values.get("stream"): diff --git a/api/core/tools/mcp_tool/tool.py b/api/core/tools/mcp_tool/tool.py index fbaf31ad09..96917045e3 100644 --- a/api/core/tools/mcp_tool/tool.py +++ b/api/core/tools/mcp_tool/tool.py @@ -6,7 +6,15 @@ from typing import Any from core.mcp.auth_client import MCPClientWithAuthRetry from core.mcp.error import MCPConnectionError -from core.mcp.types import AudioContent, CallToolResult, ImageContent, TextContent +from core.mcp.types import ( + AudioContent, + BlobResourceContents, + CallToolResult, + EmbeddedResource, + ImageContent, + TextContent, + TextResourceContents, +) from core.tools.__base.tool import Tool from core.tools.__base.tool_runtime import ToolRuntime from core.tools.entities.tool_entities import ToolEntity, ToolInvokeMessage, ToolProviderType @@ -53,10 +61,19 @@ class MCPTool(Tool): for content in result.content: if isinstance(content, TextContent): yield from self._process_text_content(content) - elif isinstance(content, ImageContent): - yield self._process_image_content(content) - elif isinstance(content, AudioContent): - yield self._process_audio_content(content) + elif isinstance(content, ImageContent | AudioContent): + yield self.create_blob_message( + blob=base64.b64decode(content.data), meta={"mime_type": content.mimeType} + ) + elif isinstance(content, EmbeddedResource): + resource = content.resource + if isinstance(resource, TextResourceContents): + yield self.create_text_message(resource.text) + elif isinstance(resource, BlobResourceContents): + mime_type = resource.mimeType or "application/octet-stream" + yield self.create_blob_message(blob=base64.b64decode(resource.blob), meta={"mime_type": mime_type}) + else: + raise ToolInvokeError(f"Unsupported embedded resource type: {type(resource)}") else: logger.warning("Unsupported content type=%s", type(content)) @@ -101,14 +118,6 @@ class MCPTool(Tool): for item in json_list: yield self.create_json_message(item) - def _process_image_content(self, content: ImageContent) -> ToolInvokeMessage: - """Process image content and return a blob message.""" - return self.create_blob_message(blob=base64.b64decode(content.data), meta={"mime_type": content.mimeType}) - - def _process_audio_content(self, content: 
AudioContent) -> ToolInvokeMessage: - """Process audio content and return a blob message.""" - return self.create_blob_message(blob=base64.b64decode(content.data), meta={"mime_type": content.mimeType}) - def fork_tool_runtime(self, runtime: ToolRuntime) -> "MCPTool": return MCPTool( entity=self.entity, diff --git a/api/core/tools/workflow_as_tool/provider.py b/api/core/tools/workflow_as_tool/provider.py index 0439fb1d60..2bd973f831 100644 --- a/api/core/tools/workflow_as_tool/provider.py +++ b/api/core/tools/workflow_as_tool/provider.py @@ -5,6 +5,7 @@ from sqlalchemy.orm import Session from core.app.app_config.entities import VariableEntity, VariableEntityType from core.app.apps.workflow.app_config_manager import WorkflowAppConfigManager +from core.db.session_factory import session_factory from core.plugin.entities.parameters import PluginParameterOption from core.tools.__base.tool_provider import ToolProviderController from core.tools.__base.tool_runtime import ToolRuntime @@ -47,33 +48,30 @@ class WorkflowToolProviderController(ToolProviderController): @classmethod def from_db(cls, db_provider: WorkflowToolProvider) -> "WorkflowToolProviderController": - with Session(db.engine, expire_on_commit=False) as session, session.begin(): - provider = session.get(WorkflowToolProvider, db_provider.id) if db_provider.id else None - if not provider: - raise ValueError("workflow provider not found") - app = session.get(App, provider.app_id) + with session_factory.create_session() as session, session.begin(): + app = session.get(App, db_provider.app_id) if not app: raise ValueError("app not found") - user = session.get(Account, provider.user_id) if provider.user_id else None + user = session.get(Account, db_provider.user_id) if db_provider.user_id else None controller = WorkflowToolProviderController( entity=ToolProviderEntity( identity=ToolProviderIdentity( author=user.name if user else "", - name=provider.label, - label=I18nObject(en_US=provider.label, zh_Hans=provider.label), - description=I18nObject(en_US=provider.description, zh_Hans=provider.description), - icon=provider.icon, + name=db_provider.label, + label=I18nObject(en_US=db_provider.label, zh_Hans=db_provider.label), + description=I18nObject(en_US=db_provider.description, zh_Hans=db_provider.description), + icon=db_provider.icon, ), credentials_schema=[], plugin_id=None, ), - provider_id=provider.id or "", + provider_id="", ) controller.tools = [ - controller._get_db_provider_tool(provider, app, session=session, user=user), + controller._get_db_provider_tool(db_provider, app, session=session, user=user), ] return controller diff --git a/api/core/trigger/utils/encryption.py b/api/core/trigger/utils/encryption.py index 026a65aa23..b12291e299 100644 --- a/api/core/trigger/utils/encryption.py +++ b/api/core/trigger/utils/encryption.py @@ -67,12 +67,16 @@ def create_trigger_provider_encrypter_for_subscription( def delete_cache_for_subscription(tenant_id: str, provider_id: str, subscription_id: str): - cache = TriggerProviderCredentialsCache( + TriggerProviderCredentialsCache( tenant_id=tenant_id, provider_id=provider_id, credential_id=subscription_id, - ) - cache.delete() + ).delete() + TriggerProviderPropertiesCache( + tenant_id=tenant_id, + provider_id=provider_id, + subscription_id=subscription_id, + ).delete() def create_trigger_provider_encrypter_for_properties( diff --git a/api/core/workflow/enums.py b/api/core/workflow/enums.py index 2d62b00ecf..e4da9a82d2 100644 --- a/api/core/workflow/enums.py +++ b/api/core/workflow/enums.py @@ 
-248,6 +248,7 @@ class WorkflowNodeExecutionMetadataKey(StrEnum): ERROR_STRATEGY = "error_strategy" # node in continue on error mode return the field LOOP_VARIABLE_MAP = "loop_variable_map" # single loop variable output DATASOURCE_INFO = "datasource_info" + COMPLETED_REASON = "completed_reason" # completed reason for loop node class WorkflowNodeExecutionStatus(StrEnum): diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index adc474bd60..8670a71aa3 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -6,7 +6,7 @@ from collections import defaultdict from collections.abc import Mapping, Sequence from typing import TYPE_CHECKING, Any, cast -from sqlalchemy import and_, func, literal, or_, select +from sqlalchemy import and_, func, or_, select from sqlalchemy.orm import sessionmaker from core.app.app_config.entities import DatasetRetrieveConfigEntity @@ -460,7 +460,7 @@ class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node[KnowledgeRetrievalNodeD if automatic_metadata_filters: conditions = [] for sequence, filter in enumerate(automatic_metadata_filters): - self._process_metadata_filter_func( + DatasetRetrieval.process_metadata_filter_func( sequence, filter.get("condition", ""), filter.get("metadata_name", ""), @@ -504,7 +504,7 @@ class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node[KnowledgeRetrievalNodeD value=expected_value, ) ) - filters = self._process_metadata_filter_func( + filters = DatasetRetrieval.process_metadata_filter_func( sequence, condition.comparison_operator, metadata_name, @@ -603,87 +603,6 @@ class KnowledgeRetrievalNode(LLMUsageTrackingMixin, Node[KnowledgeRetrievalNodeD return [], usage return automatic_metadata_filters, usage - def _process_metadata_filter_func( - self, sequence: int, condition: str, metadata_name: str, value: Any, filters: list[Any] - ) -> list[Any]: - if value is None and condition not in ("empty", "not empty"): - return filters - - json_field = Document.doc_metadata[metadata_name].as_string() - - match condition: - case "contains": - filters.append(json_field.like(f"%{value}%")) - - case "not contains": - filters.append(json_field.notlike(f"%{value}%")) - - case "start with": - filters.append(json_field.like(f"{value}%")) - - case "end with": - filters.append(json_field.like(f"%{value}")) - case "in": - if isinstance(value, str): - value_list = [v.strip() for v in value.split(",") if v.strip()] - elif isinstance(value, (list, tuple)): - value_list = [str(v) for v in value if v is not None] - else: - value_list = [str(value)] if value is not None else [] - - if not value_list: - filters.append(literal(False)) - else: - filters.append(json_field.in_(value_list)) - - case "not in": - if isinstance(value, str): - value_list = [v.strip() for v in value.split(",") if v.strip()] - elif isinstance(value, (list, tuple)): - value_list = [str(v) for v in value if v is not None] - else: - value_list = [str(value)] if value is not None else [] - - if not value_list: - filters.append(literal(True)) - else: - filters.append(json_field.notin_(value_list)) - - case "is" | "=": - if isinstance(value, str): - filters.append(json_field == value) - elif isinstance(value, (int, float)): - filters.append(Document.doc_metadata[metadata_name].as_float() == value) - - case "is not" | "≠": - if isinstance(value, str): - filters.append(json_field != value) 
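The method being removed here duplicated the metadata filter builder that now lives on `DatasetRetrieval.process_metadata_filter_func`. Both copies coerce the `in`/`not in` operand the same way before building the SQL filter; a standalone sketch of just that coercion, with a hypothetical function name and no SQLAlchemy dependency:

```python
def normalize_membership_values(value) -> list[str]:
    """Coerce an `in`/`not in` operand into a list of strings."""
    if isinstance(value, str):
        # Comma-separated string: split and drop blank entries.
        return [v.strip() for v in value.split(",") if v.strip()]
    if isinstance(value, (list, tuple)):
        # Sequence: stringify the non-None members.
        return [str(v) for v in value if v is not None]
    return [str(value)] if value is not None else []


assert normalize_membership_values("a, b, , c") == ["a", "b", "c"]
assert normalize_membership_values([1, None, "x"]) == ["1", "x"]
assert normalize_membership_values(None) == []
```

When the normalized list is empty, the SQL condition degenerates to a constant — `field in []` is always false and `field not in []` is always true — which is why the shared method appends `literal(condition == "not in")` instead of emitting an empty `IN` clause.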
- elif isinstance(value, (int, float)): - filters.append(Document.doc_metadata[metadata_name].as_float() != value) - - case "empty": - filters.append(Document.doc_metadata[metadata_name].is_(None)) - - case "not empty": - filters.append(Document.doc_metadata[metadata_name].isnot(None)) - - case "before" | "<": - filters.append(Document.doc_metadata[metadata_name].as_float() < value) - - case "after" | ">": - filters.append(Document.doc_metadata[metadata_name].as_float() > value) - - case "≤" | "<=": - filters.append(Document.doc_metadata[metadata_name].as_float() <= value) - - case "≄" | ">=": - filters.append(Document.doc_metadata[metadata_name].as_float() >= value) - - case _: - pass - - return filters - @classmethod def _extract_variable_selector_to_variable_mapping( cls, diff --git a/api/core/workflow/nodes/loop/entities.py b/api/core/workflow/nodes/loop/entities.py index 4fcad888e4..92a8702fc3 100644 --- a/api/core/workflow/nodes/loop/entities.py +++ b/api/core/workflow/nodes/loop/entities.py @@ -1,3 +1,4 @@ +from enum import StrEnum from typing import Annotated, Any, Literal from pydantic import AfterValidator, BaseModel, Field, field_validator @@ -96,3 +97,8 @@ class LoopState(BaseLoopState): Get current output. """ return self.current_output + + +class LoopCompletedReason(StrEnum): + LOOP_BREAK = "loop_break" + LOOP_COMPLETED = "loop_completed" diff --git a/api/core/workflow/nodes/loop/loop_node.py b/api/core/workflow/nodes/loop/loop_node.py index 1c26bbc2d0..1f9fc8a115 100644 --- a/api/core/workflow/nodes/loop/loop_node.py +++ b/api/core/workflow/nodes/loop/loop_node.py @@ -29,7 +29,7 @@ from core.workflow.node_events import ( ) from core.workflow.nodes.base import LLMUsageTrackingMixin from core.workflow.nodes.base.node import Node -from core.workflow.nodes.loop.entities import LoopNodeData, LoopVariableData +from core.workflow.nodes.loop.entities import LoopCompletedReason, LoopNodeData, LoopVariableData from core.workflow.utils.condition.processor import ConditionProcessor from factories.variable_factory import TypeMismatchError, build_segment_with_type, segment_to_variable from libs.datetime_utils import naive_utc_now @@ -96,6 +96,7 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]): loop_duration_map: dict[str, float] = {} single_loop_variable_map: dict[str, dict[str, Any]] = {} # single loop variable output loop_usage = LLMUsage.empty_usage() + loop_node_ids = self._extract_loop_node_ids_from_config(self.graph_config, self._node_id) # Start Loop event yield LoopStartedEvent( @@ -118,6 +119,8 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]): loop_count = 0 for i in range(loop_count): + # Clear stale variables from previous loop iterations to avoid streaming old values + self._clear_loop_subgraph_variables(loop_node_ids) graph_engine = self._create_graph_engine(start_at=start_at, root_node_id=root_node_id) loop_start_time = naive_utc_now() @@ -177,7 +180,11 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]): WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: loop_usage.total_tokens, WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: loop_usage.total_price, WorkflowNodeExecutionMetadataKey.CURRENCY: loop_usage.currency, - "completed_reason": "loop_break" if reach_break_condition else "loop_completed", + WorkflowNodeExecutionMetadataKey.COMPLETED_REASON: ( + LoopCompletedReason.LOOP_BREAK + if reach_break_condition + else LoopCompletedReason.LOOP_COMPLETED + ), WorkflowNodeExecutionMetadataKey.LOOP_DURATION_MAP: loop_duration_map,
WorkflowNodeExecutionMetadataKey.LOOP_VARIABLE_MAP: single_loop_variable_map, }, @@ -274,6 +281,17 @@ class LoopNode(LLMUsageTrackingMixin, Node[LoopNodeData]): if WorkflowNodeExecutionMetadataKey.LOOP_ID not in current_metadata: event.node_run_result.metadata = {**current_metadata, **loop_metadata} + def _clear_loop_subgraph_variables(self, loop_node_ids: set[str]) -> None: + """ + Remove variables produced by loop sub-graph nodes from previous iterations. + + Keeping stale variables causes a freshly created response coordinator in the + next iteration to fall back to outdated values when no stream chunks exist. + """ + variable_pool = self.graph_runtime_state.variable_pool + for node_id in loop_node_ids: + variable_pool.remove([node_id]) + @classmethod def _extract_variable_selector_to_variable_mapping( cls, diff --git a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py index 93db417b15..08e0542d61 100644 --- a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py +++ b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py @@ -281,7 +281,7 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]): # handle invoke result - text = invoke_result.message.content or "" + text = invoke_result.message.get_text_content() if not isinstance(text, str): raise InvalidTextContentTypeError(f"Invalid text content type: {type(text)}. Expected str.") diff --git a/api/extensions/storage/tencent_cos_storage.py b/api/extensions/storage/tencent_cos_storage.py index ea5d982efc..cf092c6973 100644 --- a/api/extensions/storage/tencent_cos_storage.py +++ b/api/extensions/storage/tencent_cos_storage.py @@ -13,12 +13,20 @@ class TencentCosStorage(BaseStorage): super().__init__() self.bucket_name = dify_config.TENCENT_COS_BUCKET_NAME - config = CosConfig( - Region=dify_config.TENCENT_COS_REGION, - SecretId=dify_config.TENCENT_COS_SECRET_ID, - SecretKey=dify_config.TENCENT_COS_SECRET_KEY, - Scheme=dify_config.TENCENT_COS_SCHEME, - ) + if dify_config.TENCENT_COS_CUSTOM_DOMAIN: + config = CosConfig( + Domain=dify_config.TENCENT_COS_CUSTOM_DOMAIN, + SecretId=dify_config.TENCENT_COS_SECRET_ID, + SecretKey=dify_config.TENCENT_COS_SECRET_KEY, + Scheme=dify_config.TENCENT_COS_SCHEME, + ) + else: + config = CosConfig( + Region=dify_config.TENCENT_COS_REGION, + SecretId=dify_config.TENCENT_COS_SECRET_ID, + SecretKey=dify_config.TENCENT_COS_SECRET_KEY, + Scheme=dify_config.TENCENT_COS_SCHEME, + ) self.client = CosS3Client(config) def save(self, filename, data): diff --git a/api/pyproject.toml b/api/pyproject.toml index 6716603dd4..dbc6a2eb83 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "dify-api" -version = "1.11.1" +version = "1.11.2" requires-python = ">=3.11,<3.13" dependencies = [ diff --git a/api/services/app_generate_service.py b/api/services/app_generate_service.py index 4514c86f7c..cc58899dc4 100644 --- a/api/services/app_generate_service.py +++ b/api/services/app_generate_service.py @@ -14,7 +14,8 @@ from enums.quota_type import QuotaType, unlimited from extensions.otel import AppGenerateHandler, trace_span from models.model import Account, App, AppMode, EndUser from models.workflow import Workflow -from services.errors.app import InvokeRateLimitError, QuotaExceededError, WorkflowIdFormatError, WorkflowNotFoundError +from services.errors.app import QuotaExceededError, WorkflowIdFormatError, WorkflowNotFoundError +from 
services.errors.llm import InvokeRateLimitError from services.workflow_service import WorkflowService diff --git a/api/services/async_workflow_service.py b/api/services/async_workflow_service.py index e100582511..bc73b7c8c2 100644 --- a/api/services/async_workflow_service.py +++ b/api/services/async_workflow_service.py @@ -21,7 +21,7 @@ from models.model import App, EndUser from models.trigger import WorkflowTriggerLog from models.workflow import Workflow from repositories.sqlalchemy_workflow_trigger_log_repository import SQLAlchemyWorkflowTriggerLogRepository -from services.errors.app import InvokeRateLimitError, QuotaExceededError, WorkflowNotFoundError +from services.errors.app import QuotaExceededError, WorkflowNotFoundError, WorkflowQuotaLimitError from services.workflow.entities import AsyncTriggerResponse, TriggerData, WorkflowTaskData from services.workflow.queue_dispatcher import QueueDispatcherManager, QueuePriority from services.workflow_service import WorkflowService @@ -141,7 +141,7 @@ class AsyncWorkflowService: trigger_log_repo.update(trigger_log) session.commit() - raise InvokeRateLimitError( + raise WorkflowQuotaLimitError( f"Workflow execution quota limit reached for tenant {trigger_data.tenant_id}" ) from e diff --git a/api/services/auth/firecrawl/firecrawl.py b/api/services/auth/firecrawl/firecrawl.py index d455475bfc..b002706931 100644 --- a/api/services/auth/firecrawl/firecrawl.py +++ b/api/services/auth/firecrawl/firecrawl.py @@ -26,7 +26,7 @@ class FirecrawlAuth(ApiKeyAuthBase): "limit": 1, "scrapeOptions": {"onlyMainContent": True}, } - response = self._post_request(f"{self.base_url}/v1/crawl", options, headers) + response = self._post_request(self._build_url("v1/crawl"), options, headers) if response.status_code == 200: return True else: @@ -35,15 +35,17 @@ class FirecrawlAuth(ApiKeyAuthBase): def _prepare_headers(self): return {"Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}"} + def _build_url(self, path: str) -> str: + # ensure exactly one slash between base and path, regardless of user-provided base_url + return f"{self.base_url.rstrip('/')}/{path.lstrip('/')}" + def _post_request(self, url, data, headers): return httpx.post(url, headers=headers, json=data) def _handle_error(self, response): - if response.status_code in {402, 409, 500}: - error_message = response.json().get("error", "Unknown error occurred") - raise Exception(f"Failed to authorize. Status code: {response.status_code}. Error: {error_message}") - else: - if response.text: - error_message = json.loads(response.text).get("error", "Unknown error occurred") - raise Exception(f"Failed to authorize. Status code: {response.status_code}. Error: {error_message}") - raise Exception(f"Unexpected error occurred while trying to authorize. Status code: {response.status_code}") + try: + payload = response.json() + except json.JSONDecodeError: + payload = {} + error_message = payload.get("error") or payload.get("message") or (response.text or "Unknown error occurred") + raise Exception(f"Failed to authorize. Status code: {response.status_code}. 
Error: {error_message}") diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 970192fde5..ac4b25c5dc 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -3458,7 +3458,7 @@ class SegmentService: if keyword: query = query.where(DocumentSegment.content.ilike(f"%{keyword}%")) - query = query.order_by(DocumentSegment.position.asc()) + query = query.order_by(DocumentSegment.position.asc(), DocumentSegment.id.asc()) paginated_segments = db.paginate(select=query, page=page, per_page=limit, max_per_page=100, error_out=False) return paginated_segments.items, paginated_segments.total diff --git a/api/services/enterprise/enterprise_service.py b/api/services/enterprise/enterprise_service.py index 83d0fcf296..c0cc0e5233 100644 --- a/api/services/enterprise/enterprise_service.py +++ b/api/services/enterprise/enterprise_service.py @@ -110,5 +110,5 @@ class EnterpriseService: if not app_id: raise ValueError("app_id must be provided.") - body = {"appId": app_id} - EnterpriseRequest.send_request("DELETE", "/webapp/clean", json=body) + params = {"appId": app_id} + EnterpriseRequest.send_request("DELETE", "/webapp/clean", params=params) diff --git a/api/services/errors/app.py b/api/services/errors/app.py index 24e4760acc..60e59e97dc 100644 --- a/api/services/errors/app.py +++ b/api/services/errors/app.py @@ -18,8 +18,8 @@ class WorkflowIdFormatError(Exception): pass -class InvokeRateLimitError(Exception): - """Raised when rate limit is exceeded for workflow invocations.""" +class WorkflowQuotaLimitError(Exception): + """Raised when workflow execution quota is exceeded (for async/background workflows).""" pass diff --git a/api/services/plugin/plugin_parameter_service.py b/api/services/plugin/plugin_parameter_service.py index c517d9f966..40565c56ed 100644 --- a/api/services/plugin/plugin_parameter_service.py +++ b/api/services/plugin/plugin_parameter_service.py @@ -105,3 +105,49 @@ class PluginParameterService: ) .options ) + + @staticmethod + def get_dynamic_select_options_with_credentials( + tenant_id: str, + user_id: str, + plugin_id: str, + provider: str, + action: str, + parameter: str, + credential_id: str, + credentials: Mapping[str, Any], + ) -> Sequence[PluginParameterOption]: + """ + Get dynamic select options using provided credentials directly. + Used for edit mode when credentials have been modified but not yet saved. + + Security: credential_id is validated against tenant_id to ensure + users can only access their own credentials. 
+ """ + from constants import HIDDEN_VALUE + + # Get original subscription to replace hidden values (with tenant_id check for security) + original_subscription = TriggerProviderService.get_subscription_by_id(tenant_id, credential_id) + if not original_subscription: + raise ValueError(f"Subscription {credential_id} not found") + + # Replace [__HIDDEN__] with original values + resolved_credentials: dict[str, Any] = { + key: (original_subscription.credentials.get(key) if value == HIDDEN_VALUE else value) + for key, value in credentials.items() + } + + return ( + DynamicSelectClient() + .fetch_dynamic_select_options( + tenant_id, + user_id, + plugin_id, + provider, + action, + resolved_credentials, + original_subscription.credential_type or CredentialType.UNAUTHORIZED.value, + parameter, + ) + .options + ) diff --git a/api/services/tools/api_tools_manage_service.py b/api/services/tools/api_tools_manage_service.py index b3b6e36346..250d29f335 100644 --- a/api/services/tools/api_tools_manage_service.py +++ b/api/services/tools/api_tools_manage_service.py @@ -7,7 +7,6 @@ from httpx import get from sqlalchemy import select from core.entities.provider_entities import ProviderConfig -from core.helper.tool_provider_cache import ToolProviderListCache from core.model_runtime.utils.encoders import jsonable_encoder from core.tools.__base.tool_runtime import ToolRuntime from core.tools.custom_tool.provider import ApiToolProviderController @@ -178,9 +177,6 @@ class ApiToolManageService: # update labels ToolLabelManager.update_tool_labels(provider_controller, labels) - # Invalidate tool providers cache - ToolProviderListCache.invalidate_cache(tenant_id) - return {"result": "success"} @staticmethod @@ -322,9 +318,6 @@ class ApiToolManageService: # update labels ToolLabelManager.update_tool_labels(provider_controller, labels) - # Invalidate tool providers cache - ToolProviderListCache.invalidate_cache(tenant_id) - return {"result": "success"} @staticmethod @@ -347,9 +340,6 @@ class ApiToolManageService: db.session.delete(provider) db.session.commit() - # Invalidate tool providers cache - ToolProviderListCache.invalidate_cache(tenant_id) - return {"result": "success"} @staticmethod diff --git a/api/services/tools/builtin_tools_manage_service.py b/api/services/tools/builtin_tools_manage_service.py index cf1d39fa25..6797a67dde 100644 --- a/api/services/tools/builtin_tools_manage_service.py +++ b/api/services/tools/builtin_tools_manage_service.py @@ -12,7 +12,6 @@ from constants import HIDDEN_VALUE, UNKNOWN_VALUE from core.helper.name_generator import generate_incremental_name from core.helper.position_helper import is_filtered from core.helper.provider_cache import NoOpProviderCredentialCache, ToolProviderCredentialsCache -from core.helper.tool_provider_cache import ToolProviderListCache from core.plugin.entities.plugin_daemon import CredentialType from core.tools.builtin_tool.provider import BuiltinToolProviderController from core.tools.builtin_tool.providers._positions import BuiltinToolProviderSort @@ -205,9 +204,6 @@ class BuiltinToolManageService: db_provider.name = name session.commit() - - # Invalidate tool providers cache - ToolProviderListCache.invalidate_cache(tenant_id) except Exception as e: session.rollback() raise ValueError(str(e)) @@ -286,12 +282,10 @@ class BuiltinToolManageService: session.add(db_provider) session.commit() - - # Invalidate tool providers cache - ToolProviderListCache.invalidate_cache(tenant_id) except Exception as e: session.rollback() raise ValueError(str(e)) + return 
{"result": "success"} @staticmethod @@ -409,9 +403,6 @@ class BuiltinToolManageService: ) cache.delete() - # Invalidate tool providers cache - ToolProviderListCache.invalidate_cache(tenant_id) - return {"result": "success"} @staticmethod @@ -434,8 +425,6 @@ class BuiltinToolManageService: target_provider.is_default = True session.commit() - # Invalidate tool providers cache - ToolProviderListCache.invalidate_cache(tenant_id) return {"result": "success"} @staticmethod diff --git a/api/services/tools/mcp_tools_manage_service.py b/api/services/tools/mcp_tools_manage_service.py index 252be77b27..0be106f597 100644 --- a/api/services/tools/mcp_tools_manage_service.py +++ b/api/services/tools/mcp_tools_manage_service.py @@ -319,8 +319,14 @@ class MCPToolManageService: except MCPError as e: raise ValueError(f"Failed to connect to MCP server: {e}") - # Update database with retrieved tools - db_provider.tools = json.dumps([tool.model_dump() for tool in tools]) + # Update database with retrieved tools (ensure description is a non-null string) + tools_payload = [] + for tool in tools: + data = tool.model_dump() + if data.get("description") is None: + data["description"] = "" + tools_payload.append(data) + db_provider.tools = json.dumps(tools_payload) db_provider.authed = True db_provider.updated_at = datetime.now() self._session.flush() @@ -620,6 +626,21 @@ class MCPToolManageService: server_url_hash=new_server_url_hash, ) + @staticmethod + def reconnect_with_url( + *, + server_url: str, + headers: dict[str, str], + timeout: float | None, + sse_read_timeout: float | None, + ) -> ReconnectResult: + return MCPToolManageService._reconnect_with_url( + server_url=server_url, + headers=headers, + timeout=timeout, + sse_read_timeout=sse_read_timeout, + ) + @staticmethod def _reconnect_with_url( *, @@ -642,9 +663,16 @@ class MCPToolManageService: sse_read_timeout=sse_read_timeout, ) as mcp_client: tools = mcp_client.list_tools() + # Ensure tool descriptions are non-null in payload + tools_payload = [] + for t in tools: + d = t.model_dump() + if d.get("description") is None: + d["description"] = "" + tools_payload.append(d) return ReconnectResult( authed=True, - tools=json.dumps([tool.model_dump() for tool in tools]), + tools=json.dumps(tools_payload), encrypted_credentials=EMPTY_CREDENTIALS_JSON, ) except MCPAuthError: diff --git a/api/services/tools/tools_manage_service.py b/api/services/tools/tools_manage_service.py index 038c462f15..51e9120b8d 100644 --- a/api/services/tools/tools_manage_service.py +++ b/api/services/tools/tools_manage_service.py @@ -1,6 +1,5 @@ import logging -from core.helper.tool_provider_cache import ToolProviderListCache from core.tools.entities.api_entities import ToolProviderTypeApiLiteral from core.tools.tool_manager import ToolManager from services.tools.tools_transform_service import ToolTransformService @@ -16,14 +15,6 @@ class ToolCommonService: :return: the list of tool providers """ - # Try to get from cache first - cached_result = ToolProviderListCache.get_cached_providers(tenant_id, typ) - if cached_result is not None: - logger.debug("Returning cached tool providers for tenant %s, type %s", tenant_id, typ) - return cached_result - - # Cache miss - fetch from database - logger.debug("Cache miss for tool providers, fetching from database for tenant %s, type %s", tenant_id, typ) providers = ToolManager.list_providers_from_api(user_id, tenant_id, typ) # add icon @@ -32,7 +23,4 @@ class ToolCommonService: result = [provider.to_dict() for provider in providers] - # Cache the 
result - ToolProviderListCache.set_cached_providers(tenant_id, typ, result) - return result diff --git a/api/services/tools/workflow_tools_manage_service.py b/api/services/tools/workflow_tools_manage_service.py index fe77ff2dc5..ab5d5480df 100644 --- a/api/services/tools/workflow_tools_manage_service.py +++ b/api/services/tools/workflow_tools_manage_service.py @@ -7,7 +7,6 @@ from typing import Any from sqlalchemy import or_, select from sqlalchemy.orm import Session -from core.helper.tool_provider_cache import ToolProviderListCache from core.model_runtime.utils.encoders import jsonable_encoder from core.tools.__base.tool_provider import ToolProviderController from core.tools.entities.api_entities import ToolApiEntity, ToolProviderApiEntity @@ -68,34 +67,31 @@ class WorkflowToolManageService: if workflow is None: raise ValueError(f"Workflow not found for app {workflow_app_id}") - with Session(db.engine, expire_on_commit=False) as session, session.begin(): - workflow_tool_provider = WorkflowToolProvider( - tenant_id=tenant_id, - user_id=user_id, - app_id=workflow_app_id, - name=name, - label=label, - icon=json.dumps(icon), - description=description, - parameter_configuration=json.dumps(parameters), - privacy_policy=privacy_policy, - version=workflow.version, - ) - session.add(workflow_tool_provider) + workflow_tool_provider = WorkflowToolProvider( + tenant_id=tenant_id, + user_id=user_id, + app_id=workflow_app_id, + name=name, + label=label, + icon=json.dumps(icon), + description=description, + parameter_configuration=json.dumps(parameters), + privacy_policy=privacy_policy, + version=workflow.version, + ) try: WorkflowToolProviderController.from_db(workflow_tool_provider) except Exception as e: raise ValueError(str(e)) + with Session(db.engine, expire_on_commit=False) as session, session.begin(): + session.add(workflow_tool_provider) + if labels is not None: ToolLabelManager.update_tool_labels( ToolTransformService.workflow_provider_to_controller(workflow_tool_provider), labels ) - - # Invalidate tool providers cache - ToolProviderListCache.invalidate_cache(tenant_id) - return {"result": "success"} @classmethod @@ -183,9 +179,6 @@ class WorkflowToolManageService: ToolTransformService.workflow_provider_to_controller(workflow_tool_provider), labels ) - # Invalidate tool providers cache - ToolProviderListCache.invalidate_cache(tenant_id) - return {"result": "success"} @classmethod @@ -248,9 +241,6 @@ class WorkflowToolManageService: db.session.commit() - # Invalidate tool providers cache - ToolProviderListCache.invalidate_cache(tenant_id) - return {"result": "success"} @classmethod diff --git a/api/services/trigger/trigger_provider_service.py b/api/services/trigger/trigger_provider_service.py index 668e4c5be2..ef77c33c1b 100644 --- a/api/services/trigger/trigger_provider_service.py +++ b/api/services/trigger/trigger_provider_service.py @@ -94,16 +94,23 @@ class TriggerProviderService: provider_controller = TriggerManager.get_trigger_provider(tenant_id, provider_id) for subscription in subscriptions: - encrypter, _ = create_trigger_provider_encrypter_for_subscription( + credential_encrypter, _ = create_trigger_provider_encrypter_for_subscription( tenant_id=tenant_id, controller=provider_controller, subscription=subscription, ) subscription.credentials = dict( - encrypter.mask_credentials(dict(encrypter.decrypt(subscription.credentials))) + credential_encrypter.mask_credentials(dict(credential_encrypter.decrypt(subscription.credentials))) ) - subscription.properties = 
dict(encrypter.mask_credentials(dict(encrypter.decrypt(subscription.properties)))) - subscription.parameters = dict(encrypter.mask_credentials(dict(encrypter.decrypt(subscription.parameters)))) + properties_encrypter, _ = create_trigger_provider_encrypter_for_properties( + tenant_id=tenant_id, + controller=provider_controller, + subscription=subscription, + ) + subscription.properties = dict( + properties_encrypter.mask_credentials(dict(properties_encrypter.decrypt(subscription.properties))) + ) + subscription.parameters = dict(subscription.parameters) count = workflows_in_use_map.get(subscription.id) subscription.workflows_in_use = count if count is not None else 0 @@ -209,6 +216,101 @@ class TriggerProviderService: logger.exception("Failed to add trigger provider") raise ValueError(str(e)) + @classmethod + def update_trigger_subscription( + cls, + tenant_id: str, + subscription_id: str, + name: str | None = None, + properties: Mapping[str, Any] | None = None, + parameters: Mapping[str, Any] | None = None, + credentials: Mapping[str, Any] | None = None, + credential_expires_at: int | None = None, + expires_at: int | None = None, + ) -> None: + """ + Update an existing trigger subscription. + + :param tenant_id: Tenant ID + :param subscription_id: Subscription instance ID + :param name: Optional new name for this subscription + :param properties: Optional new properties + :param parameters: Optional new parameters + :param credentials: Optional new credentials + :param credential_expires_at: Optional new credential expiration timestamp + :param expires_at: Optional new expiration timestamp + :return: None + """ + with Session(db.engine, expire_on_commit=False) as session: + # Use distributed lock to prevent race conditions on the same subscription + lock_key = f"trigger_subscription_update_lock:{tenant_id}_{subscription_id}" + with redis_client.lock(lock_key, timeout=20): + subscription: TriggerSubscription | None = ( + session.query(TriggerSubscription).filter_by(tenant_id=tenant_id, id=subscription_id).first() + ) + if not subscription: + raise ValueError(f"Trigger subscription {subscription_id} not found") + + provider_id = TriggerProviderID(subscription.provider_id) + provider_controller = TriggerManager.get_trigger_provider(tenant_id, provider_id) + + # Check for name uniqueness if name is being updated + if name is not None and name != subscription.name: + existing = ( + session.query(TriggerSubscription) + .filter_by(tenant_id=tenant_id, provider_id=str(provider_id), name=name) + .first() + ) + if existing: + raise ValueError(f"Subscription name '{name}' already exists for this provider") + subscription.name = name + + # Update properties if provided + if properties is not None: + properties_encrypter, _ = create_provider_encrypter( + tenant_id=tenant_id, + config=provider_controller.get_properties_schema(), + cache=NoOpProviderCredentialCache(), + ) + # Handle hidden values - preserve original encrypted values + original_properties = properties_encrypter.decrypt(subscription.properties) + new_properties: dict[str, Any] = { + key: value if value != HIDDEN_VALUE else original_properties.get(key, UNKNOWN_VALUE) + for key, value in properties.items() + } + subscription.properties = dict(properties_encrypter.encrypt(new_properties)) + + # Update parameters if provided + if parameters is not None: + subscription.parameters = dict(parameters) + + # Update credentials if provided + if credentials is not None: + credential_type = 
CredentialType.of(subscription.credential_type) + credential_encrypter, _ = create_provider_encrypter( + tenant_id=tenant_id, + config=provider_controller.get_credential_schema_config(credential_type), + cache=NoOpProviderCredentialCache(), + ) + subscription.credentials = dict(credential_encrypter.encrypt(dict(credentials))) + + # Update credential expiration timestamp if provided + if credential_expires_at is not None: + subscription.credential_expires_at = credential_expires_at + + # Update expiration timestamp if provided + if expires_at is not None: + subscription.expires_at = expires_at + + session.commit() + + # Clear subscription cache + delete_cache_for_subscription( + tenant_id=tenant_id, + provider_id=subscription.provider_id, + subscription_id=subscription.id, + ) + @classmethod def get_subscription_by_id(cls, tenant_id: str, subscription_id: str | None = None) -> TriggerSubscription | None: """ @@ -257,17 +359,18 @@ class TriggerProviderService: raise ValueError(f"Trigger provider subscription {subscription_id} not found") credential_type: CredentialType = CredentialType.of(subscription.credential_type) + provider_id = TriggerProviderID(subscription.provider_id) + provider_controller: PluginTriggerProviderController = TriggerManager.get_trigger_provider( + tenant_id=tenant_id, provider_id=provider_id + ) + encrypter, _ = create_trigger_provider_encrypter_for_subscription( + tenant_id=tenant_id, + controller=provider_controller, + subscription=subscription, + ) + is_auto_created: bool = credential_type in [CredentialType.OAUTH2, CredentialType.API_KEY] if is_auto_created: - provider_id = TriggerProviderID(subscription.provider_id) - provider_controller: PluginTriggerProviderController = TriggerManager.get_trigger_provider( - tenant_id=tenant_id, provider_id=provider_id - ) - encrypter, _ = create_trigger_provider_encrypter_for_subscription( - tenant_id=tenant_id, - controller=provider_controller, - subscription=subscription, - ) try: TriggerManager.unsubscribe_trigger( tenant_id=tenant_id, @@ -280,8 +383,8 @@ class TriggerProviderService: except Exception as e: logger.exception("Error unsubscribing trigger", exc_info=e) - # Clear cache session.delete(subscription) + # Clear cache delete_cache_for_subscription( tenant_id=tenant_id, provider_id=subscription.provider_id, @@ -688,3 +791,188 @@ class TriggerProviderService: ) subscription.properties = dict(properties_encrypter.decrypt(subscription.properties)) return subscription + + @classmethod + def verify_subscription_credentials( + cls, + tenant_id: str, + user_id: str, + provider_id: TriggerProviderID, + subscription_id: str, + credentials: Mapping[str, Any], + ) -> dict[str, Any]: + """ + Verify credentials for an existing subscription without updating it. + + This is used in edit mode to validate new credentials before rebuild. 
+ + :param tenant_id: Tenant ID + :param user_id: User ID + :param provider_id: Provider identifier + :param subscription_id: Subscription ID + :param credentials: New credentials to verify + :return: dict with 'verified' boolean + """ + provider_controller = TriggerManager.get_trigger_provider(tenant_id, provider_id) + if not provider_controller: + raise ValueError(f"Provider {provider_id} not found") + + subscription = cls.get_subscription_by_id( + tenant_id=tenant_id, + subscription_id=subscription_id, + ) + if not subscription: + raise ValueError(f"Subscription {subscription_id} not found") + + credential_type = CredentialType.of(subscription.credential_type) + + # For API Key, validate the new credentials + if credential_type == CredentialType.API_KEY: + new_credentials: dict[str, Any] = { + key: value if value != HIDDEN_VALUE else subscription.credentials.get(key, UNKNOWN_VALUE) + for key, value in credentials.items() + } + try: + provider_controller.validate_credentials(user_id, credentials=new_credentials) + return {"verified": True} + except Exception as e: + raise ValueError(f"Invalid credentials: {e}") from e + + return {"verified": True} + + @classmethod + def rebuild_trigger_subscription( + cls, + tenant_id: str, + provider_id: TriggerProviderID, + subscription_id: str, + credentials: Mapping[str, Any], + parameters: Mapping[str, Any], + name: str | None = None, + ) -> None: + """ + Rebuild an existing trigger subscription. + + The rebuilt subscription keeps the same subscription_id and endpoint_id, + so the webhook URL remains unchanged. + + :param tenant_id: Tenant ID + :param provider_id: Provider identifier + :param subscription_id: Subscription ID + :param credentials: Credentials for the subscription + :param parameters: Parameters for the subscription + :param name: Optional new name for the subscription + :return: None + """ + provider_controller = TriggerManager.get_trigger_provider(tenant_id, provider_id) + if not provider_controller: + raise ValueError(f"Provider {provider_id} not found") + + # Use distributed lock to prevent race conditions on the same subscription + lock_key = f"trigger_subscription_rebuild_lock:{tenant_id}_{subscription_id}" + with redis_client.lock(lock_key, timeout=20): + with Session(db.engine, expire_on_commit=False) as session: + try: + # Get subscription within the transaction + subscription: TriggerSubscription | None = ( + session.query(TriggerSubscription).filter_by(tenant_id=tenant_id, id=subscription_id).first() + ) + if not subscription: + raise ValueError(f"Subscription {subscription_id} not found") + + credential_type = CredentialType.of(subscription.credential_type) + if credential_type not in [CredentialType.OAUTH2, CredentialType.API_KEY]: + raise ValueError("Credential type not supported for rebuild") + + # Decrypt existing credentials for merging + credential_encrypter, _ = create_trigger_provider_encrypter_for_subscription( + tenant_id=tenant_id, + controller=provider_controller, + subscription=subscription, + ) + decrypted_credentials = dict(credential_encrypter.decrypt(subscription.credentials)) + + # Merge credentials: if caller passed HIDDEN_VALUE, retain existing decrypted value + merged_credentials: dict[str, Any] = { + key: value if value != HIDDEN_VALUE else decrypted_credentials.get(key, UNKNOWN_VALUE) + for key, value in credentials.items() + } + + user_id = subscription.user_id + + # TODO: Try invoking the update API of the 
plugin trigger provider + + # FALLBACK: If the update api is not implemented, + # delete the previous subscription and create a new one + + # Unsubscribe the previous subscription (external call, but we'll handle errors) + try: + TriggerManager.unsubscribe_trigger( + tenant_id=tenant_id, + user_id=user_id, + provider_id=provider_id, + subscription=subscription.to_entity(), + credentials=decrypted_credentials, + credential_type=credential_type, + ) + except Exception as e: + logger.exception("Error unsubscribing trigger during rebuild", exc_info=e) + # Continue anyway - the subscription might already be deleted externally + + # Create a new subscription with the same subscription_id and endpoint_id (external call) + new_subscription: TriggerSubscriptionEntity = TriggerManager.subscribe_trigger( + tenant_id=tenant_id, + user_id=user_id, + provider_id=provider_id, + endpoint=generate_plugin_trigger_endpoint_url(subscription.endpoint_id), + parameters=parameters, + credentials=merged_credentials, + credential_type=credential_type, + ) + + # Update the subscription in the same transaction + # Inline update logic to reuse the same session + if name is not None and name != subscription.name: + existing = ( + session.query(TriggerSubscription) + .filter_by(tenant_id=tenant_id, provider_id=str(provider_id), name=name) + .first() + ) + if existing and existing.id != subscription.id: + raise ValueError(f"Subscription name '{name}' already exists for this provider") + subscription.name = name + + # Update parameters + subscription.parameters = dict(parameters) + + # Update credentials with merged (and encrypted) values + subscription.credentials = dict(credential_encrypter.encrypt(merged_credentials)) + + # Update properties + if new_subscription.properties: + properties_encrypter, _ = create_provider_encrypter( + tenant_id=tenant_id, + config=provider_controller.get_properties_schema(), + cache=NoOpProviderCredentialCache(), + ) + subscription.properties = dict(properties_encrypter.encrypt(dict(new_subscription.properties))) + + # Update expiration timestamp + if new_subscription.expires_at is not None: + subscription.expires_at = new_subscription.expires_at + + # Commit the transaction + session.commit() + + # Clear subscription cache + delete_cache_for_subscription( + tenant_id=tenant_id, + provider_id=subscription.provider_id, + subscription_id=subscription.id, + ) + + except Exception as e: + # Rollback on any error + session.rollback() + logger.exception("Failed to rebuild trigger subscription", exc_info=e) + raise diff --git a/api/services/trigger/trigger_subscription_builder_service.py b/api/services/trigger/trigger_subscription_builder_service.py index 571393c782..37f852da3e 100644 --- a/api/services/trigger/trigger_subscription_builder_service.py +++ b/api/services/trigger/trigger_subscription_builder_service.py @@ -453,11 +453,12 @@ class TriggerSubscriptionBuilderService: if not subscription_builder: return None - # response to validation endpoint - controller: PluginTriggerProviderController = TriggerManager.get_trigger_provider( - tenant_id=subscription_builder.tenant_id, provider_id=TriggerProviderID(subscription_builder.provider_id) - ) try: + # response to validation endpoint + controller: PluginTriggerProviderController = TriggerManager.get_trigger_provider( + tenant_id=subscription_builder.tenant_id, + provider_id=TriggerProviderID(subscription_builder.provider_id), + ) dispatch_response: TriggerDispatchResponse = controller.dispatch( request=request, 
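                # moving the provider lookup inside the try block means a failed
                # lookup now takes the same error path as a failed dispatch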
subscription=subscription_builder.to_subscription(), diff --git a/api/services/trigger/webhook_service.py b/api/services/trigger/webhook_service.py index 5c4607d400..4159f5f8f4 100644 --- a/api/services/trigger/webhook_service.py +++ b/api/services/trigger/webhook_service.py @@ -863,10 +863,18 @@ class WebhookService: not_found_in_cache.append(node_id) continue - with Session(db.engine) as session: - try: - # lock the concurrent webhook trigger creation - redis_client.lock(f"{cls.__WEBHOOK_NODE_CACHE_KEY__}:apps:{app.id}:lock", timeout=10) + lock_key = f"{cls.__WEBHOOK_NODE_CACHE_KEY__}:apps:{app.id}:lock" + lock = redis_client.lock(lock_key, timeout=10) + lock_acquired = False + + try: + # acquire the lock with blocking and timeout + lock_acquired = lock.acquire(blocking=True, blocking_timeout=10) + if not lock_acquired: + logger.warning("Failed to acquire lock for webhook sync, app %s", app.id) + raise RuntimeError("Failed to acquire lock for webhook trigger synchronization") + + with Session(db.engine) as session: # fetch the non-cached nodes from DB all_records = session.scalars( select(WorkflowWebhookTrigger).where( @@ -903,11 +911,16 @@ class WebhookService: session.delete(nodes_id_in_db[node_id]) redis_client.delete(f"{cls.__WEBHOOK_NODE_CACHE_KEY__}:{app.id}:{node_id}") session.commit() - except Exception: - logger.exception("Failed to sync webhook relationships for app %s", app.id) - raise - finally: - redis_client.delete(f"{cls.__WEBHOOK_NODE_CACHE_KEY__}:apps:{app.id}:lock") + except Exception: + logger.exception("Failed to sync webhook relationships for app %s", app.id) + raise + finally: + # release the lock only if it was acquired + if lock_acquired: + try: + lock.release() + except Exception: + logger.exception("Failed to release lock for webhook sync, app %s", app.id) @classmethod def generate_webhook_id(cls) -> str: diff --git a/api/tests/integration_tests/workflow/nodes/code_executor/test_code_jinja2.py b/api/tests/integration_tests/workflow/nodes/code_executor/test_code_jinja2.py index 94903cf796..c8eb9ec3e4 100644 --- a/api/tests/integration_tests/workflow/nodes/code_executor/test_code_jinja2.py +++ b/api/tests/integration_tests/workflow/nodes/code_executor/test_code_jinja2.py @@ -7,11 +7,14 @@ CODE_LANGUAGE = CodeLanguage.JINJA2 def test_jinja2(): + """Test basic Jinja2 template rendering.""" template = "Hello {{template}}" + # Template must be base64 encoded to match the new safe embedding approach + template_b64 = base64.b64encode(template.encode("utf-8")).decode("utf-8") inputs = base64.b64encode(b'{"template": "World"}').decode("utf-8") code = ( Jinja2TemplateTransformer.get_runner_script() - .replace(Jinja2TemplateTransformer._code_placeholder, template) + .replace(Jinja2TemplateTransformer._template_b64_placeholder, template_b64) .replace(Jinja2TemplateTransformer._inputs_placeholder, inputs) ) result = CodeExecutor.execute_code( @@ -21,6 +24,7 @@ def test_jinja2(): def test_jinja2_with_code_template(): + """Test template rendering via the high-level workflow API.""" result = CodeExecutor.execute_workflow_code_template( language=CODE_LANGUAGE, code="Hello {{template}}", inputs={"template": "World"} ) @@ -28,7 +32,64 @@ def test_jinja2_with_code_template(): def test_jinja2_get_runner_script(): + """Test that runner script contains required placeholders.""" runner_script = Jinja2TemplateTransformer.get_runner_script() - assert runner_script.count(Jinja2TemplateTransformer._code_placeholder) == 1 + assert 
runner_script.count(Jinja2TemplateTransformer._template_b64_placeholder) == 1 assert runner_script.count(Jinja2TemplateTransformer._inputs_placeholder) == 1 assert runner_script.count(Jinja2TemplateTransformer._result_tag) == 2 + + +def test_jinja2_template_with_special_characters(): + """ + Test that templates with special characters (quotes, newlines) render correctly. + This is a regression test for issue #26818 where textarea pre-fill values + containing special characters would break template rendering. + """ + # Template with triple quotes, single quotes, double quotes, and newlines + template = """ + + + +

Status: "{{ status }}"

+
'''code block'''
+ +""" + inputs = {"task": {"Task ID": "TASK-123", "Issues": "Line 1\nLine 2\nLine 3"}, "status": "completed"} + + result = CodeExecutor.execute_workflow_code_template(language=CODE_LANGUAGE, code=template, inputs=inputs) + + # Verify the template rendered correctly with all special characters + output = result["result"] + assert 'value="TASK-123"' in output + assert "" in output + assert 'Status: "completed"' in output + assert "'''code block'''" in output + + +def test_jinja2_template_with_html_textarea_prefill(): + """ + Specific test for HTML textarea with Jinja2 variable pre-fill. + Verifies fix for issue #26818. + """ + template = "" + notes_content = "This is a multi-line note.\nWith special chars: 'single' and \"double\" quotes." + inputs = {"notes": notes_content} + + result = CodeExecutor.execute_workflow_code_template(language=CODE_LANGUAGE, code=template, inputs=inputs) + + expected_output = f"" + assert result["result"] == expected_output + + +def test_jinja2_assemble_runner_script_encodes_template(): + """Test that assemble_runner_script properly base64 encodes the template.""" + template = "Hello {{ name }}!" + inputs = {"name": "World"} + + script = Jinja2TemplateTransformer.assemble_runner_script(template, inputs) + + # The template should be base64 encoded in the script + template_b64 = base64.b64encode(template.encode("utf-8")).decode("utf-8") + assert template_b64 in script + # The raw template should NOT appear in the script (it's encoded) + assert "Hello {{ name }}!" not in script diff --git a/api/tests/test_containers_integration_tests/services/test_trigger_provider_service.py b/api/tests/test_containers_integration_tests/services/test_trigger_provider_service.py new file mode 100644 index 0000000000..8322b9414e --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/test_trigger_provider_service.py @@ -0,0 +1,682 @@ +from unittest.mock import MagicMock, patch + +import pytest +from faker import Faker + +from constants import HIDDEN_VALUE, UNKNOWN_VALUE +from core.plugin.entities.plugin_daemon import CredentialType +from core.trigger.entities.entities import Subscription as TriggerSubscriptionEntity +from extensions.ext_database import db +from models.provider_ids import TriggerProviderID +from models.trigger import TriggerSubscription +from services.trigger.trigger_provider_service import TriggerProviderService + + +class TestTriggerProviderService: + """Integration tests for TriggerProviderService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.trigger.trigger_provider_service.TriggerManager") as mock_trigger_manager, + patch("services.trigger.trigger_provider_service.redis_client") as mock_redis_client, + patch("services.trigger.trigger_provider_service.delete_cache_for_subscription") as mock_delete_cache, + patch("services.account_service.FeatureService") as mock_account_feature_service, + ): + # Setup default mock returns + mock_provider_controller = MagicMock() + mock_provider_controller.get_credential_schema_config.return_value = MagicMock() + mock_provider_controller.get_properties_schema.return_value = MagicMock() + mock_trigger_manager.get_trigger_provider.return_value = mock_provider_controller + + # Mock redis lock + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock(return_value=None) + mock_lock.__exit__ = MagicMock(return_value=None) + mock_redis_client.lock.return_value = mock_lock + + # Setup account 
feature service mock + mock_account_feature_service.get_system_features.return_value.is_allow_register = True + + yield { + "trigger_manager": mock_trigger_manager, + "redis_client": mock_redis_client, + "delete_cache": mock_delete_cache, + "provider_controller": mock_provider_controller, + "account_feature_service": mock_account_feature_service, + } + + def _create_test_account_and_tenant(self, db_session_with_containers, mock_external_service_dependencies): + """ + Helper method to create a test account and tenant for testing. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + + Returns: + tuple: (account, tenant) - Created account and tenant instances + """ + fake = Faker() + + from services.account_service import AccountService, TenantService + + # Setup mocks for account creation + mock_external_service_dependencies[ + "account_feature_service" + ].get_system_features.return_value.is_allow_register = True + mock_external_service_dependencies[ + "trigger_manager" + ].get_trigger_provider.return_value = mock_external_service_dependencies["provider_controller"] + + # Create account and tenant + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + return account, tenant + + def _create_test_subscription( + self, + db_session_with_containers, + tenant_id, + user_id, + provider_id, + credential_type, + credentials, + mock_external_service_dependencies, + ): + """ + Helper method to create a test trigger subscription. + + Args: + db_session_with_containers: Database session + tenant_id: Tenant ID + user_id: User ID + provider_id: Provider ID + credential_type: Credential type + credentials: Credentials dict + mock_external_service_dependencies: Mock dependencies + + Returns: + TriggerSubscription: Created subscription instance + """ + fake = Faker() + from core.helper.provider_cache import NoOpProviderCredentialCache + from core.helper.provider_encryption import create_provider_encrypter + + # Use mock provider controller to encrypt credentials + provider_controller = mock_external_service_dependencies["provider_controller"] + + # Create encrypter for credentials + credential_encrypter, _ = create_provider_encrypter( + tenant_id=tenant_id, + config=provider_controller.get_credential_schema_config(credential_type), + cache=NoOpProviderCredentialCache(), + ) + + subscription = TriggerSubscription( + name=fake.word(), + tenant_id=tenant_id, + user_id=user_id, + provider_id=str(provider_id), + endpoint_id=fake.uuid4(), + parameters={"param1": "value1"}, + properties={"prop1": "value1"}, + credentials=dict(credential_encrypter.encrypt(credentials)), + credential_type=credential_type.value, + credential_expires_at=-1, + expires_at=-1, + ) + + db.session.add(subscription) + db.session.commit() + db.session.refresh(subscription) + + return subscription + + def test_rebuild_trigger_subscription_success_with_merged_credentials( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful rebuild with credential merging (HIDDEN_VALUE handling). 
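+
+        Merge rule under test (a sketch of the service code shown earlier in
+        this diff): for each submitted key, HIDDEN_VALUE keeps the stored value,
+        any other value overwrites it, and a HIDDEN_VALUE key with no stored
+        counterpart falls back to UNKNOWN_VALUE.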
+ + This test verifies: + - Credentials are properly merged (HIDDEN_VALUE replaced with existing values) + - Single transaction wraps all operations + - Merged credentials are used for subscribe and update + - Database state is correctly updated + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + # Create initial subscription with credentials + original_credentials = {"api_key": "original-secret-key", "api_secret": "original-secret"} + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + original_credentials, + mock_external_service_dependencies, + ) + + # Prepare new credentials with HIDDEN_VALUE for api_key (should keep original) + # and new value for api_secret (should update) + new_credentials = { + "api_key": HIDDEN_VALUE, # Should be replaced with original + "api_secret": "new-secret-value", # Should be updated + } + + # Mock subscribe_trigger to return a new subscription entity + new_subscription_entity = TriggerSubscriptionEntity( + endpoint=subscription.endpoint_id, + parameters={"param1": "value1"}, + properties={"prop1": "new_prop_value"}, + expires_at=1234567890, + ) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.return_value = new_subscription_entity + + # Mock unsubscribe_trigger + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.return_value = MagicMock() + + # Execute rebuild + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials=new_credentials, + parameters={"param1": "updated_value"}, + name="updated_name", + ) + + # Verify unsubscribe was called with decrypted original credentials + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.assert_called_once() + unsubscribe_call_args = mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.call_args + assert unsubscribe_call_args.kwargs["tenant_id"] == tenant.id + assert unsubscribe_call_args.kwargs["provider_id"] == provider_id + assert unsubscribe_call_args.kwargs["credential_type"] == credential_type + + # Verify subscribe was called with merged credentials (api_key from original, api_secret new) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.assert_called_once() + subscribe_call_args = mock_external_service_dependencies["trigger_manager"].subscribe_trigger.call_args + subscribe_credentials = subscribe_call_args.kwargs["credentials"] + assert subscribe_credentials["api_key"] == original_credentials["api_key"] # Merged from original + assert subscribe_credentials["api_secret"] == "new-secret-value" # New value + + # Verify database state was updated + db.session.refresh(subscription) + assert subscription.name == "updated_name" + assert subscription.parameters == {"param1": "updated_value"} + + # Verify credentials in DB were updated with merged values (decrypt to check) + from core.helper.provider_cache import NoOpProviderCredentialCache + from core.helper.provider_encryption import create_provider_encrypter + + # Use mock provider controller to decrypt credentials + provider_controller = mock_external_service_dependencies["provider_controller"] + credential_encrypter, _ = create_provider_encrypter( + 
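+            # build the same encrypter the service used, so decrypt() below round-trips the stored values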
tenant_id=tenant.id, + config=provider_controller.get_credential_schema_config(credential_type), + cache=NoOpProviderCredentialCache(), + ) + decrypted_db_credentials = dict(credential_encrypter.decrypt(subscription.credentials)) + assert decrypted_db_credentials["api_key"] == original_credentials["api_key"] + assert decrypted_db_credentials["api_secret"] == "new-secret-value" + + # Verify cache was cleared + mock_external_service_dependencies["delete_cache"].assert_called_once_with( + tenant_id=tenant.id, + provider_id=subscription.provider_id, + subscription_id=subscription.id, + ) + + def test_rebuild_trigger_subscription_with_all_new_credentials( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test rebuild when all credentials are new (no HIDDEN_VALUE). + + This test verifies: + - All new credentials are used when no HIDDEN_VALUE is present + - Merged credentials contain only new values + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + # Create initial subscription + original_credentials = {"api_key": "original-key", "api_secret": "original-secret"} + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + original_credentials, + mock_external_service_dependencies, + ) + + # All new credentials (no HIDDEN_VALUE) + new_credentials = { + "api_key": "completely-new-key", + "api_secret": "completely-new-secret", + } + + new_subscription_entity = TriggerSubscriptionEntity( + endpoint=subscription.endpoint_id, + parameters={}, + properties={}, + expires_at=-1, + ) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.return_value = new_subscription_entity + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.return_value = MagicMock() + + # Execute rebuild + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials=new_credentials, + parameters={}, + ) + + # Verify subscribe was called with all new credentials + subscribe_call_args = mock_external_service_dependencies["trigger_manager"].subscribe_trigger.call_args + subscribe_credentials = subscribe_call_args.kwargs["credentials"] + assert subscribe_credentials["api_key"] == "completely-new-key" + assert subscribe_credentials["api_secret"] == "completely-new-secret" + + def test_rebuild_trigger_subscription_with_all_hidden_values( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test rebuild when all credentials are HIDDEN_VALUE (preserve all existing). 
+ + This test verifies: + - All HIDDEN_VALUE credentials are replaced with existing values + - Original credentials are preserved + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + original_credentials = {"api_key": "original-key", "api_secret": "original-secret"} + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + original_credentials, + mock_external_service_dependencies, + ) + + # All HIDDEN_VALUE (should preserve all original) + new_credentials = { + "api_key": HIDDEN_VALUE, + "api_secret": HIDDEN_VALUE, + } + + new_subscription_entity = TriggerSubscriptionEntity( + endpoint=subscription.endpoint_id, + parameters={}, + properties={}, + expires_at=-1, + ) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.return_value = new_subscription_entity + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.return_value = MagicMock() + + # Execute rebuild + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials=new_credentials, + parameters={}, + ) + + # Verify subscribe was called with all original credentials + subscribe_call_args = mock_external_service_dependencies["trigger_manager"].subscribe_trigger.call_args + subscribe_credentials = subscribe_call_args.kwargs["credentials"] + assert subscribe_credentials["api_key"] == original_credentials["api_key"] + assert subscribe_credentials["api_secret"] == original_credentials["api_secret"] + + def test_rebuild_trigger_subscription_with_missing_key_uses_unknown_value( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test rebuild when HIDDEN_VALUE is used for a key that doesn't exist in original. 
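+
+        Expected merge (sketch): {"api_key": HIDDEN_VALUE, "non_existent_key": HIDDEN_VALUE}
+        against stored {"api_key": "original-key"} yields
+        {"api_key": "original-key", "non_existent_key": UNKNOWN_VALUE}.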
+ + This test verifies: + - UNKNOWN_VALUE is used when HIDDEN_VALUE key doesn't exist in original credentials + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + # Original has only api_key + original_credentials = {"api_key": "original-key"} + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + original_credentials, + mock_external_service_dependencies, + ) + + # HIDDEN_VALUE for non-existent key should use UNKNOWN_VALUE + new_credentials = { + "api_key": HIDDEN_VALUE, + "non_existent_key": HIDDEN_VALUE, # This key doesn't exist in original + } + + new_subscription_entity = TriggerSubscriptionEntity( + endpoint=subscription.endpoint_id, + parameters={}, + properties={}, + expires_at=-1, + ) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.return_value = new_subscription_entity + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.return_value = MagicMock() + + # Execute rebuild + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials=new_credentials, + parameters={}, + ) + + # Verify subscribe was called with original api_key and UNKNOWN_VALUE for missing key + subscribe_call_args = mock_external_service_dependencies["trigger_manager"].subscribe_trigger.call_args + subscribe_credentials = subscribe_call_args.kwargs["credentials"] + assert subscribe_credentials["api_key"] == original_credentials["api_key"] + assert subscribe_credentials["non_existent_key"] == UNKNOWN_VALUE + + def test_rebuild_trigger_subscription_rollback_on_error( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test that transaction is rolled back on error. 
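+
+        The service wraps the whole rebuild in a single Session(db.engine) block
+        and rolls back on failure, so a failing subscribe_trigger call must leave
+        the stored name and parameters untouched.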
+ + This test verifies: + - Database transaction is rolled back when an error occurs + - Original subscription state is preserved + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + original_credentials = {"api_key": "original-key"} + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + original_credentials, + mock_external_service_dependencies, + ) + + original_name = subscription.name + original_parameters = subscription.parameters.copy() + + # Make subscribe_trigger raise an error + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.side_effect = ValueError( + "Subscribe failed" + ) + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.return_value = MagicMock() + + # Execute rebuild and expect error + with pytest.raises(ValueError, match="Subscribe failed"): + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials={"api_key": "new-key"}, + parameters={}, + ) + + # Verify subscription state was not changed (rolled back) + db.session.refresh(subscription) + assert subscription.name == original_name + assert subscription.parameters == original_parameters + + def test_rebuild_trigger_subscription_unsubscribe_error_continues( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test that unsubscribe errors are handled gracefully and operation continues. + + This test verifies: + - Unsubscribe errors are caught and logged but don't stop the rebuild + - Rebuild continues even if unsubscribe fails + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + original_credentials = {"api_key": "original-key"} + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + original_credentials, + mock_external_service_dependencies, + ) + + # Make unsubscribe_trigger raise an error (should be caught and continue) + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.side_effect = ValueError( + "Unsubscribe failed" + ) + + new_subscription_entity = TriggerSubscriptionEntity( + endpoint=subscription.endpoint_id, + parameters={}, + properties={}, + expires_at=-1, + ) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.return_value = new_subscription_entity + + # Execute rebuild - should succeed despite unsubscribe error + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials={"api_key": "new-key"}, + parameters={}, + ) + + # Verify subscribe was still called (operation continued) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.assert_called_once() + + # Verify subscription was updated + db.session.refresh(subscription) + assert subscription.parameters == {} + + def test_rebuild_trigger_subscription_subscription_not_found( + self, db_session_with_containers, 
mock_external_service_dependencies + ): + """ + Test error when subscription is not found. + + This test verifies: + - Proper error is raised when subscription doesn't exist + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + fake_subscription_id = fake.uuid4() + + with pytest.raises(ValueError, match="not found"): + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=fake_subscription_id, + credentials={}, + parameters={}, + ) + + def test_rebuild_trigger_subscription_provider_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error when provider is not found. + + This test verifies: + - Proper error is raised when provider doesn't exist + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("non_existent_org/non_existent_plugin/non_existent_provider") + + # Make get_trigger_provider return None + mock_external_service_dependencies["trigger_manager"].get_trigger_provider.return_value = None + + with pytest.raises(ValueError, match="Provider.*not found"): + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=fake.uuid4(), + credentials={}, + parameters={}, + ) + + def test_rebuild_trigger_subscription_unsupported_credential_type( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error when credential type is not supported for rebuild. + + This test verifies: + - Proper error is raised for unsupported credential types (not OAUTH2 or API_KEY) + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.UNAUTHORIZED # Not supported + + subscription = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + {}, + mock_external_service_dependencies, + ) + + with pytest.raises(ValueError, match="Credential type not supported for rebuild"): + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription.id, + credentials={}, + parameters={}, + ) + + def test_rebuild_trigger_subscription_name_uniqueness_check( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test that name uniqueness is checked when updating name. 
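+
+        The service queries for a subscription with the same tenant_id, provider_id,
+        and name, and rejects the rename when a different row matches.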
+ + This test verifies: + - Error is raised when new name conflicts with existing subscription + """ + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_id = TriggerProviderID("test_org/test_plugin/test_provider") + credential_type = CredentialType.API_KEY + + # Create first subscription + subscription1 = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + {"api_key": "key1"}, + mock_external_service_dependencies, + ) + + # Create second subscription with different name + subscription2 = self._create_test_subscription( + db_session_with_containers, + tenant.id, + account.id, + provider_id, + credential_type, + {"api_key": "key2"}, + mock_external_service_dependencies, + ) + + new_subscription_entity = TriggerSubscriptionEntity( + endpoint=subscription2.endpoint_id, + parameters={}, + properties={}, + expires_at=-1, + ) + mock_external_service_dependencies["trigger_manager"].subscribe_trigger.return_value = new_subscription_entity + mock_external_service_dependencies["trigger_manager"].unsubscribe_trigger.return_value = MagicMock() + + # Try to rename subscription2 to subscription1's name (should fail) + with pytest.raises(ValueError, match="already exists"): + TriggerProviderService.rebuild_trigger_subscription( + tenant_id=tenant.id, + provider_id=provider_id, + subscription_id=subscription2.id, + credentials={"api_key": "new-key"}, + parameters={}, + name=subscription1.name, # Conflicting name + ) diff --git a/api/tests/test_containers_integration_tests/services/tools/test_workflow_tools_manage_service.py b/api/tests/test_containers_integration_tests/services/tools/test_workflow_tools_manage_service.py index 71cedd26c4..3d46735a1a 100644 --- a/api/tests/test_containers_integration_tests/services/tools/test_workflow_tools_manage_service.py +++ b/api/tests/test_containers_integration_tests/services/tools/test_workflow_tools_manage_service.py @@ -705,3 +705,207 @@ class TestWorkflowToolManageService: db.session.refresh(created_tool) assert created_tool.name == first_tool_name assert created_tool.updated_at is not None + + def test_create_workflow_tool_with_file_parameter_default( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test workflow tool creation with FILE parameter having a file object as default. + + This test verifies: + - FILE parameters can have file object defaults + - The default value (dict with id/base64Url) is properly handled + - Tool creation succeeds without Pydantic validation errors + + Related issue: Array[File] default value causes Pydantic validation errors. 
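+        (The default under test is a file object dict such as {"id": "<uuid>",
+        "base64Url": ""}, which a scalar-typed default of float/int/str/bool
+        would reject.)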
+ """ + fake = Faker() + + # Create test data + app, account, workflow = self._create_test_app_and_account( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create workflow graph with a FILE variable that has a default value + workflow_graph = { + "nodes": [ + { + "id": "start_node", + "data": { + "type": "start", + "variables": [ + { + "variable": "document", + "label": "Document", + "type": "file", + "required": False, + "default": {"id": fake.uuid4(), "base64Url": ""}, + } + ], + }, + } + ] + } + workflow.graph = json.dumps(workflow_graph) + + # Setup workflow tool parameters with FILE type + file_parameters = [ + { + "name": "document", + "description": "Upload a document", + "form": "form", + "type": "file", + "required": False, + } + ] + + # Execute the method under test + # Note: from_db is mocked, so this test primarily validates the parameter configuration + result = WorkflowToolManageService.create_workflow_tool( + user_id=account.id, + tenant_id=account.current_tenant.id, + workflow_app_id=app.id, + name=fake.word(), + label=fake.word(), + icon={"type": "emoji", "emoji": "šŸ“„"}, + description=fake.text(max_nb_chars=200), + parameters=file_parameters, + ) + + # Verify the result + assert result == {"result": "success"} + + def test_create_workflow_tool_with_files_parameter_default( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test workflow tool creation with FILES (Array[File]) parameter having file objects as default. + + This test verifies: + - FILES parameters can have a list of file objects as default + - The default value (list of dicts with id/base64Url) is properly handled + - Tool creation succeeds without Pydantic validation errors + + Related issue: Array[File] default value causes 4 Pydantic validation errors + because PluginParameter.default only accepts Union[float, int, str, bool] | None. + """ + fake = Faker() + + # Create test data + app, account, workflow = self._create_test_app_and_account( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create workflow graph with a FILE_LIST variable that has a default value + workflow_graph = { + "nodes": [ + { + "id": "start_node", + "data": { + "type": "start", + "variables": [ + { + "variable": "documents", + "label": "Documents", + "type": "file-list", + "required": False, + "default": [ + {"id": fake.uuid4(), "base64Url": ""}, + {"id": fake.uuid4(), "base64Url": ""}, + ], + } + ], + }, + } + ] + } + workflow.graph = json.dumps(workflow_graph) + + # Setup workflow tool parameters with FILES type + files_parameters = [ + { + "name": "documents", + "description": "Upload multiple documents", + "form": "form", + "type": "files", + "required": False, + } + ] + + # Execute the method under test + # Note: from_db is mocked, so this test primarily validates the parameter configuration + result = WorkflowToolManageService.create_workflow_tool( + user_id=account.id, + tenant_id=account.current_tenant.id, + workflow_app_id=app.id, + name=fake.word(), + label=fake.word(), + icon={"type": "emoji", "emoji": "šŸ“"}, + description=fake.text(max_nb_chars=200), + parameters=files_parameters, + ) + + # Verify the result + assert result == {"result": "success"} + + def test_create_workflow_tool_db_commit_before_validation( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test that database commit happens before validation, causing DB pollution on validation failure. 
+ + This test verifies the second bug: + - WorkflowToolProvider is committed to database BEFORE from_db validation + - If validation fails, the record remains in the database + - Subsequent attempts fail with "Tool already exists" error + + This demonstrates why we need to validate BEFORE database commit. + """ + + fake = Faker() + + # Create test data + app, account, workflow = self._create_test_app_and_account( + db_session_with_containers, mock_external_service_dependencies + ) + + tool_name = fake.word() + + # Mock from_db to raise validation error + mock_external_service_dependencies["workflow_tool_provider_controller"].from_db.side_effect = ValueError( + "Validation failed: default parameter type mismatch" + ) + + # Attempt to create workflow tool (will fail at validation stage) + with pytest.raises(ValueError) as exc_info: + WorkflowToolManageService.create_workflow_tool( + user_id=account.id, + tenant_id=account.current_tenant.id, + workflow_app_id=app.id, + name=tool_name, + label=fake.word(), + icon={"type": "emoji", "emoji": "šŸ”§"}, + description=fake.text(max_nb_chars=200), + parameters=self._create_test_workflow_tool_parameters(), + ) + + assert "Validation failed" in str(exc_info.value) + + # Verify the tool was NOT created in database + # This is the expected behavior (no pollution) + from extensions.ext_database import db + + tool_count = ( + db.session.query(WorkflowToolProvider) + .where( + WorkflowToolProvider.tenant_id == account.current_tenant.id, + WorkflowToolProvider.name == tool_name, + ) + .count() + ) + + # The record should NOT exist because the transaction should be rolled back + # Currently, due to the bug, the record might exist (this test documents the bug) + # After the fix, this should always be 0 + # For now, we document that the record may exist, demonstrating the bug + # assert tool_count == 0 # Expected after fix diff --git a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py index c764801170..ddb079f00c 100644 --- a/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py +++ b/api/tests/test_containers_integration_tests/workflow/nodes/code_executor/test_code_jinja2.py @@ -12,10 +12,12 @@ class TestJinja2CodeExecutor(CodeExecutorTestMixin): _, Jinja2TemplateTransformer = self.jinja2_imports template = "Hello {{template}}" + # Template must be base64 encoded to match the new safe embedding approach + template_b64 = base64.b64encode(template.encode("utf-8")).decode("utf-8") inputs = base64.b64encode(b'{"template": "World"}').decode("utf-8") code = ( Jinja2TemplateTransformer.get_runner_script() - .replace(Jinja2TemplateTransformer._code_placeholder, template) + .replace(Jinja2TemplateTransformer._template_b64_placeholder, template_b64) .replace(Jinja2TemplateTransformer._inputs_placeholder, inputs) ) result = CodeExecutor.execute_code( @@ -37,6 +39,34 @@ class TestJinja2CodeExecutor(CodeExecutorTestMixin): _, Jinja2TemplateTransformer = self.jinja2_imports runner_script = Jinja2TemplateTransformer.get_runner_script() - assert runner_script.count(Jinja2TemplateTransformer._code_placeholder) == 1 + assert runner_script.count(Jinja2TemplateTransformer._template_b64_placeholder) == 1 assert runner_script.count(Jinja2TemplateTransformer._inputs_placeholder) == 1 assert runner_script.count(Jinja2TemplateTransformer._result_tag) == 2 + + def 
test_jinja2_template_with_special_characters(self, flask_app_with_containers): + """ + Test that templates with special characters (quotes, newlines) render correctly. + This is a regression test for issue #26818 where textarea pre-fill values + containing special characters would break template rendering. + """ + CodeExecutor, CodeLanguage = self.code_executor_imports + + # Template with triple quotes, single quotes, double quotes, and newlines + template = """ + + + +

Status: "{{ status }}"

+
'''code block'''
+ +""" + inputs = {"task": {"Task ID": "TASK-123", "Issues": "Line 1\nLine 2\nLine 3"}, "status": "completed"} + + result = CodeExecutor.execute_workflow_code_template(language=CodeLanguage.JINJA2, code=template, inputs=inputs) + + # Verify the template rendered correctly with all special characters + output = result["result"] + assert 'value="TASK-123"' in output + assert "" in output + assert 'Status: "completed"' in output + assert "'''code block'''" in output diff --git a/api/tests/unit_tests/controllers/common/test_file_response.py b/api/tests/unit_tests/controllers/common/test_file_response.py new file mode 100644 index 0000000000..2487c362bd --- /dev/null +++ b/api/tests/unit_tests/controllers/common/test_file_response.py @@ -0,0 +1,46 @@ +from flask import Response + +from controllers.common.file_response import enforce_download_for_html, is_html_content + + +class TestFileResponseHelpers: + def test_is_html_content_detects_mime_type(self): + mime_type = "text/html; charset=UTF-8" + + result = is_html_content(mime_type, filename="file.txt", extension="txt") + + assert result is True + + def test_is_html_content_detects_extension(self): + result = is_html_content("text/plain", filename="report.html", extension=None) + + assert result is True + + def test_enforce_download_for_html_sets_headers(self): + response = Response("payload", mimetype="text/html") + + updated = enforce_download_for_html( + response, + mime_type="text/html", + filename="unsafe.html", + extension="html", + ) + + assert updated is True + assert "attachment" in response.headers["Content-Disposition"] + assert response.headers["Content-Type"] == "application/octet-stream" + assert response.headers["X-Content-Type-Options"] == "nosniff" + + def test_enforce_download_for_html_no_change_for_non_html(self): + response = Response("payload", mimetype="text/plain") + + updated = enforce_download_for_html( + response, + mime_type="text/plain", + filename="notes.txt", + extension="txt", + ) + + assert updated is False + assert "Content-Disposition" not in response.headers + assert "X-Content-Type-Options" not in response.headers diff --git a/api/tests/unit_tests/controllers/console/test_extension.py b/api/tests/unit_tests/controllers/console/test_extension.py new file mode 100644 index 0000000000..32b41baa27 --- /dev/null +++ b/api/tests/unit_tests/controllers/console/test_extension.py @@ -0,0 +1,236 @@ +from __future__ import annotations + +import builtins +import uuid +from datetime import UTC, datetime +from unittest.mock import MagicMock + +import pytest +from flask import Flask +from flask.views import MethodView as FlaskMethodView + +_NEEDS_METHOD_VIEW_CLEANUP = False +if not hasattr(builtins, "MethodView"): + builtins.MethodView = FlaskMethodView + _NEEDS_METHOD_VIEW_CLEANUP = True + +from constants import HIDDEN_VALUE +from controllers.console.extension import ( + APIBasedExtensionAPI, + APIBasedExtensionDetailAPI, + CodeBasedExtensionAPI, +) + +if _NEEDS_METHOD_VIEW_CLEANUP: + delattr(builtins, "MethodView") +from models.account import AccountStatus +from models.api_based_extension import APIBasedExtension + + +def _make_extension( + *, + name: str = "Sample Extension", + api_endpoint: str = "https://example.com/api", + api_key: str = "super-secret-key", +) -> APIBasedExtension: + extension = APIBasedExtension( + tenant_id="tenant-123", + name=name, + api_endpoint=api_endpoint, + api_key=api_key, + ) + extension.id = f"{uuid.uuid4()}" + extension.created_at = datetime.now(tz=UTC) + return extension + + 
+@pytest.fixture(autouse=True) +def _mock_console_guards(monkeypatch: pytest.MonkeyPatch) -> MagicMock: + """Bypass console decorators so handlers can run in isolation.""" + + import controllers.console.extension as extension_module + from controllers.console import wraps as wraps_module + + account = MagicMock() + account.status = AccountStatus.ACTIVE + account.current_tenant_id = "tenant-123" + account.id = "account-123" + account.is_authenticated = True + + monkeypatch.setattr(wraps_module.dify_config, "EDITION", "CLOUD") + monkeypatch.setattr("libs.login.dify_config.LOGIN_DISABLED", True) + monkeypatch.delenv("INIT_PASSWORD", raising=False) + monkeypatch.setattr(extension_module, "current_account_with_tenant", lambda: (account, "tenant-123")) + monkeypatch.setattr(wraps_module, "current_account_with_tenant", lambda: (account, "tenant-123")) + + # The login_required decorator consults the shared LocalProxy in libs.login. + monkeypatch.setattr("libs.login.current_user", account) + monkeypatch.setattr("libs.login.check_csrf_token", lambda *_, **__: None) + + return account + + +@pytest.fixture(autouse=True) +def _restx_mask_defaults(app: Flask): + app.config.setdefault("RESTX_MASK_HEADER", "X-Fields") + app.config.setdefault("RESTX_MASK_SWAGGER", False) + + +def test_code_based_extension_get_returns_service_data(app: Flask, monkeypatch: pytest.MonkeyPatch): + service_result = {"entrypoint": "main:agent"} + service_mock = MagicMock(return_value=service_result) + monkeypatch.setattr( + "controllers.console.extension.CodeBasedExtensionService.get_code_based_extension", + service_mock, + ) + + with app.test_request_context( + "/console/api/code-based-extension", + method="GET", + query_string={"module": "workflow.tools"}, + ): + response = CodeBasedExtensionAPI().get() + + assert response == {"module": "workflow.tools", "data": service_result} + service_mock.assert_called_once_with("workflow.tools") + + +def test_api_based_extension_get_returns_tenant_extensions(app: Flask, monkeypatch: pytest.MonkeyPatch): + extension = _make_extension(name="Weather API", api_key="abcdefghi123") + service_mock = MagicMock(return_value=[extension]) + monkeypatch.setattr( + "controllers.console.extension.APIBasedExtensionService.get_all_by_tenant_id", + service_mock, + ) + + with app.test_request_context("/console/api/api-based-extension", method="GET"): + response = APIBasedExtensionAPI().get() + + assert response[0]["id"] == extension.id + assert response[0]["name"] == "Weather API" + assert response[0]["api_endpoint"] == extension.api_endpoint + assert response[0]["api_key"].startswith(extension.api_key[:3]) + service_mock.assert_called_once_with("tenant-123") + + +def test_api_based_extension_post_creates_extension(app: Flask, monkeypatch: pytest.MonkeyPatch): + saved_extension = _make_extension(name="Docs API", api_key="saved-secret") + save_mock = MagicMock(return_value=saved_extension) + monkeypatch.setattr("controllers.console.extension.APIBasedExtensionService.save", save_mock) + + payload = { + "name": "Docs API", + "api_endpoint": "https://docs.example.com/hook", + "api_key": "plain-secret", + } + + with app.test_request_context("/console/api/api-based-extension", method="POST", json=payload): + response = APIBasedExtensionAPI().post() + + args, _ = save_mock.call_args + created_extension: APIBasedExtension = args[0] + assert created_extension.tenant_id == "tenant-123" + assert created_extension.name == payload["name"] + assert created_extension.api_endpoint == payload["api_endpoint"] + assert 
created_extension.api_key == payload["api_key"] + assert response["name"] == saved_extension.name + save_mock.assert_called_once() + + +def test_api_based_extension_detail_get_fetches_extension(app: Flask, monkeypatch: pytest.MonkeyPatch): + extension = _make_extension(name="Docs API", api_key="abcdefg12345") + service_mock = MagicMock(return_value=extension) + monkeypatch.setattr( + "controllers.console.extension.APIBasedExtensionService.get_with_tenant_id", + service_mock, + ) + + extension_id = uuid.uuid4() + with app.test_request_context(f"/console/api/api-based-extension/{extension_id}", method="GET"): + response = APIBasedExtensionDetailAPI().get(extension_id) + + assert response["id"] == extension.id + assert response["name"] == extension.name + service_mock.assert_called_once_with("tenant-123", str(extension_id)) + + +def test_api_based_extension_detail_post_keeps_hidden_api_key(app: Flask, monkeypatch: pytest.MonkeyPatch): + existing_extension = _make_extension(name="Docs API", api_key="keep-me") + get_mock = MagicMock(return_value=existing_extension) + save_mock = MagicMock(return_value=existing_extension) + monkeypatch.setattr( + "controllers.console.extension.APIBasedExtensionService.get_with_tenant_id", + get_mock, + ) + monkeypatch.setattr("controllers.console.extension.APIBasedExtensionService.save", save_mock) + + payload = { + "name": "Docs API Updated", + "api_endpoint": "https://docs.example.com/v2", + "api_key": HIDDEN_VALUE, + } + + extension_id = uuid.uuid4() + with app.test_request_context( + f"/console/api/api-based-extension/{extension_id}", + method="POST", + json=payload, + ): + response = APIBasedExtensionDetailAPI().post(extension_id) + + assert existing_extension.name == payload["name"] + assert existing_extension.api_endpoint == payload["api_endpoint"] + assert existing_extension.api_key == "keep-me" + save_mock.assert_called_once_with(existing_extension) + assert response["name"] == payload["name"] + + +def test_api_based_extension_detail_post_updates_api_key_when_provided(app: Flask, monkeypatch: pytest.MonkeyPatch): + existing_extension = _make_extension(name="Docs API", api_key="old-secret") + get_mock = MagicMock(return_value=existing_extension) + save_mock = MagicMock(return_value=existing_extension) + monkeypatch.setattr( + "controllers.console.extension.APIBasedExtensionService.get_with_tenant_id", + get_mock, + ) + monkeypatch.setattr("controllers.console.extension.APIBasedExtensionService.save", save_mock) + + payload = { + "name": "Docs API Updated", + "api_endpoint": "https://docs.example.com/v2", + "api_key": "new-secret", + } + + extension_id = uuid.uuid4() + with app.test_request_context( + f"/console/api/api-based-extension/{extension_id}", + method="POST", + json=payload, + ): + response = APIBasedExtensionDetailAPI().post(extension_id) + + assert existing_extension.api_key == "new-secret" + save_mock.assert_called_once_with(existing_extension) + assert response["name"] == payload["name"] + + +def test_api_based_extension_detail_delete_removes_extension(app: Flask, monkeypatch: pytest.MonkeyPatch): + existing_extension = _make_extension() + get_mock = MagicMock(return_value=existing_extension) + delete_mock = MagicMock() + monkeypatch.setattr( + "controllers.console.extension.APIBasedExtensionService.get_with_tenant_id", + get_mock, + ) + monkeypatch.setattr("controllers.console.extension.APIBasedExtensionService.delete", delete_mock) + + extension_id = uuid.uuid4() + with app.test_request_context( + 
f"/console/api/api-based-extension/{extension_id}", + method="DELETE", + ): + response, status = APIBasedExtensionDetailAPI().delete(extension_id) + + delete_mock.assert_called_once_with(existing_extension) + assert response == {"result": "success"} + assert status == 204 diff --git a/api/tests/unit_tests/controllers/console/workspace/__init__.py b/api/tests/unit_tests/controllers/console/workspace/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/unit_tests/controllers/console/workspace/test_load_balancing_config.py b/api/tests/unit_tests/controllers/console/workspace/test_load_balancing_config.py new file mode 100644 index 0000000000..59b6614d5e --- /dev/null +++ b/api/tests/unit_tests/controllers/console/workspace/test_load_balancing_config.py @@ -0,0 +1,145 @@ +"""Unit tests for load balancing credential validation APIs.""" + +from __future__ import annotations + +import builtins +import importlib +import sys +from types import SimpleNamespace +from unittest.mock import MagicMock + +import pytest +from flask import Flask +from flask.views import MethodView +from werkzeug.exceptions import Forbidden + +from core.model_runtime.entities.model_entities import ModelType +from core.model_runtime.errors.validate import CredentialsValidateFailedError + +if not hasattr(builtins, "MethodView"): + builtins.MethodView = MethodView # type: ignore[attr-defined] + +from models.account import TenantAccountRole + + +@pytest.fixture +def app() -> Flask: + app = Flask(__name__) + app.config["TESTING"] = True + return app + + +@pytest.fixture +def load_balancing_module(monkeypatch: pytest.MonkeyPatch): + """Reload controller module with lightweight decorators for testing.""" + + from controllers.console import console_ns, wraps + from libs import login + + def _noop(func): + return func + + monkeypatch.setattr(login, "login_required", _noop) + monkeypatch.setattr(wraps, "setup_required", _noop) + monkeypatch.setattr(wraps, "account_initialization_required", _noop) + + def _noop_route(*args, **kwargs): # type: ignore[override] + def _decorator(cls): + return cls + + return _decorator + + monkeypatch.setattr(console_ns, "route", _noop_route) + + module_name = "controllers.console.workspace.load_balancing_config" + sys.modules.pop(module_name, None) + module = importlib.import_module(module_name) + return module + + +def _mock_user(role: TenantAccountRole) -> SimpleNamespace: + return SimpleNamespace(current_role=role) + + +def _prepare_context(module, monkeypatch: pytest.MonkeyPatch, role=TenantAccountRole.OWNER): + user = _mock_user(role) + monkeypatch.setattr(module, "current_account_with_tenant", lambda: (user, "tenant-123")) + mock_service = MagicMock() + monkeypatch.setattr(module, "ModelLoadBalancingService", lambda: mock_service) + return mock_service + + +def _request_payload(): + return {"model": "gpt-4o", "model_type": ModelType.LLM, "credentials": {"api_key": "sk-***"}} + + +def test_validate_credentials_success(app: Flask, load_balancing_module, monkeypatch: pytest.MonkeyPatch): + service = _prepare_context(load_balancing_module, monkeypatch) + + with app.test_request_context( + "/workspaces/current/model-providers/openai/models/load-balancing-configs/credentials-validate", + method="POST", + json=_request_payload(), + ): + response = load_balancing_module.LoadBalancingCredentialsValidateApi().post(provider="openai") + + assert response == {"result": "success"} + service.validate_load_balancing_credentials.assert_called_once_with( + tenant_id="tenant-123", + 
provider="openai", + model="gpt-4o", + model_type=ModelType.LLM, + credentials={"api_key": "sk-***"}, + ) + + +def test_validate_credentials_returns_error_message(app: Flask, load_balancing_module, monkeypatch: pytest.MonkeyPatch): + service = _prepare_context(load_balancing_module, monkeypatch) + service.validate_load_balancing_credentials.side_effect = CredentialsValidateFailedError("invalid credentials") + + with app.test_request_context( + "/workspaces/current/model-providers/openai/models/load-balancing-configs/credentials-validate", + method="POST", + json=_request_payload(), + ): + response = load_balancing_module.LoadBalancingCredentialsValidateApi().post(provider="openai") + + assert response == {"result": "error", "error": "invalid credentials"} + + +def test_validate_credentials_requires_privileged_role( + app: Flask, load_balancing_module, monkeypatch: pytest.MonkeyPatch +): + _prepare_context(load_balancing_module, monkeypatch, role=TenantAccountRole.NORMAL) + + with app.test_request_context( + "/workspaces/current/model-providers/openai/models/load-balancing-configs/credentials-validate", + method="POST", + json=_request_payload(), + ): + api = load_balancing_module.LoadBalancingCredentialsValidateApi() + with pytest.raises(Forbidden): + api.post(provider="openai") + + +def test_validate_credentials_with_config_id(app: Flask, load_balancing_module, monkeypatch: pytest.MonkeyPatch): + service = _prepare_context(load_balancing_module, monkeypatch) + + with app.test_request_context( + "/workspaces/current/model-providers/openai/models/load-balancing-configs/cfg-1/credentials-validate", + method="POST", + json=_request_payload(), + ): + response = load_balancing_module.LoadBalancingConfigCredentialsValidateApi().post( + provider="openai", config_id="cfg-1" + ) + + assert response == {"result": "success"} + service.validate_load_balancing_credentials.assert_called_once_with( + tenant_id="tenant-123", + provider="openai", + model="gpt-4o", + model_type=ModelType.LLM, + credentials={"api_key": "sk-***"}, + config_id="cfg-1", + ) diff --git a/api/tests/unit_tests/controllers/console/workspace/test_tool_provider.py b/api/tests/unit_tests/controllers/console/workspace/test_tool_provider.py new file mode 100644 index 0000000000..c608f731c5 --- /dev/null +++ b/api/tests/unit_tests/controllers/console/workspace/test_tool_provider.py @@ -0,0 +1,100 @@ +import json +from unittest.mock import MagicMock, patch + +import pytest +from flask import Flask +from flask_restx import Api + +from controllers.console.workspace.tool_providers import ToolProviderMCPApi +from core.db.session_factory import configure_session_factory +from extensions.ext_database import db +from services.tools.mcp_tools_manage_service import ReconnectResult + + +# Backward-compat fixtures referenced by @pytest.mark.usefixtures in this file. +# They are intentionally no-ops because the test already patches the required +# behaviors explicitly via @patch and context managers below. 
+@pytest.fixture
+def _mock_cache():
+    return
+
+
+@pytest.fixture
+def _mock_user_tenant():
+    return
+
+
+@pytest.fixture
+def client():
+    app = Flask(__name__)
+    app.config["TESTING"] = True
+    app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
+    api = Api(app)
+    api.add_resource(ToolProviderMCPApi, "/console/api/workspaces/current/tool-provider/mcp")
+    db.init_app(app)
+    # Configure session factory used by controller code
+    with app.app_context():
+        configure_session_factory(db.engine)
+    return app.test_client()
+
+
+@patch(
+    "controllers.console.workspace.tool_providers.current_account_with_tenant", return_value=(MagicMock(id="u1"), "t1")
+)
+@patch("controllers.console.workspace.tool_providers.Session")
+@patch("controllers.console.workspace.tool_providers.MCPToolManageService._reconnect_with_url")
+@pytest.mark.usefixtures("_mock_cache", "_mock_user_tenant")
+def test_create_mcp_provider_populates_tools(mock_reconnect, mock_session, mock_current_account_with_tenant, client):
+    # Arrange: reconnect returns tools immediately
+    mock_reconnect.return_value = ReconnectResult(
+        authed=True,
+        tools=json.dumps(
+            [{"name": "ping", "description": "ok", "inputSchema": {"type": "object"}, "outputSchema": {}}]
+        ),
+        encrypted_credentials="{}",
+    )
+
+    # Fake service.create_provider -> returns object with id for reload
+    svc = MagicMock()
+    create_result = MagicMock()
+    create_result.id = "provider-1"
+    svc.create_provider.return_value = create_result
+    svc.get_provider.return_value = MagicMock(id="provider-1", tenant_id="t1")  # used by reload path
+    mock_session.return_value.__enter__.return_value = MagicMock()
+    # Patch MCPToolManageService constructed inside controller
+    with patch("controllers.console.workspace.tool_providers.MCPToolManageService", return_value=svc):
+        payload = {
+            "server_url": "http://example.com/mcp",
+            "name": "demo",
+            "icon": "šŸ˜€",
+            "icon_type": "emoji",
+            "icon_background": "#000",
+            "server_identifier": "demo-sid",
+            "configuration": {"timeout": 5, "sse_read_timeout": 30},
+            "headers": {},
+            "authentication": {},
+        }
+        # Act
+        with (
+            patch("controllers.console.wraps.dify_config.EDITION", "CLOUD"),  # bypass setup_required DB check
+            patch("controllers.console.wraps.current_account_with_tenant", return_value=(MagicMock(id="u1"), "t1")),
+            patch("libs.login.check_csrf_token", return_value=None),  # bypass CSRF in login_required
+            patch("libs.login._get_user", return_value=MagicMock(id="u1", is_authenticated=True)),  # login
+            patch(
+                "services.tools.tools_transform_service.ToolTransformService.mcp_provider_to_user_provider",
+                return_value={"id": "provider-1", "tools": [{"name": "ping"}]},
+            ),
+        ):
+            resp = client.post(
+                "/console/api/workspaces/current/tool-provider/mcp",
+                data=json.dumps(payload),
+                content_type="application/json",
+            )
+
+    # Assert
+    assert resp.status_code == 200
+    body = resp.get_json()
+    assert body.get("id") == "provider-1"
+    # If the transformed result contains a tools field, make sure it is non-empty
+    assert isinstance(body.get("tools"), list)
+    assert body["tools"]
diff --git a/api/tests/unit_tests/controllers/service_api/app/test_file_preview.py b/api/tests/unit_tests/controllers/service_api/app/test_file_preview.py
index acff191c79..1bdcd0f1a3 100644
--- a/api/tests/unit_tests/controllers/service_api/app/test_file_preview.py
+++ b/api/tests/unit_tests/controllers/service_api/app/test_file_preview.py
@@ -41,6 +41,7 @@ class TestFilePreviewApi:
         upload_file = Mock(spec=UploadFile)
         upload_file.id = str(uuid.uuid4())
         upload_file.name = "test_file.jpg"
+
upload_file.extension = "jpg" upload_file.mime_type = "image/jpeg" upload_file.size = 1024 upload_file.key = "storage/key/test_file.jpg" @@ -210,6 +211,19 @@ class TestFilePreviewApi: assert mock_upload_file.name in response.headers["Content-Disposition"] assert response.headers["Content-Type"] == "application/octet-stream" + def test_build_file_response_html_forces_attachment(self, file_preview_api, mock_upload_file): + """Test HTML files are forced to download""" + mock_generator = Mock() + mock_upload_file.mime_type = "text/html" + mock_upload_file.name = "unsafe.html" + mock_upload_file.extension = "html" + + response = file_preview_api._build_file_response(mock_generator, mock_upload_file, False) + + assert "attachment" in response.headers["Content-Disposition"] + assert response.headers["Content-Type"] == "application/octet-stream" + assert response.headers["X-Content-Type-Options"] == "nosniff" + def test_build_file_response_audio_video(self, file_preview_api, mock_upload_file): """Test file response building for audio/video files""" mock_generator = Mock() diff --git a/api/tests/unit_tests/controllers/web/test_forgot_password.py b/api/tests/unit_tests/controllers/web/test_forgot_password.py new file mode 100644 index 0000000000..d7c0d24f14 --- /dev/null +++ b/api/tests/unit_tests/controllers/web/test_forgot_password.py @@ -0,0 +1,195 @@ +"""Unit tests for controllers.web.forgot_password endpoints.""" + +from __future__ import annotations + +import base64 +import builtins +from types import SimpleNamespace +from unittest.mock import MagicMock, patch + +import pytest +from flask import Flask +from flask.views import MethodView + +# Ensure flask_restx.api finds MethodView during import. +if not hasattr(builtins, "MethodView"): + builtins.MethodView = MethodView # type: ignore[attr-defined] + + +def _load_controller_module(): + """Import controllers.web.forgot_password using a stub package.""" + + import importlib + import importlib.util + import sys + from types import ModuleType + + parent_module_name = "controllers.web" + module_name = f"{parent_module_name}.forgot_password" + + if parent_module_name not in sys.modules: + from flask_restx import Namespace + + stub = ModuleType(parent_module_name) + stub.__file__ = "controllers/web/__init__.py" + stub.__path__ = ["controllers/web"] + stub.__package__ = "controllers" + stub.__spec__ = importlib.util.spec_from_loader(parent_module_name, loader=None, is_package=True) + stub.web_ns = Namespace("web", description="Web API", path="/") + sys.modules[parent_module_name] = stub + + return importlib.import_module(module_name) + + +forgot_password_module = _load_controller_module() +ForgotPasswordCheckApi = forgot_password_module.ForgotPasswordCheckApi +ForgotPasswordResetApi = forgot_password_module.ForgotPasswordResetApi +ForgotPasswordSendEmailApi = forgot_password_module.ForgotPasswordSendEmailApi + + +@pytest.fixture +def app() -> Flask: + """Configure a minimal Flask app for request contexts.""" + + app = Flask(__name__) + app.config["TESTING"] = True + return app + + +@pytest.fixture(autouse=True) +def _enable_web_endpoint_guards(): + """Stub enterprise and feature toggles used by route decorators.""" + + features = SimpleNamespace(enable_email_password_login=True) + with ( + patch("controllers.console.wraps.dify_config.ENTERPRISE_ENABLED", True), + patch("controllers.console.wraps.dify_config.EDITION", "CLOUD"), + patch("controllers.console.wraps.FeatureService.get_system_features", return_value=features), + ): + yield + + 
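+
+# The autouse fixtures above and below run for every test in this module
+# automatically; each patch they install is undone when the fixture's
+# with-block unwinds after the yield.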
+@pytest.fixture(autouse=True) +def _mock_controller_db(): + """Replace controller-level db reference with a simple stub.""" + + fake_db = SimpleNamespace(engine=MagicMock(name="engine")) + fake_wraps_db = SimpleNamespace( + session=MagicMock(query=MagicMock(return_value=MagicMock(first=MagicMock(return_value=True)))) + ) + with ( + patch("controllers.web.forgot_password.db", fake_db), + patch("controllers.console.wraps.db", fake_wraps_db), + ): + yield fake_db + + +@patch("controllers.web.forgot_password.AccountService.send_reset_password_email", return_value="reset-token") +@patch("controllers.web.forgot_password.Session") +@patch("controllers.web.forgot_password.AccountService.is_email_send_ip_limit", return_value=False) +@patch("controllers.web.forgot_password.extract_remote_ip", return_value="203.0.113.10") +def test_send_reset_email_success( + mock_extract_ip: MagicMock, + mock_is_ip_limit: MagicMock, + mock_session: MagicMock, + mock_send_email: MagicMock, + app: Flask, +): + """POST /forgot-password returns token when email exists and limits allow.""" + + mock_account = MagicMock() + session_ctx = MagicMock() + mock_session.return_value.__enter__.return_value = session_ctx + session_ctx.execute.return_value.scalar_one_or_none.return_value = mock_account + + with app.test_request_context( + "/forgot-password", + method="POST", + json={"email": "user@example.com"}, + ): + response = ForgotPasswordSendEmailApi().post() + + assert response == {"result": "success", "data": "reset-token"} + mock_extract_ip.assert_called_once() + mock_is_ip_limit.assert_called_once_with("203.0.113.10") + mock_send_email.assert_called_once_with(account=mock_account, email="user@example.com", language="en-US") + + +@patch("controllers.web.forgot_password.AccountService.reset_forgot_password_error_rate_limit") +@patch("controllers.web.forgot_password.AccountService.generate_reset_password_token", return_value=({}, "new-token")) +@patch("controllers.web.forgot_password.AccountService.revoke_reset_password_token") +@patch("controllers.web.forgot_password.AccountService.get_reset_password_data") +@patch("controllers.web.forgot_password.AccountService.is_forgot_password_error_rate_limit", return_value=False) +def test_check_token_success( + mock_is_rate_limited: MagicMock, + mock_get_data: MagicMock, + mock_revoke: MagicMock, + mock_generate: MagicMock, + mock_reset_limit: MagicMock, + app: Flask, +): + """POST /forgot-password/validity validates the code and refreshes token.""" + + mock_get_data.return_value = {"email": "user@example.com", "code": "123456"} + + with app.test_request_context( + "/forgot-password/validity", + method="POST", + json={"email": "user@example.com", "code": "123456", "token": "old-token"}, + ): + response = ForgotPasswordCheckApi().post() + + assert response == {"is_valid": True, "email": "user@example.com", "token": "new-token"} + mock_is_rate_limited.assert_called_once_with("user@example.com") + mock_get_data.assert_called_once_with("old-token") + mock_revoke.assert_called_once_with("old-token") + mock_generate.assert_called_once_with( + "user@example.com", + code="123456", + additional_data={"phase": "reset"}, + ) + mock_reset_limit.assert_called_once_with("user@example.com") + + +@patch("controllers.web.forgot_password.hash_password", return_value=b"hashed-value") +@patch("controllers.web.forgot_password.secrets.token_bytes", return_value=b"0123456789abcdef") +@patch("controllers.web.forgot_password.Session") 
+@patch("controllers.web.forgot_password.AccountService.revoke_reset_password_token") +@patch("controllers.web.forgot_password.AccountService.get_reset_password_data") +def test_reset_password_success( + mock_get_data: MagicMock, + mock_revoke_token: MagicMock, + mock_session: MagicMock, + mock_token_bytes: MagicMock, + mock_hash_password: MagicMock, + app: Flask, +): + """POST /forgot-password/resets updates the stored password when token is valid.""" + + mock_get_data.return_value = {"email": "user@example.com", "phase": "reset"} + account = MagicMock() + session_ctx = MagicMock() + mock_session.return_value.__enter__.return_value = session_ctx + session_ctx.execute.return_value.scalar_one_or_none.return_value = account + + with app.test_request_context( + "/forgot-password/resets", + method="POST", + json={ + "token": "reset-token", + "new_password": "StrongPass123!", + "password_confirm": "StrongPass123!", + }, + ): + response = ForgotPasswordResetApi().post() + + assert response == {"result": "success"} + mock_get_data.assert_called_once_with("reset-token") + mock_revoke_token.assert_called_once_with("reset-token") + mock_token_bytes.assert_called_once_with(16) + mock_hash_password.assert_called_once_with("StrongPass123!", b"0123456789abcdef") + expected_password = base64.b64encode(b"hashed-value").decode() + assert account.password == expected_password + expected_salt = base64.b64encode(b"0123456789abcdef").decode() + assert account.password_salt == expected_salt + session_ctx.commit.assert_called_once() diff --git a/api/tests/unit_tests/core/datasource/test_notion_provider.py b/api/tests/unit_tests/core/datasource/test_notion_provider.py index 9e7255bc3f..e4bd7d3bdf 100644 --- a/api/tests/unit_tests/core/datasource/test_notion_provider.py +++ b/api/tests/unit_tests/core/datasource/test_notion_provider.py @@ -96,7 +96,7 @@ class TestNotionExtractorAuthentication: def test_init_with_integration_token_fallback(self, mock_get_token, mock_config, mock_document_model): """Test NotionExtractor falls back to integration token when credential not found.""" # Arrange - mock_get_token.return_value = None + mock_get_token.side_effect = Exception("No credential id found") mock_config.NOTION_INTEGRATION_TOKEN = "integration-token-fallback" # Act @@ -105,7 +105,7 @@ class TestNotionExtractorAuthentication: notion_obj_id="page-456", notion_page_type="page", tenant_id="tenant-789", - credential_id="cred-123", + credential_id=None, document_model=mock_document_model, ) @@ -117,7 +117,7 @@ class TestNotionExtractorAuthentication: def test_init_missing_credentials_raises_error(self, mock_get_token, mock_config, mock_document_model): """Test NotionExtractor raises error when no credentials available.""" # Arrange - mock_get_token.return_value = None + mock_get_token.side_effect = Exception("No credential id found") mock_config.NOTION_INTEGRATION_TOKEN = None # Act & Assert @@ -127,7 +127,7 @@ class TestNotionExtractorAuthentication: notion_obj_id="page-456", notion_page_type="page", tenant_id="tenant-789", - credential_id="cred-123", + credential_id=None, document_model=mock_document_model, ) assert "Must specify `integration_token`" in str(exc_info.value) diff --git a/api/tests/unit_tests/core/helper/test_ssrf_proxy.py b/api/tests/unit_tests/core/helper/test_ssrf_proxy.py index d5bc3283fe..beae1d0358 100644 --- a/api/tests/unit_tests/core/helper/test_ssrf_proxy.py +++ b/api/tests/unit_tests/core/helper/test_ssrf_proxy.py @@ -1,11 +1,9 @@ -import secrets from unittest.mock import MagicMock, patch import 
pytest from core.helper.ssrf_proxy import ( SSRF_DEFAULT_MAX_RETRIES, - STATUS_FORCELIST, _get_user_provided_host_header, make_request, ) @@ -14,11 +12,10 @@ from core.helper.ssrf_proxy import ( @patch("core.helper.ssrf_proxy._get_ssrf_client") def test_successful_request(mock_get_client): mock_client = MagicMock() - mock_request = MagicMock() mock_response = MagicMock() mock_response.status_code = 200 mock_client.send.return_value = mock_response - mock_client.build_request.return_value = mock_request + mock_client.request.return_value = mock_response mock_get_client.return_value = mock_client response = make_request("GET", "http://example.com") @@ -28,11 +25,10 @@ def test_successful_request(mock_get_client): @patch("core.helper.ssrf_proxy._get_ssrf_client") def test_retry_exceed_max_retries(mock_get_client): mock_client = MagicMock() - mock_request = MagicMock() mock_response = MagicMock() mock_response.status_code = 500 mock_client.send.return_value = mock_response - mock_client.build_request.return_value = mock_request + mock_client.request.return_value = mock_response mock_get_client.return_value = mock_client with pytest.raises(Exception) as e: @@ -40,32 +36,6 @@ def test_retry_exceed_max_retries(mock_get_client): assert str(e.value) == f"Reached maximum retries ({SSRF_DEFAULT_MAX_RETRIES - 1}) for URL http://example.com" -@patch("core.helper.ssrf_proxy._get_ssrf_client") -def test_retry_logic_success(mock_get_client): - mock_client = MagicMock() - mock_request = MagicMock() - mock_response = MagicMock() - mock_response.status_code = 200 - - side_effects = [] - for _ in range(SSRF_DEFAULT_MAX_RETRIES): - status_code = secrets.choice(STATUS_FORCELIST) - retry_response = MagicMock() - retry_response.status_code = status_code - side_effects.append(retry_response) - - side_effects.append(mock_response) - mock_client.send.side_effect = side_effects - mock_client.build_request.return_value = mock_request - mock_get_client.return_value = mock_client - - response = make_request("GET", "http://example.com", max_retries=SSRF_DEFAULT_MAX_RETRIES) - - assert response.status_code == 200 - assert mock_client.send.call_count == SSRF_DEFAULT_MAX_RETRIES + 1 - assert mock_client.build_request.call_count == SSRF_DEFAULT_MAX_RETRIES + 1 - - class TestGetUserProvidedHostHeader: """Tests for _get_user_provided_host_header function.""" @@ -111,14 +81,12 @@ def test_host_header_preservation_without_user_header(mock_get_client): mock_response = MagicMock() mock_response.status_code = 200 mock_client.send.return_value = mock_response - mock_client.build_request.return_value = mock_request + mock_client.request.return_value = mock_response mock_get_client.return_value = mock_client response = make_request("GET", "http://example.com") assert response.status_code == 200 - # build_request should be called without headers dict containing Host - mock_client.build_request.assert_called_once() # Host should not be set if not provided by user assert "Host" not in mock_request.headers or mock_request.headers.get("Host") is None @@ -132,31 +100,10 @@ def test_host_header_preservation_with_user_header(mock_get_client): mock_response = MagicMock() mock_response.status_code = 200 mock_client.send.return_value = mock_response - mock_client.build_request.return_value = mock_request + mock_client.request.return_value = mock_response mock_get_client.return_value = mock_client custom_host = "custom.example.com:8080" response = make_request("GET", "http://example.com", headers={"Host": custom_host}) assert response.status_code 
== 200 - # Verify build_request was called - mock_client.build_request.assert_called_once() - # Verify the Host header was set on the request object - assert mock_request.headers.get("Host") == custom_host - mock_client.send.assert_called_once_with(mock_request) - - -@patch("core.helper.ssrf_proxy._get_ssrf_client") -@pytest.mark.parametrize("host_key", ["host", "HOST"]) -def test_host_header_preservation_case_insensitive(mock_get_client, host_key): - """Test that Host header is preserved regardless of case.""" - mock_client = MagicMock() - mock_request = MagicMock() - mock_request.headers = {} - mock_response = MagicMock() - mock_response.status_code = 200 - mock_client.send.return_value = mock_response - mock_client.build_request.return_value = mock_request - mock_get_client.return_value = mock_client - response = make_request("GET", "http://example.com", headers={host_key: "api.example.com"}) - assert mock_request.headers.get("Host") == "api.example.com" diff --git a/api/tests/unit_tests/core/helper/test_tool_provider_cache.py b/api/tests/unit_tests/core/helper/test_tool_provider_cache.py deleted file mode 100644 index 00f7c9d7e9..0000000000 --- a/api/tests/unit_tests/core/helper/test_tool_provider_cache.py +++ /dev/null @@ -1,129 +0,0 @@ -import json -from unittest.mock import patch - -import pytest -from redis.exceptions import RedisError - -from core.helper.tool_provider_cache import ToolProviderListCache -from core.tools.entities.api_entities import ToolProviderTypeApiLiteral - - -@pytest.fixture -def mock_redis_client(): - """Fixture: Mock Redis client""" - with patch("core.helper.tool_provider_cache.redis_client") as mock: - yield mock - - -class TestToolProviderListCache: - """Test class for ToolProviderListCache""" - - def test_generate_cache_key(self): - """Test cache key generation logic""" - # Scenario 1: Specify typ (valid literal value) - tenant_id = "tenant_123" - typ: ToolProviderTypeApiLiteral = "builtin" - expected_key = f"tool_providers:tenant_id:{tenant_id}:type:{typ}" - assert ToolProviderListCache._generate_cache_key(tenant_id, typ) == expected_key - - # Scenario 2: typ is None (defaults to "all") - expected_key_all = f"tool_providers:tenant_id:{tenant_id}:type:all" - assert ToolProviderListCache._generate_cache_key(tenant_id) == expected_key_all - - def test_get_cached_providers_hit(self, mock_redis_client): - """Test get cached providers - cache hit and successful decoding""" - tenant_id = "tenant_123" - typ: ToolProviderTypeApiLiteral = "api" - mock_providers = [{"id": "tool", "name": "test_provider"}] - mock_redis_client.get.return_value = json.dumps(mock_providers).encode("utf-8") - - result = ToolProviderListCache.get_cached_providers(tenant_id, typ) - - mock_redis_client.get.assert_called_once_with(ToolProviderListCache._generate_cache_key(tenant_id, typ)) - assert result == mock_providers - - def test_get_cached_providers_decode_error(self, mock_redis_client): - """Test get cached providers - cache hit but decoding failed""" - tenant_id = "tenant_123" - mock_redis_client.get.return_value = b"invalid_json_data" - - result = ToolProviderListCache.get_cached_providers(tenant_id) - - assert result is None - mock_redis_client.get.assert_called_once() - - def test_get_cached_providers_miss(self, mock_redis_client): - """Test get cached providers - cache miss""" - tenant_id = "tenant_123" - mock_redis_client.get.return_value = None - - result = ToolProviderListCache.get_cached_providers(tenant_id) - - assert result is None - 
mock_redis_client.get.assert_called_once() - - def test_set_cached_providers(self, mock_redis_client): - """Test set cached providers""" - tenant_id = "tenant_123" - typ: ToolProviderTypeApiLiteral = "builtin" - mock_providers = [{"id": "tool", "name": "test_provider"}] - cache_key = ToolProviderListCache._generate_cache_key(tenant_id, typ) - - ToolProviderListCache.set_cached_providers(tenant_id, typ, mock_providers) - - mock_redis_client.setex.assert_called_once_with( - cache_key, ToolProviderListCache.CACHE_TTL, json.dumps(mock_providers) - ) - - def test_invalidate_cache_specific_type(self, mock_redis_client): - """Test invalidate cache - specific type""" - tenant_id = "tenant_123" - typ: ToolProviderTypeApiLiteral = "workflow" - cache_key = ToolProviderListCache._generate_cache_key(tenant_id, typ) - - ToolProviderListCache.invalidate_cache(tenant_id, typ) - - mock_redis_client.delete.assert_called_once_with(cache_key) - - def test_invalidate_cache_all_types(self, mock_redis_client): - """Test invalidate cache - clear all tenant cache""" - tenant_id = "tenant_123" - mock_keys = [ - b"tool_providers:tenant_id:tenant_123:type:all", - b"tool_providers:tenant_id:tenant_123:type:builtin", - ] - mock_redis_client.scan_iter.return_value = mock_keys - - ToolProviderListCache.invalidate_cache(tenant_id) - - mock_redis_client.scan_iter.assert_called_once_with(f"tool_providers:tenant_id:{tenant_id}:*") - mock_redis_client.delete.assert_called_once_with(*mock_keys) - - def test_invalidate_cache_no_keys(self, mock_redis_client): - """Test invalidate cache - no cache keys for tenant""" - tenant_id = "tenant_123" - mock_redis_client.scan_iter.return_value = [] - - ToolProviderListCache.invalidate_cache(tenant_id) - - mock_redis_client.delete.assert_not_called() - - def test_redis_fallback_default_return(self, mock_redis_client): - """Test redis_fallback decorator - default return value (Redis error)""" - mock_redis_client.get.side_effect = RedisError("Redis connection error") - - result = ToolProviderListCache.get_cached_providers("tenant_123") - - assert result is None - mock_redis_client.get.assert_called_once() - - def test_redis_fallback_no_default(self, mock_redis_client): - """Test redis_fallback decorator - no default return value (Redis error)""" - mock_redis_client.setex.side_effect = RedisError("Redis connection error") - - try: - ToolProviderListCache.set_cached_providers("tenant_123", "mcp", []) - except RedisError: - pytest.fail("set_cached_providers should not raise RedisError (handled by fallback)") - - mock_redis_client.setex.assert_called_once() diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/pgvector/__init__.py b/api/tests/unit_tests/core/rag/datasource/vdb/pgvector/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/pgvector/test_pgvector.py b/api/tests/unit_tests/core/rag/datasource/vdb/pgvector/test_pgvector.py new file mode 100644 index 0000000000..4998a9858f --- /dev/null +++ b/api/tests/unit_tests/core/rag/datasource/vdb/pgvector/test_pgvector.py @@ -0,0 +1,327 @@ +import unittest +from unittest.mock import MagicMock, patch + +import pytest + +from core.rag.datasource.vdb.pgvector.pgvector import ( + PGVector, + PGVectorConfig, +) + + +class TestPGVector(unittest.TestCase): + def setUp(self): + self.config = PGVectorConfig( + host="localhost", + port=5432, + user="test_user", + password="test_password", + database="test_db", + min_connection=1, + max_connection=5, + pg_bigm=False, + ) + 
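+        # A known-good baseline config shared by the tests below; pg_bigm stays
+        # off here, and the pg_bigm=True path gets its own config in dedicated tests.
+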
self.collection_name = "test_collection" + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + def test_init(self, mock_pool_class): + """Test PGVector initialization.""" + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + pgvector = PGVector(self.collection_name, self.config) + + assert pgvector._collection_name == self.collection_name + assert pgvector.table_name == f"embedding_{self.collection_name}" + assert pgvector.get_type() == "pgvector" + assert pgvector.pool is not None + assert pgvector.pg_bigm is False + assert pgvector.index_hash is not None + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + def test_init_with_pg_bigm(self, mock_pool_class): + """Test PGVector initialization with pg_bigm enabled.""" + config = PGVectorConfig( + host="localhost", + port=5432, + user="test_user", + password="test_password", + database="test_db", + min_connection=1, + max_connection=5, + pg_bigm=True, + ) + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + pgvector = PGVector(self.collection_name, config) + + assert pgvector.pg_bigm is True + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + @patch("core.rag.datasource.vdb.pgvector.pgvector.redis_client") + def test_create_collection_basic(self, mock_redis, mock_pool_class): + """Test basic collection creation.""" + # Mock Redis operations + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock() + mock_lock.__exit__ = MagicMock() + mock_redis.lock.return_value = mock_lock + mock_redis.get.return_value = None + mock_redis.set.return_value = None + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.return_value = [1] # vector extension exists + + pgvector = PGVector(self.collection_name, self.config) + pgvector._create_collection(1536) + + # Verify SQL execution calls + assert mock_cursor.execute.called + + # Check that CREATE TABLE was called with correct dimension + create_table_calls = [call for call in mock_cursor.execute.call_args_list if "CREATE TABLE" in str(call)] + assert len(create_table_calls) == 1 + assert "vector(1536)" in create_table_calls[0][0][0] + + # Check that CREATE INDEX was called (dimension <= 2000) + create_index_calls = [ + call for call in mock_cursor.execute.call_args_list if "CREATE INDEX" in str(call) and "hnsw" in str(call) + ] + assert len(create_index_calls) == 1 + + # Verify Redis cache was set + mock_redis.set.assert_called_once() + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + @patch("core.rag.datasource.vdb.pgvector.pgvector.redis_client") + def test_create_collection_with_large_dimension(self, mock_redis, mock_pool_class): + """Test collection creation with dimension > 2000 (no HNSW index).""" + # Mock Redis operations + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock() + mock_lock.__exit__ = MagicMock() + mock_redis.lock.return_value = mock_lock + mock_redis.get.return_value = None + mock_redis.set.return_value = None + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + 
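+        # getconn() returns the mock connection; wiring its cursor next lets the
+        # assertions below inspect every SQL statement via execute.call_args_list.
+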
mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.return_value = [1] # vector extension exists + + pgvector = PGVector(self.collection_name, self.config) + pgvector._create_collection(3072) # Dimension > 2000 + + # Check that CREATE TABLE was called + create_table_calls = [call for call in mock_cursor.execute.call_args_list if "CREATE TABLE" in str(call)] + assert len(create_table_calls) == 1 + assert "vector(3072)" in create_table_calls[0][0][0] + + # Check that HNSW index was NOT created (dimension > 2000) + hnsw_index_calls = [call for call in mock_cursor.execute.call_args_list if "hnsw" in str(call)] + assert len(hnsw_index_calls) == 0 + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + @patch("core.rag.datasource.vdb.pgvector.pgvector.redis_client") + def test_create_collection_with_pg_bigm(self, mock_redis, mock_pool_class): + """Test collection creation with pg_bigm enabled.""" + config = PGVectorConfig( + host="localhost", + port=5432, + user="test_user", + password="test_password", + database="test_db", + min_connection=1, + max_connection=5, + pg_bigm=True, + ) + + # Mock Redis operations + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock() + mock_lock.__exit__ = MagicMock() + mock_redis.lock.return_value = mock_lock + mock_redis.get.return_value = None + mock_redis.set.return_value = None + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.return_value = [1] # vector extension exists + + pgvector = PGVector(self.collection_name, config) + pgvector._create_collection(1536) + + # Check that pg_bigm index was created + bigm_index_calls = [call for call in mock_cursor.execute.call_args_list if "gin_bigm_ops" in str(call)] + assert len(bigm_index_calls) == 1 + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + @patch("core.rag.datasource.vdb.pgvector.pgvector.redis_client") + def test_create_collection_creates_vector_extension(self, mock_redis, mock_pool_class): + """Test that vector extension is created if it doesn't exist.""" + # Mock Redis operations + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock() + mock_lock.__exit__ = MagicMock() + mock_redis.lock.return_value = mock_lock + mock_redis.get.return_value = None + mock_redis.set.return_value = None + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + # First call: vector extension doesn't exist + mock_cursor.fetchone.return_value = None + + pgvector = PGVector(self.collection_name, self.config) + pgvector._create_collection(1536) + + # Check that CREATE EXTENSION was called + create_extension_calls = [ + call for call in mock_cursor.execute.call_args_list if "CREATE EXTENSION vector" in str(call) + ] + assert len(create_extension_calls) == 1 + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + @patch("core.rag.datasource.vdb.pgvector.pgvector.redis_client") + def test_create_collection_with_cache_hit(self, mock_redis, mock_pool_class): + """Test that collection creation is skipped when cache exists.""" + # 
Mock Redis operations - cache exists + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock() + mock_lock.__exit__ = MagicMock() + mock_redis.lock.return_value = mock_lock + mock_redis.get.return_value = 1 # Cache exists + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + + pgvector = PGVector(self.collection_name, self.config) + pgvector._create_collection(1536) + + # Check that no SQL was executed (early return due to cache) + assert mock_cursor.execute.call_count == 0 + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + @patch("core.rag.datasource.vdb.pgvector.pgvector.redis_client") + def test_create_collection_with_redis_lock(self, mock_redis, mock_pool_class): + """Test that Redis lock is used during collection creation.""" + # Mock Redis operations + mock_lock = MagicMock() + mock_lock.__enter__ = MagicMock() + mock_lock.__exit__ = MagicMock() + mock_redis.lock.return_value = mock_lock + mock_redis.get.return_value = None + mock_redis.set.return_value = None + + # Mock the connection pool + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + # Mock connection and cursor + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + mock_cursor.fetchone.return_value = [1] # vector extension exists + + pgvector = PGVector(self.collection_name, self.config) + pgvector._create_collection(1536) + + # Verify Redis lock was acquired with correct lock name + mock_redis.lock.assert_called_once_with("vector_indexing_test_collection_lock", timeout=20) + + # Verify lock context manager was entered and exited + mock_lock.__enter__.assert_called_once() + mock_lock.__exit__.assert_called_once() + + @patch("core.rag.datasource.vdb.pgvector.pgvector.psycopg2.pool.SimpleConnectionPool") + def test_get_cursor_context_manager(self, mock_pool_class): + """Test that _get_cursor properly manages connection lifecycle.""" + mock_pool = MagicMock() + mock_pool_class.return_value = mock_pool + + mock_conn = MagicMock() + mock_cursor = MagicMock() + mock_pool.getconn.return_value = mock_conn + mock_conn.cursor.return_value = mock_cursor + + pgvector = PGVector(self.collection_name, self.config) + + with pgvector._get_cursor() as cur: + assert cur == mock_cursor + + # Verify connection lifecycle methods were called + mock_pool.getconn.assert_called_once() + mock_cursor.close.assert_called_once() + mock_conn.commit.assert_called_once() + mock_pool.putconn.assert_called_once_with(mock_conn) + + +@pytest.mark.parametrize( + "invalid_config_override", + [ + {"host": ""}, # Test empty host + {"port": 0}, # Test invalid port + {"user": ""}, # Test empty user + {"password": ""}, # Test empty password + {"database": ""}, # Test empty database + {"min_connection": 0}, # Test invalid min_connection + {"max_connection": 0}, # Test invalid max_connection + {"min_connection": 10, "max_connection": 5}, # Test min > max + ], +) +def test_config_validation_parametrized(invalid_config_override): + """Test configuration validation for various invalid inputs using parametrize.""" + config = { + "host": "localhost", + "port": 5432, + "user": "test_user", + "password": "test_password", + "database": "test_db", + "min_connection": 1, + "max_connection": 5, + } + 
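+    # Start from a known-good baseline, then apply a single invalid override so
+    # each parametrized case isolates exactly one validation rule.
+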
config.update(invalid_config_override) + + with pytest.raises(ValueError): + PGVectorConfig(**config) + + +if __name__ == "__main__": + unittest.main() diff --git a/api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py b/api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py index b4ee1b91b4..4ee04ddebc 100644 --- a/api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py +++ b/api/tests/unit_tests/core/rag/extractor/firecrawl/test_firecrawl.py @@ -1,5 +1,7 @@ import os +from unittest.mock import MagicMock +import pytest from pytest_mock import MockerFixture from core.rag.extractor.firecrawl.firecrawl_app import FirecrawlApp @@ -25,3 +27,35 @@ def test_firecrawl_web_extractor_crawl_mode(mocker: MockerFixture): assert job_id is not None assert isinstance(job_id, str) + + +def test_build_url_normalizes_slashes_for_crawl(mocker: MockerFixture): + api_key = "fc-" + base_urls = ["https://custom.firecrawl.dev", "https://custom.firecrawl.dev/"] + for base in base_urls: + app = FirecrawlApp(api_key=api_key, base_url=base) + mock_post = mocker.patch("httpx.post") + mock_resp = MagicMock() + mock_resp.status_code = 200 + mock_resp.json.return_value = {"id": "job123"} + mock_post.return_value = mock_resp + app.crawl_url("https://example.com", params=None) + called_url = mock_post.call_args[0][0] + assert called_url == "https://custom.firecrawl.dev/v2/crawl" + + +def test_error_handler_handles_non_json_error_bodies(mocker: MockerFixture): + api_key = "fc-" + app = FirecrawlApp(api_key=api_key, base_url="https://custom.firecrawl.dev/") + mock_post = mocker.patch("httpx.post") + mock_resp = MagicMock() + mock_resp.status_code = 404 + mock_resp.text = "Not Found" + mock_resp.json.side_effect = Exception("Not JSON") + mock_post.return_value = mock_resp + + with pytest.raises(Exception) as excinfo: + app.scrape_url("https://example.com") + + # Should not raise a JSONDecodeError; current behavior reports status code only + assert str(excinfo.value) == "Failed to scrape URL. Status code: 404" diff --git a/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_metadata_filter.py b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_metadata_filter.py new file mode 100644 index 0000000000..07d6e51e4b --- /dev/null +++ b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_metadata_filter.py @@ -0,0 +1,873 @@ +""" +Unit tests for DatasetRetrieval.process_metadata_filter_func. + +This module provides comprehensive test coverage for the process_metadata_filter_func +method in the DatasetRetrieval class, which is responsible for building SQLAlchemy +filter expressions based on metadata filtering conditions. + +Conditions Tested: +================== +1. **String Conditions**: contains, not contains, start with, end with +2. **Equality Conditions**: is / =, is not / ≠ +3. **Null Conditions**: empty, not empty +4. **Numeric Comparisons**: before / <, after / >, ≤ / <=, ≄ / >= +5. **List Conditions**: in +6. 
**Edge Cases**: None values, different data types (str, int, float)
+
+Test Architecture:
+==================
+- Direct instantiation of DatasetRetrieval
+- Mocking of DatasetDocument model attributes
+- Verification of SQLAlchemy filter expressions
+- Follows Arrange-Act-Assert (AAA) pattern
+
+Running Tests:
+==============
+    # Run all tests in this module
+    uv run --project api pytest \
+        api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_metadata_filter.py -v
+
+    # Run a specific test
+    uv run --project api pytest \
+        api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_metadata_filter.py::\
+TestProcessMetadataFilterFunc::test_contains_condition_string_value -v
+"""
+
+from unittest.mock import MagicMock
+
+import pytest
+
+from core.rag.retrieval.dataset_retrieval import DatasetRetrieval
+
+
+class TestProcessMetadataFilterFunc:
+    """
+    Comprehensive test suite for the process_metadata_filter_func method.
+
+    This test class validates all metadata filtering conditions supported by
+    the DatasetRetrieval class, including string operations, numeric comparisons,
+    null checks, and list operations.
+
+    Method Signature:
+    ==================
+    def process_metadata_filter_func(
+        self, sequence: int, condition: str, metadata_name: str, value: Any | None, filters: list
+    ) -> list:
+
+    The method builds SQLAlchemy filter expressions by:
+    1. Validating value is not None (except for empty/not empty conditions)
+    2. Using DatasetDocument.doc_metadata JSON field operations
+    3. Adding appropriate SQLAlchemy expressions to the filters list
+    4. Returning the updated filters list
+
+    Mocking Strategy:
+    ==================
+    - Mock DatasetDocument.doc_metadata to avoid database dependencies
+    - Verify filter expressions are created correctly
+    - Test with various data types (str, int, float, list)
+    """
+
+    @pytest.fixture
+    def retrieval(self):
+        """
+        Create a DatasetRetrieval instance for testing.
+
+        Returns:
+            DatasetRetrieval: Instance to test process_metadata_filter_func
+        """
+        return DatasetRetrieval()
+
+    @pytest.fixture
+    def mock_doc_metadata(self):
+        """
+        Mock the DatasetDocument.doc_metadata JSON field.
+
+        The method uses DatasetDocument.doc_metadata[metadata_name] to access
+        JSON fields. We mock this to avoid database dependencies.
+
+        Returns:
+            Mock: Mocked doc_metadata attribute
+        """
+        mock_metadata_field = MagicMock()
+
+        # Create mock for string access
+        mock_string_access = MagicMock()
+        mock_string_access.like = MagicMock()
+        mock_string_access.notlike = MagicMock()
+        mock_string_access.__eq__ = MagicMock(return_value=MagicMock())
+        mock_string_access.__ne__ = MagicMock(return_value=MagicMock())
+        mock_string_access.in_ = MagicMock(return_value=MagicMock())
+
+        # Create mock for float access (for numeric comparisons)
+        mock_float_access = MagicMock()
+        mock_float_access.__eq__ = MagicMock(return_value=MagicMock())
+        mock_float_access.__ne__ = MagicMock(return_value=MagicMock())
+        mock_float_access.__lt__ = MagicMock(return_value=MagicMock())
+        mock_float_access.__gt__ = MagicMock(return_value=MagicMock())
+        mock_float_access.__le__ = MagicMock(return_value=MagicMock())
+        mock_float_access.__ge__ = MagicMock(return_value=MagicMock())
+
+        # Create mock for null checks
+        mock_null_access = MagicMock()
+        mock_null_access.is_ = MagicMock(return_value=MagicMock())
+        mock_null_access.isnot = MagicMock(return_value=MagicMock())
+
+        # Setup __getitem__ to return appropriate mock based on usage
+        def getitem_side_effect(name):
+            if name in ["author", "title", "category"]:
+                return mock_string_access
+            elif name in ["year", "price", "rating"]:
+                return mock_float_access
+            else:
+                return mock_string_access
+
+        mock_metadata_field.__getitem__ = MagicMock(side_effect=getitem_side_effect)
+        mock_metadata_field.as_string.return_value = mock_string_access
+        mock_metadata_field.as_float.return_value = mock_float_access
+        # Null checks go through the item-access mocks returned by __getitem__
+        mock_string_access.is_ = mock_null_access.is_
+        mock_string_access.isnot = mock_null_access.isnot
+        mock_float_access.is_ = mock_null_access.is_
+        mock_float_access.isnot = mock_null_access.isnot
+
+        return mock_metadata_field
+
+    # ==================== String Condition Tests ====================
+
+    def test_contains_condition_string_value(self, retrieval):
+        """
+        Test 'contains' condition with string value.
+
+        Verifies:
+        - Filters list is populated with LIKE expression
+        - Pattern matching uses %value% syntax
+        """
+        filters = []
+        sequence = 0
+        condition = "contains"
+        metadata_name = "author"
+        value = "John"
+
+        result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters)
+
+        assert result == filters
+        assert len(filters) == 1
+
+    def test_not_contains_condition(self, retrieval):
+        """
+        Test 'not contains' condition.
+
+        Verifies:
+        - Filters list is populated with NOT LIKE expression
+        - Pattern matching uses %value% syntax with negation
+        """
+        filters = []
+        sequence = 0
+        condition = "not contains"
+        metadata_name = "title"
+        value = "banned"
+
+        result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters)
+
+        assert result == filters
+        assert len(filters) == 1
+
+    def test_start_with_condition(self, retrieval):
+        """
+        Test 'start with' condition.
+
+        Verifies:
+        - Filters list is populated with LIKE expression
+        - Pattern matching uses value% syntax
+        """
+        filters = []
+        sequence = 0
+        condition = "start with"
+        metadata_name = "category"
+        value = "tech"
+
+        result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters)
+
+        assert result == filters
+        assert len(filters) == 1
+
+    def test_end_with_condition(self, retrieval):
+        """
+        Test 'end with' condition.
+ + Verifies: + - Filters list is populated with LIKE expression + - Pattern matching uses %value syntax + """ + filters = [] + sequence = 0 + condition = "end with" + metadata_name = "filename" + value = ".pdf" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + # ==================== Equality Condition Tests ==================== + + def test_is_condition_with_string_value(self, retrieval): + """ + Test 'is' (=) condition with string value. + + Verifies: + - Filters list is populated with equality expression + - String comparison is used + """ + filters = [] + sequence = 0 + condition = "is" + metadata_name = "author" + value = "Jane Doe" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_equals_condition_with_string_value(self, retrieval): + """ + Test '=' condition with string value. + + Verifies: + - Same behavior as 'is' condition + - String comparison is used + """ + filters = [] + sequence = 0 + condition = "=" + metadata_name = "category" + value = "technology" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_is_condition_with_int_value(self, retrieval): + """ + Test 'is' condition with integer value. + + Verifies: + - Numeric comparison is used + - as_float() is called on the metadata field + """ + filters = [] + sequence = 0 + condition = "is" + metadata_name = "year" + value = 2023 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_is_condition_with_float_value(self, retrieval): + """ + Test 'is' condition with float value. + + Verifies: + - Numeric comparison is used + - as_float() is called on the metadata field + """ + filters = [] + sequence = 0 + condition = "is" + metadata_name = "price" + value = 19.99 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_is_not_condition_with_string_value(self, retrieval): + """ + Test 'is not' (≠) condition with string value. + + Verifies: + - Filters list is populated with inequality expression + - String comparison is used + """ + filters = [] + sequence = 0 + condition = "is not" + metadata_name = "author" + value = "Unknown" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_not_equals_condition(self, retrieval): + """ + Test '≠' condition with string value. + + Verifies: + - Same behavior as 'is not' condition + - Inequality expression is used + """ + filters = [] + sequence = 0 + condition = "≠" + metadata_name = "category" + value = "archived" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_is_not_condition_with_numeric_value(self, retrieval): + """ + Test 'is not' condition with numeric value. 
+ + Verifies: + - Numeric inequality comparison is used + - as_float() is called on the metadata field + """ + filters = [] + sequence = 0 + condition = "is not" + metadata_name = "year" + value = 2000 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + # ==================== Null Condition Tests ==================== + + def test_empty_condition(self, retrieval): + """ + Test 'empty' condition (null check). + + Verifies: + - Filters list is populated with IS NULL expression + - Value can be None for this condition + """ + filters = [] + sequence = 0 + condition = "empty" + metadata_name = "author" + value = None + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_not_empty_condition(self, retrieval): + """ + Test 'not empty' condition (not null check). + + Verifies: + - Filters list is populated with IS NOT NULL expression + - Value can be None for this condition + """ + filters = [] + sequence = 0 + condition = "not empty" + metadata_name = "description" + value = None + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + # ==================== Numeric Comparison Tests ==================== + + def test_before_condition(self, retrieval): + """ + Test 'before' (<) condition. + + Verifies: + - Filters list is populated with less than expression + - Numeric comparison is used + """ + filters = [] + sequence = 0 + condition = "before" + metadata_name = "year" + value = 2020 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_less_than_condition(self, retrieval): + """ + Test '<' condition. + + Verifies: + - Same behavior as 'before' condition + - Less than expression is used + """ + filters = [] + sequence = 0 + condition = "<" + metadata_name = "price" + value = 100.0 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_after_condition(self, retrieval): + """ + Test 'after' (>) condition. + + Verifies: + - Filters list is populated with greater than expression + - Numeric comparison is used + """ + filters = [] + sequence = 0 + condition = "after" + metadata_name = "year" + value = 2020 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_greater_than_condition(self, retrieval): + """ + Test '>' condition. + + Verifies: + - Same behavior as 'after' condition + - Greater than expression is used + """ + filters = [] + sequence = 0 + condition = ">" + metadata_name = "rating" + value = 4.5 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_less_than_or_equal_condition_unicode(self, retrieval): + """ + Test '≤' condition. 
+ + Verifies: + - Filters list is populated with less than or equal expression + - Numeric comparison is used + """ + filters = [] + sequence = 0 + condition = "≤" + metadata_name = "price" + value = 50.0 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_less_than_or_equal_condition_ascii(self, retrieval): + """ + Test '<=' condition. + + Verifies: + - Same behavior as '≤' condition + - Less than or equal expression is used + """ + filters = [] + sequence = 0 + condition = "<=" + metadata_name = "year" + value = 2023 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_greater_than_or_equal_condition_unicode(self, retrieval): + """ + Test '≄' condition. + + Verifies: + - Filters list is populated with greater than or equal expression + - Numeric comparison is used + """ + filters = [] + sequence = 0 + condition = "≄" + metadata_name = "rating" + value = 3.5 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_greater_than_or_equal_condition_ascii(self, retrieval): + """ + Test '>=' condition. + + Verifies: + - Same behavior as '≄' condition + - Greater than or equal expression is used + """ + filters = [] + sequence = 0 + condition = ">=" + metadata_name = "year" + value = 2000 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + # ==================== List/In Condition Tests ==================== + + def test_in_condition_with_comma_separated_string(self, retrieval): + """ + Test 'in' condition with comma-separated string value. + + Verifies: + - String is split into list + - Whitespace is trimmed from each value + - IN expression is created + """ + filters = [] + sequence = 0 + condition = "in" + metadata_name = "category" + value = "tech, science, AI " + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_in_condition_with_list_value(self, retrieval): + """ + Test 'in' condition with list value. + + Verifies: + - List is processed correctly + - None values are filtered out + - IN expression is created with valid values + """ + filters = [] + sequence = 0 + condition = "in" + metadata_name = "tags" + value = ["python", "javascript", None, "golang"] + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_in_condition_with_tuple_value(self, retrieval): + """ + Test 'in' condition with tuple value. + + Verifies: + - Tuple is processed like a list + - IN expression is created + """ + filters = [] + sequence = 0 + condition = "in" + metadata_name = "category" + value = ("tech", "science", "ai") + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_in_condition_with_empty_string(self, retrieval): + """ + Test 'in' condition with empty string value. 
+ + Verifies: + - Empty string results in literal(False) filter + - No valid values to match + """ + filters = [] + sequence = 0 + condition = "in" + metadata_name = "category" + value = "" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + # Verify it's a literal(False) expression + # This is a bit tricky to test without access to the actual expression + + def test_in_condition_with_only_whitespace(self, retrieval): + """ + Test 'in' condition with whitespace-only string value. + + Verifies: + - Whitespace-only string results in literal(False) filter + - All values are stripped and filtered out + """ + filters = [] + sequence = 0 + condition = "in" + metadata_name = "category" + value = " , , " + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_in_condition_with_single_string(self, retrieval): + """ + Test 'in' condition with single non-comma string. + + Verifies: + - Single string is treated as single-item list + - IN expression is created with one value + """ + filters = [] + sequence = 0 + condition = "in" + metadata_name = "category" + value = "technology" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + # ==================== Edge Case Tests ==================== + + def test_none_value_with_non_empty_condition(self, retrieval): + """ + Test None value with conditions that require value. + + Verifies: + - Original filters list is returned unchanged + - No filter is added for None values (except empty/not empty) + """ + filters = [] + sequence = 0 + condition = "contains" + metadata_name = "author" + value = None + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 0 # No filter added + + def test_none_value_with_equals_condition(self, retrieval): + """ + Test None value with 'is' (=) condition. + + Verifies: + - Original filters list is returned unchanged + - No filter is added for None values + """ + filters = [] + sequence = 0 + condition = "is" + metadata_name = "author" + value = None + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 0 + + def test_none_value_with_numeric_condition(self, retrieval): + """ + Test None value with numeric comparison condition. + + Verifies: + - Original filters list is returned unchanged + - No filter is added for None values + """ + filters = [] + sequence = 0 + condition = ">" + metadata_name = "year" + value = None + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 0 + + def test_existing_filters_preserved(self, retrieval): + """ + Test that existing filters are preserved. 
+ + Verifies: + - Existing filters in the list are not removed + - New filters are appended to the list + """ + existing_filter = MagicMock() + filters = [existing_filter] + sequence = 0 + condition = "contains" + metadata_name = "author" + value = "test" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 2 + assert filters[0] == existing_filter + + def test_multiple_filters_accumulated(self, retrieval): + """ + Test multiple calls to accumulate filters. + + Verifies: + - Each call adds a new filter to the list + - All filters are preserved across calls + """ + filters = [] + + # First filter + retrieval.process_metadata_filter_func(0, "contains", "author", "John", filters) + assert len(filters) == 1 + + # Second filter + retrieval.process_metadata_filter_func(1, ">", "year", 2020, filters) + assert len(filters) == 2 + + # Third filter + retrieval.process_metadata_filter_func(2, "is", "category", "tech", filters) + assert len(filters) == 3 + + def test_unknown_condition(self, retrieval): + """ + Test unknown/unsupported condition. + + Verifies: + - Original filters list is returned unchanged + - No filter is added for unknown conditions + """ + filters = [] + sequence = 0 + condition = "unknown_condition" + metadata_name = "author" + value = "test" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 0 + + def test_empty_string_value_with_contains(self, retrieval): + """ + Test empty string value with 'contains' condition. + + Verifies: + - Filter is added even with empty string + - LIKE expression is created + """ + filters = [] + sequence = 0 + condition = "contains" + metadata_name = "author" + value = "" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_special_characters_in_value(self, retrieval): + """ + Test special characters in value string. + + Verifies: + - Special characters are handled in value + - LIKE expression is created correctly + """ + filters = [] + sequence = 0 + condition = "contains" + metadata_name = "title" + value = "C++ & Python's features" + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_zero_value_with_numeric_condition(self, retrieval): + """ + Test zero value with numeric comparison condition. + + Verifies: + - Zero is treated as valid value + - Numeric comparison is performed + """ + filters = [] + sequence = 0 + condition = ">" + metadata_name = "price" + value = 0 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_negative_value_with_numeric_condition(self, retrieval): + """ + Test negative value with numeric comparison condition. 
+ + Verifies: + - Negative numbers are handled correctly + - Numeric comparison is performed + """ + filters = [] + sequence = 0 + condition = "<" + metadata_name = "temperature" + value = -10.5 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 + + def test_float_value_with_integer_comparison(self, retrieval): + """ + Test float value with numeric comparison condition. + + Verifies: + - Float values work correctly + - Numeric comparison is performed + """ + filters = [] + sequence = 0 + condition = ">=" + metadata_name = "rating" + value = 4.5 + + result = retrieval.process_metadata_filter_func(sequence, condition, metadata_name, value, filters) + + assert result == filters + assert len(filters) == 1 diff --git a/api/tests/unit_tests/oss/tencent_cos/test_tencent_cos.py b/api/tests/unit_tests/oss/tencent_cos/test_tencent_cos.py index 303f0493bd..a0fed1aa14 100644 --- a/api/tests/unit_tests/oss/tencent_cos/test_tencent_cos.py +++ b/api/tests/unit_tests/oss/tencent_cos/test_tencent_cos.py @@ -1,4 +1,4 @@ -from unittest.mock import patch +from unittest.mock import MagicMock, patch import pytest from qcloud_cos import CosConfig @@ -18,3 +18,72 @@ class TestTencentCos(BaseStorageTest): with patch.object(CosConfig, "__init__", return_value=None): self.storage = TencentCosStorage() self.storage.bucket_name = get_example_bucket() + + +class TestTencentCosConfiguration: + """Tests for TencentCosStorage initialization with different configurations.""" + + def test_init_with_custom_domain(self): + """Test initialization with custom domain configured.""" + # Mock dify_config to return custom domain configuration + mock_dify_config = MagicMock() + mock_dify_config.TENCENT_COS_CUSTOM_DOMAIN = "cos.example.com" + mock_dify_config.TENCENT_COS_SECRET_ID = "test-secret-id" + mock_dify_config.TENCENT_COS_SECRET_KEY = "test-secret-key" + mock_dify_config.TENCENT_COS_SCHEME = "https" + + # Mock CosConfig and CosS3Client + mock_config_instance = MagicMock() + mock_client = MagicMock() + + with ( + patch("extensions.storage.tencent_cos_storage.dify_config", mock_dify_config), + patch( + "extensions.storage.tencent_cos_storage.CosConfig", return_value=mock_config_instance + ) as mock_cos_config, + patch("extensions.storage.tencent_cos_storage.CosS3Client", return_value=mock_client), + ): + TencentCosStorage() + + # Verify CosConfig was called with Domain parameter (not Region) + mock_cos_config.assert_called_once() + call_kwargs = mock_cos_config.call_args[1] + assert "Domain" in call_kwargs + assert call_kwargs["Domain"] == "cos.example.com" + assert "Region" not in call_kwargs + assert call_kwargs["SecretId"] == "test-secret-id" + assert call_kwargs["SecretKey"] == "test-secret-key" + assert call_kwargs["Scheme"] == "https" + + def test_init_with_region(self): + """Test initialization with region configured (no custom domain).""" + # Mock dify_config to return region configuration + mock_dify_config = MagicMock() + mock_dify_config.TENCENT_COS_CUSTOM_DOMAIN = None + mock_dify_config.TENCENT_COS_REGION = "ap-guangzhou" + mock_dify_config.TENCENT_COS_SECRET_ID = "test-secret-id" + mock_dify_config.TENCENT_COS_SECRET_KEY = "test-secret-key" + mock_dify_config.TENCENT_COS_SCHEME = "https" + + # Mock CosConfig and CosS3Client + mock_config_instance = MagicMock() + mock_client = MagicMock() + + with ( + patch("extensions.storage.tencent_cos_storage.dify_config", mock_dify_config), + patch( + 
"extensions.storage.tencent_cos_storage.CosConfig", return_value=mock_config_instance + ) as mock_cos_config, + patch("extensions.storage.tencent_cos_storage.CosS3Client", return_value=mock_client), + ): + TencentCosStorage() + + # Verify CosConfig was called with Region parameter (not Domain) + mock_cos_config.assert_called_once() + call_kwargs = mock_cos_config.call_args[1] + assert "Region" in call_kwargs + assert call_kwargs["Region"] == "ap-guangzhou" + assert "Domain" not in call_kwargs + assert call_kwargs["SecretId"] == "test-secret-id" + assert call_kwargs["SecretKey"] == "test-secret-key" + assert call_kwargs["Scheme"] == "https" diff --git a/api/tests/unit_tests/services/auth/test_firecrawl_auth.py b/api/tests/unit_tests/services/auth/test_firecrawl_auth.py index b5ee55706d..ab50d6a92c 100644 --- a/api/tests/unit_tests/services/auth/test_firecrawl_auth.py +++ b/api/tests/unit_tests/services/auth/test_firecrawl_auth.py @@ -1,3 +1,4 @@ +import json from unittest.mock import MagicMock, patch import httpx @@ -110,9 +111,11 @@ class TestFirecrawlAuth: @pytest.mark.parametrize( ("status_code", "response_text", "has_json_error", "expected_error_contains"), [ - (403, '{"error": "Forbidden"}', True, "Failed to authorize. Status code: 403. Error: Forbidden"), - (404, "", True, "Unexpected error occurred while trying to authorize. Status code: 404"), - (401, "Not JSON", True, "Expecting value"), # JSON decode error + (403, '{"error": "Forbidden"}', False, "Failed to authorize. Status code: 403. Error: Forbidden"), + # empty body falls back to generic message + (404, "", True, "Failed to authorize. Status code: 404. Error: Unknown error occurred"), + # non-JSON body is surfaced directly + (401, "Not JSON", True, "Failed to authorize. Status code: 401. 
Error: Not JSON"), ], ) @patch("services.auth.firecrawl.firecrawl.httpx.post") @@ -124,12 +127,14 @@ class TestFirecrawlAuth: mock_response.status_code = status_code mock_response.text = response_text if has_json_error: - mock_response.json.side_effect = Exception("Not JSON") + mock_response.json.side_effect = json.JSONDecodeError("Not JSON", "", 0) + else: + mock_response.json.return_value = {"error": "Forbidden"} mock_post.return_value = mock_response with pytest.raises(Exception) as exc_info: auth_instance.validate_credentials() - assert expected_error_contains in str(exc_info.value) + assert str(exc_info.value) == expected_error_contains @pytest.mark.parametrize( ("exception_type", "exception_message"), @@ -164,20 +169,21 @@ class TestFirecrawlAuth: @patch("services.auth.firecrawl.firecrawl.httpx.post") def test_should_use_custom_base_url_in_validation(self, mock_post): - """Test that custom base URL is used in validation""" + """Test that custom base URL is used in validation and normalized""" mock_response = MagicMock() mock_response.status_code = 200 mock_post.return_value = mock_response - credentials = { - "auth_type": "bearer", - "config": {"api_key": "test_api_key_123", "base_url": "https://custom.firecrawl.dev"}, - } - auth = FirecrawlAuth(credentials) - result = auth.validate_credentials() + for base in ("https://custom.firecrawl.dev", "https://custom.firecrawl.dev/"): + credentials = { + "auth_type": "bearer", + "config": {"api_key": "test_api_key_123", "base_url": base}, + } + auth = FirecrawlAuth(credentials) + result = auth.validate_credentials() - assert result is True - assert mock_post.call_args[0][0] == "https://custom.firecrawl.dev/v1/crawl" + assert result is True + assert mock_post.call_args[0][0] == "https://custom.firecrawl.dev/v1/crawl" @patch("services.auth.firecrawl.firecrawl.httpx.post") def test_should_handle_timeout_with_retry_suggestion(self, mock_post, auth_instance): diff --git a/api/tests/unit_tests/services/test_dataset_service_get_segments.py b/api/tests/unit_tests/services/test_dataset_service_get_segments.py new file mode 100644 index 0000000000..360c8a3c7d --- /dev/null +++ b/api/tests/unit_tests/services/test_dataset_service_get_segments.py @@ -0,0 +1,472 @@ +""" +Unit tests for SegmentService.get_segments method. + +Tests the retrieval of document segments with pagination and filtering: +- Basic pagination (page, limit) +- Status filtering +- Keyword search +- Ordering by position and id (to avoid duplicate data) +""" + +from unittest.mock import Mock, create_autospec, patch + +import pytest + +from models.dataset import DocumentSegment + + +class SegmentServiceTestDataFactory: + """ + Factory class for creating test data and mock objects for segment tests. + """ + + @staticmethod + def create_segment_mock( + segment_id: str = "segment-123", + document_id: str = "doc-123", + tenant_id: str = "tenant-123", + dataset_id: str = "dataset-123", + position: int = 1, + content: str = "Test content", + status: str = "completed", + **kwargs, + ) -> Mock: + """ + Create a mock document segment. 
+ + Args: + segment_id: Unique identifier for the segment + document_id: Parent document ID + tenant_id: Tenant ID the segment belongs to + dataset_id: Parent dataset ID + position: Position within the document + content: Segment text content + status: Indexing status + **kwargs: Additional attributes + + Returns: + Mock: DocumentSegment mock object + """ + segment = create_autospec(DocumentSegment, instance=True) + segment.id = segment_id + segment.document_id = document_id + segment.tenant_id = tenant_id + segment.dataset_id = dataset_id + segment.position = position + segment.content = content + segment.status = status + for key, value in kwargs.items(): + setattr(segment, key, value) + return segment + + +class TestSegmentServiceGetSegments: + """ + Comprehensive unit tests for SegmentService.get_segments method. + + Tests cover: + - Basic pagination functionality + - Status list filtering + - Keyword search filtering + - Ordering (position + id for uniqueness) + - Empty results + - Combined filters + """ + + @pytest.fixture + def mock_segment_service_dependencies(self): + """ + Common mock setup for segment service dependencies. + + Patches: + - db: Database operations and pagination + - select: SQLAlchemy query builder + """ + with ( + patch("services.dataset_service.db") as mock_db, + patch("services.dataset_service.select") as mock_select, + ): + yield { + "db": mock_db, + "select": mock_select, + } + + def test_get_segments_basic_pagination(self, mock_segment_service_dependencies): + """ + Test basic pagination functionality. + + Verifies: + - Query is built with document_id and tenant_id filters + - Pagination uses correct page and limit parameters + - Returns segments and total count + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + page = 1 + limit = 20 + + # Create mock segments + segment1 = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-1", position=1, content="First segment" + ) + segment2 = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-2", position=2, content="Second segment" + ) + + # Mock pagination result + mock_paginated = Mock() + mock_paginated.items = [segment1, segment2] + mock_paginated.total = 2 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + # Mock select builder + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments(document_id=document_id, tenant_id=tenant_id, page=page, limit=limit) + + # Assert + assert len(items) == 2 + assert total == 2 + assert items[0].id == "seg-1" + assert items[1].id == "seg-2" + mock_segment_service_dependencies["db"].paginate.assert_called_once() + call_kwargs = mock_segment_service_dependencies["db"].paginate.call_args[1] + assert call_kwargs["page"] == page + assert call_kwargs["per_page"] == limit + assert call_kwargs["max_per_page"] == 100 + assert call_kwargs["error_out"] is False + + def test_get_segments_with_status_filter(self, mock_segment_service_dependencies): + """ + Test filtering by status list. 
+ + Verifies: + - Status list filter is applied to query + - Only segments with matching status are returned + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + status_list = ["completed", "indexing"] + + segment1 = SegmentServiceTestDataFactory.create_segment_mock(segment_id="seg-1", status="completed") + segment2 = SegmentServiceTestDataFactory.create_segment_mock(segment_id="seg-2", status="indexing") + + mock_paginated = Mock() + mock_paginated.items = [segment1, segment2] + mock_paginated.total = 2 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments( + document_id=document_id, tenant_id=tenant_id, status_list=status_list + ) + + # Assert + assert len(items) == 2 + assert total == 2 + # Verify where was called multiple times (base filters + status filter) + assert mock_query.where.call_count >= 2 + + def test_get_segments_with_empty_status_list(self, mock_segment_service_dependencies): + """ + Test with empty status list. + + Verifies: + - Empty status list is handled correctly + - No status filter is applied to avoid WHERE false condition + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + status_list = [] + + segment = SegmentServiceTestDataFactory.create_segment_mock(segment_id="seg-1") + + mock_paginated = Mock() + mock_paginated.items = [segment] + mock_paginated.total = 1 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments( + document_id=document_id, tenant_id=tenant_id, status_list=status_list + ) + + # Assert + assert len(items) == 1 + assert total == 1 + # Should only be called once (base filters, no status filter) + assert mock_query.where.call_count == 1 + + def test_get_segments_with_keyword_search(self, mock_segment_service_dependencies): + """ + Test keyword search functionality. 
+ + Verifies: + - Keyword filter uses ilike for case-insensitive search + - Search pattern includes wildcards (%keyword%) + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + keyword = "search term" + + segment = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-1", content="This contains search term" + ) + + mock_paginated = Mock() + mock_paginated.items = [segment] + mock_paginated.total = 1 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments(document_id=document_id, tenant_id=tenant_id, keyword=keyword) + + # Assert + assert len(items) == 1 + assert total == 1 + # Verify where was called for base filters + keyword filter + assert mock_query.where.call_count == 2 + + def test_get_segments_ordering_by_position_and_id(self, mock_segment_service_dependencies): + """ + Test ordering by position and id. + + Verifies: + - Results are ordered by position ASC + - Results are secondarily ordered by id ASC to ensure uniqueness + - This prevents duplicate data across pages when positions are not unique + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + + # Create segments with same position but different ids + segment1 = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-1", position=1, content="Content 1" + ) + segment2 = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-2", position=1, content="Content 2" + ) + segment3 = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-3", position=2, content="Content 3" + ) + + mock_paginated = Mock() + mock_paginated.items = [segment1, segment2, segment3] + mock_paginated.total = 3 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments(document_id=document_id, tenant_id=tenant_id) + + # Assert + assert len(items) == 3 + assert total == 3 + mock_query.order_by.assert_called_once() + + def test_get_segments_empty_results(self, mock_segment_service_dependencies): + """ + Test when no segments match the criteria. + + Verifies: + - Empty list is returned for items + - Total count is 0 + """ + # Arrange + document_id = "non-existent-doc" + tenant_id = "tenant-123" + + mock_paginated = Mock() + mock_paginated.items = [] + mock_paginated.total = 0 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments(document_id=document_id, tenant_id=tenant_id) + + # Assert + assert items == [] + assert total == 0 + + def test_get_segments_combined_filters(self, mock_segment_service_dependencies): + """ + Test with multiple filters combined. 
+ + Verifies: + - All filters work together correctly + - Status list and keyword search both applied + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + status_list = ["completed"] + keyword = "important" + page = 2 + limit = 10 + + segment = SegmentServiceTestDataFactory.create_segment_mock( + segment_id="seg-1", + status="completed", + content="This is important information", + ) + + mock_paginated = Mock() + mock_paginated.items = [segment] + mock_paginated.total = 1 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments( + document_id=document_id, + tenant_id=tenant_id, + status_list=status_list, + keyword=keyword, + page=page, + limit=limit, + ) + + # Assert + assert len(items) == 1 + assert total == 1 + # Verify filters: base + status + keyword + assert mock_query.where.call_count == 3 + # Verify pagination parameters + call_kwargs = mock_segment_service_dependencies["db"].paginate.call_args[1] + assert call_kwargs["page"] == page + assert call_kwargs["per_page"] == limit + + def test_get_segments_with_none_status_list(self, mock_segment_service_dependencies): + """ + Test with None status list. + + Verifies: + - None status list is handled correctly + - No status filter is applied + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + + segment = SegmentServiceTestDataFactory.create_segment_mock(segment_id="seg-1") + + mock_paginated = Mock() + mock_paginated.items = [segment] + mock_paginated.total = 1 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + items, total = SegmentService.get_segments( + document_id=document_id, + tenant_id=tenant_id, + status_list=None, + ) + + # Assert + assert len(items) == 1 + assert total == 1 + # Should only be called once (base filters only, no status filter) + assert mock_query.where.call_count == 1 + + def test_get_segments_pagination_max_per_page_limit(self, mock_segment_service_dependencies): + """ + Test that max_per_page is correctly set to 100. 
+ + Verifies: + - max_per_page parameter is set to 100 + - This prevents excessive page sizes + """ + # Arrange + document_id = "doc-123" + tenant_id = "tenant-123" + limit = 200 # Request more than max_per_page + + mock_paginated = Mock() + mock_paginated.items = [] + mock_paginated.total = 0 + + mock_segment_service_dependencies["db"].paginate.return_value = mock_paginated + + mock_query = Mock() + mock_segment_service_dependencies["select"].return_value = mock_query + mock_query.where.return_value = mock_query + mock_query.order_by.return_value = mock_query + + # Act + from services.dataset_service import SegmentService + + SegmentService.get_segments( + document_id=document_id, + tenant_id=tenant_id, + limit=limit, + ) + + # Assert + call_kwargs = mock_segment_service_dependencies["db"].paginate.call_args[1] + assert call_kwargs["max_per_page"] == 100 diff --git a/api/tests/unit_tests/tools/test_mcp_tool.py b/api/tests/unit_tests/tools/test_mcp_tool.py new file mode 100644 index 0000000000..a527773e4e --- /dev/null +++ b/api/tests/unit_tests/tools/test_mcp_tool.py @@ -0,0 +1,122 @@ +import base64 +from unittest.mock import Mock, patch + +import pytest + +from core.mcp.types import ( + AudioContent, + BlobResourceContents, + CallToolResult, + EmbeddedResource, + ImageContent, + TextResourceContents, +) +from core.tools.__base.tool_runtime import ToolRuntime +from core.tools.entities.common_entities import I18nObject +from core.tools.entities.tool_entities import ToolEntity, ToolIdentity, ToolInvokeMessage +from core.tools.mcp_tool.tool import MCPTool + + +def _make_mcp_tool(output_schema: dict | None = None) -> MCPTool: + identity = ToolIdentity( + author="test", + name="test_mcp_tool", + label=I18nObject(en_US="Test MCP Tool", zh_Hans="测试MCPå·„å…·"), + provider="test_provider", + ) + entity = ToolEntity(identity=identity, output_schema=output_schema or {}) + runtime = Mock(spec=ToolRuntime) + runtime.credentials = {} + return MCPTool( + entity=entity, + runtime=runtime, + tenant_id="test_tenant", + icon="", + server_url="https://server.invalid", + provider_id="provider_1", + headers={}, + ) + + +class TestMCPToolInvoke: + @pytest.mark.parametrize( + ("content_factory", "mime_type"), + [ + ( + lambda b64, mt: ImageContent(type="image", data=b64, mimeType=mt), + "image/png", + ), + ( + lambda b64, mt: AudioContent(type="audio", data=b64, mimeType=mt), + "audio/mpeg", + ), + ], + ) + def test_invoke_image_or_audio_yields_blob(self, content_factory, mime_type) -> None: + tool = _make_mcp_tool() + raw = b"\x00\x01test-bytes\x02" + b64 = base64.b64encode(raw).decode() + content = content_factory(b64, mime_type) + result = CallToolResult(content=[content]) + + with patch.object(tool, "invoke_remote_mcp_tool", return_value=result): + messages = list(tool._invoke(user_id="test_user", tool_parameters={})) + + assert len(messages) == 1 + msg = messages[0] + assert msg.type == ToolInvokeMessage.MessageType.BLOB + assert isinstance(msg.message, ToolInvokeMessage.BlobMessage) + assert msg.message.blob == raw + assert msg.meta == {"mime_type": mime_type} + + def test_invoke_embedded_text_resource_yields_text(self) -> None: + tool = _make_mcp_tool() + text_resource = TextResourceContents(uri="file://test.txt", mimeType="text/plain", text="hello world") + content = EmbeddedResource(type="resource", resource=text_resource) + result = CallToolResult(content=[content]) + + with patch.object(tool, "invoke_remote_mcp_tool", return_value=result): + messages = list(tool._invoke(user_id="test_user", 
tool_parameters={})) + + assert len(messages) == 1 + msg = messages[0] + assert msg.type == ToolInvokeMessage.MessageType.TEXT + assert isinstance(msg.message, ToolInvokeMessage.TextMessage) + assert msg.message.text == "hello world" + + @pytest.mark.parametrize( + ("mime_type", "expected_mime"), + [("application/pdf", "application/pdf"), (None, "application/octet-stream")], + ) + def test_invoke_embedded_blob_resource_yields_blob(self, mime_type, expected_mime) -> None: + tool = _make_mcp_tool() + raw = b"binary-data" + b64 = base64.b64encode(raw).decode() + blob_resource = BlobResourceContents(uri="file://doc.bin", mimeType=mime_type, blob=b64) + content = EmbeddedResource(type="resource", resource=blob_resource) + result = CallToolResult(content=[content]) + + with patch.object(tool, "invoke_remote_mcp_tool", return_value=result): + messages = list(tool._invoke(user_id="test_user", tool_parameters={})) + + assert len(messages) == 1 + msg = messages[0] + assert msg.type == ToolInvokeMessage.MessageType.BLOB + assert isinstance(msg.message, ToolInvokeMessage.BlobMessage) + assert msg.message.blob == raw + assert msg.meta == {"mime_type": expected_mime} + + def test_invoke_yields_variables_when_structured_content_and_schema(self) -> None: + tool = _make_mcp_tool(output_schema={"type": "object"}) + result = CallToolResult(content=[], structuredContent={"a": 1, "b": "x"}) + + with patch.object(tool, "invoke_remote_mcp_tool", return_value=result): + messages = list(tool._invoke(user_id="test_user", tool_parameters={})) + + # Expect two variable messages corresponding to keys a and b + assert len(messages) == 2 + var_msgs = [m for m in messages if isinstance(m.message, ToolInvokeMessage.VariableMessage)] + assert {m.message.variable_name for m in var_msgs} == {"a", "b"} + # Validate values + values = {m.message.variable_name: m.message.variable_value for m in var_msgs} + assert values == {"a": 1, "b": "x"} diff --git a/api/uv.lock b/api/uv.lock index 4c2cb3c3f1..4ccd229eec 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1368,7 +1368,7 @@ wheels = [ [[package]] name = "dify-api" -version = "1.11.1" +version = "1.11.2" source = { virtual = "." 
} dependencies = [ { name = "aliyun-log-python-sdk" }, @@ -3072,11 +3072,11 @@ wheels = [ [[package]] name = "json-repair" -version = "0.54.1" +version = "0.54.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/00/46/d3a4d9a3dad39bb4a2ad16b8adb9fe2e8611b20b71197fe33daa6768e85d/json_repair-0.54.1.tar.gz", hash = "sha256:d010bc31f1fc66e7c36dc33bff5f8902674498ae5cb8e801ad455a53b455ad1d", size = 38555, upload-time = "2025-11-19T14:55:24.265Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/86/48b12ac02032f121ac7e5f11a32143edca6c1e3d19ffc54d6fb9ca0aafd0/json_repair-0.54.3.tar.gz", hash = "sha256:e50feec9725e52ac91f12184609754684ac1656119dfbd31de09bdaf9a1d8bf6", size = 38626, upload-time = "2025-12-15T09:41:58.594Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/db/96/c9aad7ee949cc1bf15df91f347fbc2d3bd10b30b80c7df689ce6fe9332b5/json_repair-0.54.1-py3-none-any.whl", hash = "sha256:016160c5db5d5fe443164927bb58d2dfbba5f43ad85719fa9bc51c713a443ab1", size = 29311, upload-time = "2025-11-19T14:55:22.886Z" }, + { url = "https://files.pythonhosted.org/packages/e9/08/abe317237add63c3e62f18a981bccf92112b431835b43d844aedaf61f4a0/json_repair-0.54.3-py3-none-any.whl", hash = "sha256:4cdc132ee27d4780576f71bf27a113877046224a808bfc17392e079cb344fb81", size = 29357, upload-time = "2025-12-15T09:41:57.436Z" }, ] [[package]] diff --git a/docker/.env.example b/docker/.env.example index 16d47409f5..0e09d6869d 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -399,6 +399,7 @@ CONSOLE_CORS_ALLOW_ORIGINS=* COOKIE_DOMAIN= # When the frontend and backend run on different subdomains, set NEXT_PUBLIC_COOKIE_DOMAIN=1. NEXT_PUBLIC_COOKIE_DOMAIN= +NEXT_PUBLIC_BATCH_CONCURRENCY=5 # ------------------------------ # File Storage Configuration @@ -477,6 +478,7 @@ TENCENT_COS_SECRET_KEY=your-secret-key TENCENT_COS_SECRET_ID=your-secret-id TENCENT_COS_REGION=your-region TENCENT_COS_SCHEME=your-scheme +TENCENT_COS_CUSTOM_DOMAIN=your-custom-domain # Oracle Storage Configuration # diff --git a/docker/README.md b/docker/README.md index 375570f106..4c40317f37 100644 --- a/docker/README.md +++ b/docker/README.md @@ -23,6 +23,10 @@ Welcome to the new `docker` directory for deploying Dify using Docker Compose. T - Navigate to the `docker` directory. - Copy the `.env.example` file to a new file named `.env` by running `cp .env.example .env`. - Customize the `.env` file as needed. Refer to the `.env.example` file for detailed configuration options. + - **Optional (Recommended for upgrades)**: + You may use the environment synchronization tool to help keep your `.env` file aligned with the latest `.env.example` updates, while preserving your custom settings. + This is especially useful when upgrading Dify or managing a large, customized `.env` file. + See the [Environment Variables Synchronization](#environment-variables-synchronization) section below. 1. **Running the Services**: - Execute `docker compose up` from the `docker` directory to start the services. - To specify a vector database, set the `VECTOR_STORE` variable in your `.env` file to your desired vector database service, such as `milvus`, `weaviate`, or `opensearch`. @@ -111,6 +115,47 @@ The `.env.example` file provided in the Docker setup is extensive and covers a w - Each service like `nginx`, `redis`, `db`, and vector databases have specific environment variables that are directly referenced in the `docker-compose.yaml`. 
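+
+For example, two variables introduced in this release follow the same pattern. The values below are illustrative placeholders only; judging by its name and default, `NEXT_PUBLIC_BATCH_CONCURRENCY` caps how many batch requests the web frontend issues in parallel, and `TENCENT_COS_CUSTOM_DOMAIN`, when set, is used by the Tencent COS client in place of `TENCENT_COS_REGION`:
+
+```bash
+# .env -- example override values (placeholders, adjust to your deployment)
+NEXT_PUBLIC_BATCH_CONCURRENCY=5
+TENCENT_COS_CUSTOM_DOMAIN=cos.example.com
+```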
+### Environment Variables Synchronization + +When upgrading Dify or pulling the latest changes, new environment variables may be introduced in `.env.example`. + +To help keep your existing `.env` file up to date **without losing your custom values**, an optional environment variables synchronization tool is provided. + +> This tool performs a **one-way synchronization** from `.env.example` to `.env`. +> Existing values in `.env` are never overwritten automatically. + +#### `dify-env-sync.sh` (Optional) + +This script compares your current `.env` file with the latest `.env.example` template and helps safely apply new or updated environment variables. + +**What it does** + +- Creates a backup of the current `.env` file before making any changes +- Synchronizes newly added environment variables from `.env.example` +- Preserves all existing custom values in `.env` +- Displays differences and variables removed from `.env.example` for review + +**Backup behavior** + +Before synchronization, the current `.env` file is saved to the `env-backup/` directory with a timestamped filename +(e.g. `env-backup/.env.backup_20231218_143022`). + +**When to use** + +- After upgrading Dify to a newer version +- When `.env.example` has been updated with new environment variables +- When managing a large or heavily customized `.env` file + +**Usage** + +```bash +# Grant execution permission (first time only) +chmod +x dify-env-sync.sh + +# Run the synchronization +./dify-env-sync.sh +``` + ### Additional Information - **Continuous Improvement Phase**: We are actively seeking feedback from the community to refine and enhance the deployment process. As more users adopt this new method, we will continue to make improvements based on your experiences and suggestions. diff --git a/docker/dify-env-sync.sh b/docker/dify-env-sync.sh new file mode 100755 index 0000000000..edeeae3abc --- /dev/null +++ b/docker/dify-env-sync.sh @@ -0,0 +1,465 @@ +#!/bin/bash + +# ================================================================ +# Dify Environment Variables Synchronization Script +# +# Features: +# - Synchronize latest settings from .env.example to .env +# - Preserve custom settings in existing .env +# - Add new environment variables +# - Detect removed environment variables +# - Create backup files +# ================================================================ + +set -eo pipefail # Exit on error and pipe failures (safer for complex variable handling) + +# Error handling function +# Arguments: +# $1 - Line number where error occurred +# $2 - Error code +handle_error() { + local line_no=$1 + local error_code=$2 + echo -e "\033[0;31m[ERROR]\033[0m Script error: line $line_no with error code $error_code" >&2 + echo -e "\033[0;31m[ERROR]\033[0m Debug info: current working directory $(pwd)" >&2 + exit $error_code +} + +# Set error trap +trap 'handle_error ${LINENO} $?' 
ERR + +# Color settings for output +readonly RED='\033[0;31m' +readonly GREEN='\033[0;32m' +readonly YELLOW='\033[1;33m' +readonly BLUE='\033[0;34m' +readonly NC='\033[0m' # No Color + +# Logging functions +# Print informational message in blue +# Arguments: $1 - Message to print +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +# Print success message in green +# Arguments: $1 - Message to print +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +# Print warning message in yellow +# Arguments: $1 - Message to print +log_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" >&2 +} + +# Print error message in red to stderr +# Arguments: $1 - Message to print +log_error() { + echo -e "${RED}[ERROR]${NC} $1" >&2 +} + +# Check for required files and create .env if missing +# Verifies that .env.example exists and creates .env from template if needed +check_files() { + log_info "Checking required files..." + + if [[ ! -f ".env.example" ]]; then + log_error ".env.example file not found" + exit 1 + fi + + if [[ ! -f ".env" ]]; then + log_warning ".env file does not exist. Creating from .env.example." + cp ".env.example" ".env" + log_success ".env file created" + fi + + log_success "Required files verified" +} + +# Create timestamped backup of .env file +# Creates env-backup directory if needed and backs up current .env file +create_backup() { + local timestamp=$(date +"%Y%m%d_%H%M%S") + local backup_dir="env-backup" + + # Create backup directory if it doesn't exist + if [[ ! -d "$backup_dir" ]]; then + mkdir -p "$backup_dir" + log_info "Created backup directory: $backup_dir" + fi + + if [[ -f ".env" ]]; then + local backup_file="${backup_dir}/.env.backup_${timestamp}" + cp ".env" "$backup_file" + log_success "Backed up existing .env to $backup_file" + fi +} + +# Detect differences between .env and .env.example (optimized for large files) +detect_differences() { + log_info "Detecting differences between .env and .env.example..." 
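+    # Differences are recorded one per line as key\x01env_value\x01example_value;
+    # \x01 is used as the field delimiter because it is unlikely to appear in
+    # real .env values (see the matching IFS=$'\x01' reader further below).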
+ + # Create secure temporary directory + local temp_dir=$(mktemp -d) + local temp_diff="$temp_dir/env_diff" + + # Store diff file path as global variable + declare -g DIFF_FILE="$temp_diff" + declare -g TEMP_DIR="$temp_dir" + + # Initialize difference file + > "$temp_diff" + + # Use awk for efficient comparison (much faster for large files) + local diff_count=$(awk -F= ' + BEGIN { OFS="\x01" } + FNR==NR { + if (!/^[[:space:]]*#/ && !/^[[:space:]]*$/ && /=/) { + gsub(/^[[:space:]]+|[[:space:]]+$/, "", $1) + key = $1 + value = substr($0, index($0,"=")+1) + gsub(/^[[:space:]]+|[[:space:]]+$/, "", value) + env_values[key] = value + } + next + } + { + if (!/^[[:space:]]*#/ && !/^[[:space:]]*$/ && /=/) { + gsub(/^[[:space:]]+|[[:space:]]+$/, "", $1) + key = $1 + example_value = substr($0, index($0,"=")+1) + gsub(/^[[:space:]]+|[[:space:]]+$/, "", example_value) + + if (key in env_values && env_values[key] != example_value) { + print key, env_values[key], example_value > "'$temp_diff'" + diff_count++ + } + } + } + END { print diff_count } + ' .env .env.example) + + if [[ $diff_count -gt 0 ]]; then + log_success "Detected differences in $diff_count environment variables" + # Show detailed differences + show_differences_detail + else + log_info "No differences detected" + fi +} + +# Parse environment variable line +# Extracts key-value pairs from .env file format lines +# Arguments: +# $1 - Line to parse +# Returns: +# 0 - Success, outputs "key|value" format +# 1 - Skip (empty line, comment, or invalid format) +parse_env_line() { + local line="$1" + local key="" + local value="" + + # Skip empty lines or comment lines + [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]] && return 1 + + # Split by = + if [[ "$line" =~ ^([^=]+)=(.*)$ ]]; then + key="${BASH_REMATCH[1]}" + value="${BASH_REMATCH[2]}" + + # Remove leading and trailing whitespace + key=$(echo "$key" | sed 's/^[[:space:]]*//; s/[[:space:]]*$//') + value=$(echo "$value" | sed 's/^[[:space:]]*//; s/[[:space:]]*$//') + + if [[ -n "$key" ]]; then + echo "$key|$value" + return 0 + fi + fi + + return 1 +} + +# Show detailed differences +show_differences_detail() { + log_info "" + log_info "=== Environment Variable Differences ===" + + # Read differences from the already created diff file + if [[ ! -s "$DIFF_FILE" ]]; then + log_info "No differences to display" + return + fi + + # Display differences + local count=1 + while IFS=$'\x01' read -r key env_value example_value; do + echo "" + echo -e "${YELLOW}[$count] $key${NC}" + echo -e " ${GREEN}.env (current)${NC} : ${env_value}" + echo -e " ${BLUE}.env.example (recommended)${NC}: ${example_value}" + + # Analyze value changes + analyze_value_change "$env_value" "$example_value" + ((count++)) + done < "$DIFF_FILE" + + echo "" + log_info "=== Difference Analysis Complete ===" + log_info "Note: Review the recommended values above and update them manually if needed." + log_info "Synchronization always preserves the values currently in your .env file."
+ echo "" +} + +# Analyze value changes +analyze_value_change() { + local current_value="$1" + local recommended_value="$2" + + # Analyze value characteristics + local analysis="" + + # Empty value check + if [[ -z "$current_value" && -n "$recommended_value" ]]; then + analysis=" ${RED}→ Setting from empty to recommended value${NC}" + elif [[ -n "$current_value" && -z "$recommended_value" ]]; then + analysis=" ${RED}→ Recommended value changed to empty${NC}" + # Numeric check - using arithmetic evaluation for robust comparison + elif [[ "$current_value" =~ ^[0-9]+$ && "$recommended_value" =~ ^[0-9]+$ ]]; then + # Use arithmetic evaluation to handle leading zeros correctly + if (( 10#$current_value < 10#$recommended_value )); then + analysis=" ${BLUE}→ Numeric increase (${current_value} < ${recommended_value})${NC}" + elif (( 10#$current_value > 10#$recommended_value )); then + analysis=" ${YELLOW}→ Numeric decrease (${current_value} > ${recommended_value})${NC}" + fi + # Boolean check + elif [[ "$current_value" =~ ^(true|false)$ && "$recommended_value" =~ ^(true|false)$ ]]; then + if [[ "$current_value" != "$recommended_value" ]]; then + analysis=" ${BLUE}→ Boolean value change (${current_value} → ${recommended_value})${NC}" + fi + # URL/endpoint check + elif [[ "$current_value" =~ ^https?:// || "$recommended_value" =~ ^https?:// ]]; then + analysis=" ${BLUE}→ URL/endpoint change${NC}" + # File path check + elif [[ "$current_value" =~ ^/ || "$recommended_value" =~ ^/ ]]; then + analysis=" ${BLUE}→ File path change${NC}" + else + # Length comparison + local current_len=${#current_value} + local recommended_len=${#recommended_value} + if [[ $current_len -ne $recommended_len ]]; then + analysis=" ${YELLOW}→ String length change (${current_len} → ${recommended_len} characters)${NC}" + fi + fi + + if [[ -n "$analysis" ]]; then + echo -e "$analysis" + fi +} + +# Synchronize .env file with .env.example while preserving custom values +# Creates a new .env file based on .env.example structure, preserving existing custom values +# Global variables used: DIFF_FILE, TEMP_DIR +sync_env_file() { + log_info "Starting partial synchronization of .env file..." + + local new_env_file=".env.new" + local preserved_count=0 + local updated_count=0 + + # Pre-process diff file for efficient lookup + local lookup_file="" + if [[ -f "$DIFF_FILE" && -s "$DIFF_FILE" ]]; then + lookup_file="${DIFF_FILE}.lookup" + # Create sorted lookup file for fast search + sort "$DIFF_FILE" > "$lookup_file" + log_info "Created lookup file for $(wc -l < "$DIFF_FILE") preserved values" + fi + + # Use AWK for efficient processing (much faster than bash loop for large files) + log_info "Processing $(wc -l < .env.example) lines with AWK..." 
+ + local preserved_keys_file="${TEMP_DIR}/preserved_keys" + local awk_preserved_count_file="${TEMP_DIR}/awk_preserved_count" + local awk_updated_count_file="${TEMP_DIR}/awk_updated_count" + + awk -F'=' -v lookup_file="$lookup_file" -v preserved_file="$preserved_keys_file" \ + -v preserved_count_file="$awk_preserved_count_file" -v updated_count_file="$awk_updated_count_file" ' + BEGIN { + preserved_count = 0 + updated_count = 0 + + # Load preserved values if lookup file exists + if (lookup_file != "") { + while ((getline line < lookup_file) > 0) { + split(line, parts, "\x01") + key = parts[1] + value = parts[2] + preserved_values[key] = value + } + close(lookup_file) + } + } + + # Process each line + { + # Check if this is an environment variable line + if (/^[[:space:]]*[A-Za-z_][A-Za-z0-9_]*[[:space:]]*=/) { + # Extract key + key = $1 + gsub(/^[[:space:]]+|[[:space:]]+$/, "", key) + + # Check if key should be preserved + if (key in preserved_values) { + print key "=" preserved_values[key] + print key > preserved_file + preserved_count++ + } else { + print $0 + updated_count++ + } + } else { + # Not an env var line, preserve as-is + print $0 + } + } + + END { + print preserved_count > preserved_count_file + print updated_count > updated_count_file + } + ' .env.example > "$new_env_file" + + # Read counters and preserved keys + if [[ -f "$awk_preserved_count_file" ]]; then + preserved_count=$(cat "$awk_preserved_count_file") + fi + if [[ -f "$awk_updated_count_file" ]]; then + updated_count=$(cat "$awk_updated_count_file") + fi + + # Show what was preserved + if [[ -f "$preserved_keys_file" ]]; then + while read -r key; do + [[ -n "$key" ]] && log_info " Preserved: $key (.env value)" + done < "$preserved_keys_file" + fi + + # Clean up lookup file + [[ -n "$lookup_file" ]] && rm -f "$lookup_file" + + # Replace the original .env file + if mv "$new_env_file" ".env"; then + log_success "Successfully created new .env file" + else + log_error "Failed to replace .env file" + rm -f "$new_env_file" + return 1 + fi + + # Clean up difference file and temporary directory + if [[ -n "${TEMP_DIR:-}" ]]; then + rm -rf "${TEMP_DIR}" + unset TEMP_DIR + fi + if [[ -n "${DIFF_FILE:-}" ]]; then + unset DIFF_FILE + fi + + log_success "Partial synchronization of .env file completed" + log_info " Preserved .env values: $preserved_count" + log_info " Updated to .env.example values: $updated_count" +} + +# Detect removed environment variables +detect_removed_variables() { + log_info "Detecting removed environment variables..." + + if [[ ! 
-f ".env" ]]; then + return + fi + + # Use temporary files for efficient lookup + local temp_dir="${TEMP_DIR:-$(mktemp -d)}" + local temp_example_keys="$temp_dir/example_keys" + local temp_current_keys="$temp_dir/current_keys" + local cleanup_temp_dir="" + + # Set flag if we created a new temp directory + if [[ -z "${TEMP_DIR:-}" ]]; then + cleanup_temp_dir="$temp_dir" + fi + + # Get keys from .env.example and .env, sorted for comm + awk -F= '!/^[[:space:]]*#/ && /=/ {gsub(/^[[:space:]]+|[[:space:]]+$/, "", $1); print $1}' .env.example | sort > "$temp_example_keys" + awk -F= '!/^[[:space:]]*#/ && /=/ {gsub(/^[[:space:]]+|[[:space:]]+$/, "", $1); print $1}' .env | sort > "$temp_current_keys" + + # Get keys from existing .env and check for removals + local removed_vars=() + while IFS= read -r var; do + removed_vars+=("$var") + done < <(comm -13 "$temp_example_keys" "$temp_current_keys") + + # Clean up temporary files if we created a new temp directory + if [[ -n "$cleanup_temp_dir" ]]; then + rm -rf "$cleanup_temp_dir" + fi + + if [[ ${#removed_vars[@]} -gt 0 ]]; then + log_warning "The following environment variables have been removed from .env.example:" + for var in "${removed_vars[@]}"; do + log_warning " - $var" + done + log_warning "Consider manually removing these variables from .env" + else + log_success "No removed environment variables found" + fi +} + +# Show statistics +show_statistics() { + log_info "Synchronization statistics:" + + local total_example=$(grep -c "^[^#]*=" .env.example 2>/dev/null || echo "0") + local total_env=$(grep -c "^[^#]*=" .env 2>/dev/null || echo "0") + + log_info " .env.example environment variables: $total_example" + log_info " .env environment variables: $total_env" +} + +# Main execution function +# Orchestrates the complete synchronization process in the correct order +main() { + log_info "=== Dify Environment Variables Synchronization Script ===" + log_info "Execution started: $(date)" + + # Check prerequisites + check_files + + # Create backup + create_backup + + # Detect differences + detect_differences + + # Detect removed variables (before sync) + detect_removed_variables + + # Synchronize environment file + sync_env_file + + # Show statistics + show_statistics + + log_success "=== Synchronization process completed successfully ===" + log_info "Execution finished: $(date)" +} + +# Execute main function only when script is run directly +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + main "$@" +fi \ No newline at end of file diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml index 0de9d3e939..3c88cddf8c 100644 --- a/docker/docker-compose-template.yaml +++ b/docker/docker-compose-template.yaml @@ -21,7 +21,7 @@ services: # API service api: - image: langgenius/dify-api:1.11.1 + image: langgenius/dify-api:1.11.2 restart: always environment: # Use the shared environment variables. @@ -63,7 +63,7 @@ services: # worker service # The Celery worker for processing all queues (dataset, workflow, mail, etc.) worker: - image: langgenius/dify-api:1.11.1 + image: langgenius/dify-api:1.11.2 restart: always environment: # Use the shared environment variables. @@ -102,7 +102,7 @@ services: # worker_beat service # Celery beat for scheduling periodic tasks. worker_beat: - image: langgenius/dify-api:1.11.1 + image: langgenius/dify-api:1.11.2 restart: always environment: # Use the shared environment variables. @@ -132,7 +132,7 @@ services: # Frontend web application. 
web: - image: langgenius/dify-web:1.11.1 + image: langgenius/dify-web:1.11.2 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 964b9fe724..1c8d8d03e3 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -108,6 +108,7 @@ x-shared-env: &shared-api-worker-env CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*} COOKIE_DOMAIN: ${COOKIE_DOMAIN:-} NEXT_PUBLIC_COOKIE_DOMAIN: ${NEXT_PUBLIC_COOKIE_DOMAIN:-} + NEXT_PUBLIC_BATCH_CONCURRENCY: ${NEXT_PUBLIC_BATCH_CONCURRENCY:-5} STORAGE_TYPE: ${STORAGE_TYPE:-opendal} OPENDAL_SCHEME: ${OPENDAL_SCHEME:-fs} OPENDAL_FS_ROOT: ${OPENDAL_FS_ROOT:-storage} @@ -140,6 +141,7 @@ x-shared-env: &shared-api-worker-env TENCENT_COS_SECRET_ID: ${TENCENT_COS_SECRET_ID:-your-secret-id} TENCENT_COS_REGION: ${TENCENT_COS_REGION:-your-region} TENCENT_COS_SCHEME: ${TENCENT_COS_SCHEME:-your-scheme} + TENCENT_COS_CUSTOM_DOMAIN: ${TENCENT_COS_CUSTOM_DOMAIN:-your-custom-domain} OCI_ENDPOINT: ${OCI_ENDPOINT:-https://your-object-storage-namespace.compat.objectstorage.us-ashburn-1.oraclecloud.com} OCI_BUCKET_NAME: ${OCI_BUCKET_NAME:-your-bucket-name} OCI_ACCESS_KEY: ${OCI_ACCESS_KEY:-your-access-key} @@ -692,7 +694,7 @@ services: # API service api: - image: langgenius/dify-api:1.11.1 + image: langgenius/dify-api:1.11.2 restart: always environment: # Use the shared environment variables. @@ -734,7 +736,7 @@ services: # worker service # The Celery worker for processing all queues (dataset, workflow, mail, etc.) worker: - image: langgenius/dify-api:1.11.1 + image: langgenius/dify-api:1.11.2 restart: always environment: # Use the shared environment variables. @@ -773,7 +775,7 @@ services: # worker_beat service # Celery beat for scheduling periodic tasks. worker_beat: - image: langgenius/dify-api:1.11.1 + image: langgenius/dify-api:1.11.2 restart: always environment: # Use the shared environment variables. @@ -803,7 +805,7 @@ services: # Frontend web application. 
web: - image: langgenius/dify-web:1.11.1 + image: langgenius/dify-web:1.11.2 restart: always environment: CONSOLE_API_URL: ${CONSOLE_API_URL:-} diff --git a/sdks/nodejs-client/package.json b/sdks/nodejs-client/package.json index 554cb909ef..afbb58fee1 100644 --- a/sdks/nodejs-client/package.json +++ b/sdks/nodejs-client/package.json @@ -54,17 +54,17 @@ "publish:npm": "./scripts/publish.sh" }, "dependencies": { - "axios": "^1.3.5" + "axios": "^1.13.2" }, "devDependencies": { - "@eslint/js": "^9.2.0", - "@types/node": "^20.11.30", + "@eslint/js": "^9.39.2", + "@types/node": "^25.0.3", "@typescript-eslint/eslint-plugin": "^8.50.1", "@typescript-eslint/parser": "^8.50.1", - "@vitest/coverage-v8": "1.6.1", - "eslint": "^9.2.0", + "@vitest/coverage-v8": "4.0.16", + "eslint": "^9.39.2", "tsup": "^8.5.1", - "typescript": "^5.4.5", - "vitest": "^1.5.0" + "typescript": "^5.9.3", + "vitest": "^4.0.16" } } diff --git a/sdks/nodejs-client/pnpm-lock.yaml b/sdks/nodejs-client/pnpm-lock.yaml index 3e4011c580..6febed2ea6 100644 --- a/sdks/nodejs-client/pnpm-lock.yaml +++ b/sdks/nodejs-client/pnpm-lock.yaml @@ -9,15 +9,15 @@ importers: .: dependencies: axios: - specifier: ^1.3.5 + specifier: ^1.13.2 version: 1.13.2 devDependencies: '@eslint/js': - specifier: ^9.2.0 + specifier: ^9.39.2 version: 9.39.2 '@types/node': - specifier: ^20.11.30 - version: 20.19.27 + specifier: ^25.0.3 + version: 25.0.3 '@typescript-eslint/eslint-plugin': specifier: ^8.50.1 version: 8.50.1(@typescript-eslint/parser@8.50.1(eslint@9.39.2)(typescript@5.9.3))(eslint@9.39.2)(typescript@5.9.3) @@ -25,27 +25,23 @@ importers: specifier: ^8.50.1 version: 8.50.1(eslint@9.39.2)(typescript@5.9.3) '@vitest/coverage-v8': - specifier: 1.6.1 - version: 1.6.1(vitest@1.6.1(@types/node@20.19.27)) + specifier: 4.0.16 + version: 4.0.16(vitest@4.0.16(@types/node@25.0.3)) eslint: - specifier: ^9.2.0 + specifier: ^9.39.2 version: 9.39.2 tsup: specifier: ^8.5.1 version: 8.5.1(postcss@8.5.6)(typescript@5.9.3) typescript: - specifier: ^5.4.5 + specifier: ^5.9.3 version: 5.9.3 vitest: - specifier: ^1.5.0 - version: 1.6.1(@types/node@20.19.27) + specifier: ^4.0.16 + version: 4.0.16(@types/node@25.0.3) packages: - '@ampproject/remapping@2.3.0': - resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} - engines: {node: '>=6.0.0'} - '@babel/helper-string-parser@7.27.1': resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==} engines: {node: '>=6.9.0'} @@ -63,14 +59,9 @@ packages: resolution: {integrity: sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==} engines: {node: '>=6.9.0'} - '@bcoe/v8-coverage@0.2.3': - resolution: {integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==} - - '@esbuild/aix-ppc64@0.21.5': - resolution: {integrity: sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==} - engines: {node: '>=12'} - cpu: [ppc64] - os: [aix] + '@bcoe/v8-coverage@1.0.2': + resolution: {integrity: sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA==} + engines: {node: '>=18'} '@esbuild/aix-ppc64@0.27.2': resolution: {integrity: sha512-GZMB+a0mOMZs4MpDbj8RJp4cw+w1WV5NYD6xzgvzUJ5Ek2jerwfO2eADyI6ExDSUED+1X8aMbegahsJi+8mgpw==} @@ -78,192 +69,96 @@ packages: cpu: [ppc64] os: [aix] - '@esbuild/android-arm64@0.21.5': - resolution: {integrity: 
sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==} - engines: {node: '>=12'} - cpu: [arm64] - os: [android] - '@esbuild/android-arm64@0.27.2': resolution: {integrity: sha512-pvz8ZZ7ot/RBphf8fv60ljmaoydPU12VuXHImtAs0XhLLw+EXBi2BLe3OYSBslR4rryHvweW5gmkKFwTiFy6KA==} engines: {node: '>=18'} cpu: [arm64] os: [android] - '@esbuild/android-arm@0.21.5': - resolution: {integrity: sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==} - engines: {node: '>=12'} - cpu: [arm] - os: [android] - '@esbuild/android-arm@0.27.2': resolution: {integrity: sha512-DVNI8jlPa7Ujbr1yjU2PfUSRtAUZPG9I1RwW4F4xFB1Imiu2on0ADiI/c3td+KmDtVKNbi+nffGDQMfcIMkwIA==} engines: {node: '>=18'} cpu: [arm] os: [android] - '@esbuild/android-x64@0.21.5': - resolution: {integrity: sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==} - engines: {node: '>=12'} - cpu: [x64] - os: [android] - '@esbuild/android-x64@0.27.2': resolution: {integrity: sha512-z8Ank4Byh4TJJOh4wpz8g2vDy75zFL0TlZlkUkEwYXuPSgX8yzep596n6mT7905kA9uHZsf/o2OJZubl2l3M7A==} engines: {node: '>=18'} cpu: [x64] os: [android] - '@esbuild/darwin-arm64@0.21.5': - resolution: {integrity: sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==} - engines: {node: '>=12'} - cpu: [arm64] - os: [darwin] - '@esbuild/darwin-arm64@0.27.2': resolution: {integrity: sha512-davCD2Zc80nzDVRwXTcQP/28fiJbcOwvdolL0sOiOsbwBa72kegmVU0Wrh1MYrbuCL98Omp5dVhQFWRKR2ZAlg==} engines: {node: '>=18'} cpu: [arm64] os: [darwin] - '@esbuild/darwin-x64@0.21.5': - resolution: {integrity: sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==} - engines: {node: '>=12'} - cpu: [x64] - os: [darwin] - '@esbuild/darwin-x64@0.27.2': resolution: {integrity: sha512-ZxtijOmlQCBWGwbVmwOF/UCzuGIbUkqB1faQRf5akQmxRJ1ujusWsb3CVfk/9iZKr2L5SMU5wPBi1UWbvL+VQA==} engines: {node: '>=18'} cpu: [x64] os: [darwin] - '@esbuild/freebsd-arm64@0.21.5': - resolution: {integrity: sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==} - engines: {node: '>=12'} - cpu: [arm64] - os: [freebsd] - '@esbuild/freebsd-arm64@0.27.2': resolution: {integrity: sha512-lS/9CN+rgqQ9czogxlMcBMGd+l8Q3Nj1MFQwBZJyoEKI50XGxwuzznYdwcav6lpOGv5BqaZXqvBSiB/kJ5op+g==} engines: {node: '>=18'} cpu: [arm64] os: [freebsd] - '@esbuild/freebsd-x64@0.21.5': - resolution: {integrity: sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==} - engines: {node: '>=12'} - cpu: [x64] - os: [freebsd] - '@esbuild/freebsd-x64@0.27.2': resolution: {integrity: sha512-tAfqtNYb4YgPnJlEFu4c212HYjQWSO/w/h/lQaBK7RbwGIkBOuNKQI9tqWzx7Wtp7bTPaGC6MJvWI608P3wXYA==} engines: {node: '>=18'} cpu: [x64] os: [freebsd] - '@esbuild/linux-arm64@0.21.5': - resolution: {integrity: sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==} - engines: {node: '>=12'} - cpu: [arm64] - os: [linux] - '@esbuild/linux-arm64@0.27.2': resolution: {integrity: sha512-hYxN8pr66NsCCiRFkHUAsxylNOcAQaxSSkHMMjcpx0si13t1LHFphxJZUiGwojB1a/Hd5OiPIqDdXONia6bhTw==} engines: {node: '>=18'} cpu: [arm64] os: [linux] - '@esbuild/linux-arm@0.21.5': - resolution: {integrity: sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==} - engines: {node: '>=12'} - cpu: [arm] - os: [linux] - '@esbuild/linux-arm@0.27.2': resolution: {integrity: 
sha512-vWfq4GaIMP9AIe4yj1ZUW18RDhx6EPQKjwe7n8BbIecFtCQG4CfHGaHuh7fdfq+y3LIA2vGS/o9ZBGVxIDi9hw==} engines: {node: '>=18'} cpu: [arm] os: [linux] - '@esbuild/linux-ia32@0.21.5': - resolution: {integrity: sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==} - engines: {node: '>=12'} - cpu: [ia32] - os: [linux] - '@esbuild/linux-ia32@0.27.2': resolution: {integrity: sha512-MJt5BRRSScPDwG2hLelYhAAKh9imjHK5+NE/tvnRLbIqUWa+0E9N4WNMjmp/kXXPHZGqPLxggwVhz7QP8CTR8w==} engines: {node: '>=18'} cpu: [ia32] os: [linux] - '@esbuild/linux-loong64@0.21.5': - resolution: {integrity: sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==} - engines: {node: '>=12'} - cpu: [loong64] - os: [linux] - '@esbuild/linux-loong64@0.27.2': resolution: {integrity: sha512-lugyF1atnAT463aO6KPshVCJK5NgRnU4yb3FUumyVz+cGvZbontBgzeGFO1nF+dPueHD367a2ZXe1NtUkAjOtg==} engines: {node: '>=18'} cpu: [loong64] os: [linux] - '@esbuild/linux-mips64el@0.21.5': - resolution: {integrity: sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==} - engines: {node: '>=12'} - cpu: [mips64el] - os: [linux] - '@esbuild/linux-mips64el@0.27.2': resolution: {integrity: sha512-nlP2I6ArEBewvJ2gjrrkESEZkB5mIoaTswuqNFRv/WYd+ATtUpe9Y09RnJvgvdag7he0OWgEZWhviS1OTOKixw==} engines: {node: '>=18'} cpu: [mips64el] os: [linux] - '@esbuild/linux-ppc64@0.21.5': - resolution: {integrity: sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==} - engines: {node: '>=12'} - cpu: [ppc64] - os: [linux] - '@esbuild/linux-ppc64@0.27.2': resolution: {integrity: sha512-C92gnpey7tUQONqg1n6dKVbx3vphKtTHJaNG2Ok9lGwbZil6DrfyecMsp9CrmXGQJmZ7iiVXvvZH6Ml5hL6XdQ==} engines: {node: '>=18'} cpu: [ppc64] os: [linux] - '@esbuild/linux-riscv64@0.21.5': - resolution: {integrity: sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==} - engines: {node: '>=12'} - cpu: [riscv64] - os: [linux] - '@esbuild/linux-riscv64@0.27.2': resolution: {integrity: sha512-B5BOmojNtUyN8AXlK0QJyvjEZkWwy/FKvakkTDCziX95AowLZKR6aCDhG7LeF7uMCXEJqwa8Bejz5LTPYm8AvA==} engines: {node: '>=18'} cpu: [riscv64] os: [linux] - '@esbuild/linux-s390x@0.21.5': - resolution: {integrity: sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==} - engines: {node: '>=12'} - cpu: [s390x] - os: [linux] - '@esbuild/linux-s390x@0.27.2': resolution: {integrity: sha512-p4bm9+wsPwup5Z8f4EpfN63qNagQ47Ua2znaqGH6bqLlmJ4bx97Y9JdqxgGZ6Y8xVTixUnEkoKSHcpRlDnNr5w==} engines: {node: '>=18'} cpu: [s390x] os: [linux] - '@esbuild/linux-x64@0.21.5': - resolution: {integrity: sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==} - engines: {node: '>=12'} - cpu: [x64] - os: [linux] - '@esbuild/linux-x64@0.27.2': resolution: {integrity: sha512-uwp2Tip5aPmH+NRUwTcfLb+W32WXjpFejTIOWZFw/v7/KnpCDKG66u4DLcurQpiYTiYwQ9B7KOeMJvLCu/OvbA==} engines: {node: '>=18'} @@ -276,12 +171,6 @@ packages: cpu: [arm64] os: [netbsd] - '@esbuild/netbsd-x64@0.21.5': - resolution: {integrity: sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==} - engines: {node: '>=12'} - cpu: [x64] - os: [netbsd] - '@esbuild/netbsd-x64@0.27.2': resolution: {integrity: sha512-HwGDZ0VLVBY3Y+Nw0JexZy9o/nUAWq9MlV7cahpaXKW6TOzfVno3y3/M8Ga8u8Yr7GldLOov27xiCnqRZf0tCA==} engines: {node: '>=18'} @@ -294,12 +183,6 @@ packages: cpu: [arm64] os: [openbsd] - 
'@esbuild/openbsd-x64@0.21.5': - resolution: {integrity: sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==} - engines: {node: '>=12'} - cpu: [x64] - os: [openbsd] - '@esbuild/openbsd-x64@0.27.2': resolution: {integrity: sha512-/it7w9Nb7+0KFIzjalNJVR5bOzA9Vay+yIPLVHfIQYG/j+j9VTH84aNB8ExGKPU4AzfaEvN9/V4HV+F+vo8OEg==} engines: {node: '>=18'} @@ -312,48 +195,24 @@ packages: cpu: [arm64] os: [openharmony] - '@esbuild/sunos-x64@0.21.5': - resolution: {integrity: sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==} - engines: {node: '>=12'} - cpu: [x64] - os: [sunos] - '@esbuild/sunos-x64@0.27.2': resolution: {integrity: sha512-kMtx1yqJHTmqaqHPAzKCAkDaKsffmXkPHThSfRwZGyuqyIeBvf08KSsYXl+abf5HDAPMJIPnbBfXvP2ZC2TfHg==} engines: {node: '>=18'} cpu: [x64] os: [sunos] - '@esbuild/win32-arm64@0.21.5': - resolution: {integrity: sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==} - engines: {node: '>=12'} - cpu: [arm64] - os: [win32] - '@esbuild/win32-arm64@0.27.2': resolution: {integrity: sha512-Yaf78O/B3Kkh+nKABUF++bvJv5Ijoy9AN1ww904rOXZFLWVc5OLOfL56W+C8F9xn5JQZa3UX6m+IktJnIb1Jjg==} engines: {node: '>=18'} cpu: [arm64] os: [win32] - '@esbuild/win32-ia32@0.21.5': - resolution: {integrity: sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==} - engines: {node: '>=12'} - cpu: [ia32] - os: [win32] - '@esbuild/win32-ia32@0.27.2': resolution: {integrity: sha512-Iuws0kxo4yusk7sw70Xa2E2imZU5HoixzxfGCdxwBdhiDgt9vX9VUCBhqcwY7/uh//78A1hMkkROMJq9l27oLQ==} engines: {node: '>=18'} cpu: [ia32] os: [win32] - '@esbuild/win32-x64@0.21.5': - resolution: {integrity: sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==} - engines: {node: '>=12'} - cpu: [x64] - os: [win32] - '@esbuild/win32-x64@0.27.2': resolution: {integrity: sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ==} engines: {node: '>=18'} @@ -414,14 +273,6 @@ packages: resolution: {integrity: sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==} engines: {node: '>=18.18'} - '@istanbuljs/schema@0.1.3': - resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==} - engines: {node: '>=8'} - - '@jest/schemas@29.6.3': - resolution: {integrity: sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==} - engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - '@jridgewell/gen-mapping@0.3.13': resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==} @@ -545,8 +396,14 @@ packages: cpu: [x64] os: [win32] - '@sinclair/typebox@0.27.8': - resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==} + '@standard-schema/spec@1.1.0': + resolution: {integrity: sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==} + + '@types/chai@5.2.3': + resolution: {integrity: sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==} + + '@types/deep-eql@4.0.2': + resolution: {integrity: sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==} '@types/estree@1.0.8': resolution: {integrity: 
sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} @@ -554,8 +411,8 @@ packages: '@types/json-schema@7.0.15': resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} - '@types/node@20.19.27': - resolution: {integrity: sha512-N2clP5pJhB2YnZJ3PIHFk5RkygRX5WO/5f0WC08tp0wd+sv0rsJk3MqWn3CbNmT2J505a5336jaQj4ph1AdMug==} + '@types/node@25.0.3': + resolution: {integrity: sha512-W609buLVRVmeW693xKfzHeIV6nJGGz98uCPfeXI1ELMLXVeKYZ9m15fAMSaUPBHYLGFsVRcMmSCksQOrZV9BYA==} '@typescript-eslint/eslint-plugin@8.50.1': resolution: {integrity: sha512-PKhLGDq3JAg0Jk/aK890knnqduuI/Qj+udH7wCf0217IGi4gt+acgCyPVe79qoT+qKUvHMDQkwJeKW9fwl8Cyw==} @@ -616,35 +473,49 @@ packages: resolution: {integrity: sha512-IrDKrw7pCRUR94zeuCSUWQ+w8JEf5ZX5jl/e6AHGSLi1/zIr0lgutfn/7JpfCey+urpgQEdrZVYzCaVVKiTwhQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@vitest/coverage-v8@1.6.1': - resolution: {integrity: sha512-6YeRZwuO4oTGKxD3bijok756oktHSIm3eczVVzNe3scqzuhLwltIF3S9ZL/vwOVIpURmU6SnZhziXXAfw8/Qlw==} + '@vitest/coverage-v8@4.0.16': + resolution: {integrity: sha512-2rNdjEIsPRzsdu6/9Eq0AYAzYdpP6Bx9cje9tL3FE5XzXRQF1fNU9pe/1yE8fCrS0HD+fBtt6gLPh6LI57tX7A==} peerDependencies: - vitest: 1.6.1 + '@vitest/browser': 4.0.16 + vitest: 4.0.16 + peerDependenciesMeta: + '@vitest/browser': + optional: true - '@vitest/expect@1.6.1': - resolution: {integrity: sha512-jXL+9+ZNIJKruofqXuuTClf44eSpcHlgj3CiuNihUF3Ioujtmc0zIa3UJOW5RjDK1YLBJZnWBlPuqhYycLioog==} + '@vitest/expect@4.0.16': + resolution: {integrity: sha512-eshqULT2It7McaJkQGLkPjPjNph+uevROGuIMJdG3V+0BSR2w9u6J9Lwu+E8cK5TETlfou8GRijhafIMhXsimA==} - '@vitest/runner@1.6.1': - resolution: {integrity: sha512-3nSnYXkVkf3mXFfE7vVyPmi3Sazhb/2cfZGGs0JRzFsPFvAMBEcrweV1V1GsrstdXeKCTXlJbvnQwGWgEIHmOA==} + '@vitest/mocker@4.0.16': + resolution: {integrity: sha512-yb6k4AZxJTB+q9ycAvsoxGn+j/po0UaPgajllBgt1PzoMAAmJGYFdDk0uCcRcxb3BrME34I6u8gHZTQlkqSZpg==} + peerDependencies: + msw: ^2.4.9 + vite: ^6.0.0 || ^7.0.0-0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true - '@vitest/snapshot@1.6.1': - resolution: {integrity: sha512-WvidQuWAzU2p95u8GAKlRMqMyN1yOJkGHnx3M1PL9Raf7AQ1kwLKg04ADlCa3+OXUZE7BceOhVZiuWAbzCKcUQ==} + '@vitest/pretty-format@4.0.16': + resolution: {integrity: sha512-eNCYNsSty9xJKi/UdVD8Ou16alu7AYiS2fCPRs0b1OdhJiV89buAXQLpTbe+X8V9L6qrs9CqyvU7OaAopJYPsA==} - '@vitest/spy@1.6.1': - resolution: {integrity: sha512-MGcMmpGkZebsMZhbQKkAf9CX5zGvjkBTqf8Zx3ApYWXr3wG+QvEu2eXWfnIIWYSJExIp4V9FCKDEeygzkYrXMw==} + '@vitest/runner@4.0.16': + resolution: {integrity: sha512-VWEDm5Wv9xEo80ctjORcTQRJ539EGPB3Pb9ApvVRAY1U/WkHXmmYISqU5E79uCwcW7xYUV38gwZD+RV755fu3Q==} - '@vitest/utils@1.6.1': - resolution: {integrity: sha512-jOrrUvXM4Av9ZWiG1EajNto0u96kWAhJ1LmPmJhXXQx/32MecEKd10pOLYgS2BQx1TgkGhloPU1ArDW2vvaY6g==} + '@vitest/snapshot@4.0.16': + resolution: {integrity: sha512-sf6NcrYhYBsSYefxnry+DR8n3UV4xWZwWxYbCJUt2YdvtqzSPR7VfGrY0zsv090DAbjFZsi7ZaMi1KnSRyK1XA==} + + '@vitest/spy@4.0.16': + resolution: {integrity: sha512-4jIOWjKP0ZUaEmJm00E0cOBLU+5WE0BpeNr3XN6TEF05ltro6NJqHWxXD0kA8/Zc8Nh23AT8WQxwNG+WeROupw==} + + '@vitest/utils@4.0.16': + resolution: {integrity: sha512-h8z9yYhV3e1LEfaQ3zdypIrnAg/9hguReGZoS7Gl0aBG5xgA410zBqECqmaF/+RkTggRsfnzc1XaAHA6bmUufA==} acorn-jsx@5.3.2: resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} peerDependencies: acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 - acorn-walk@8.3.4: - resolution: 
{integrity: sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==} - engines: {node: '>=0.4.0'} - acorn@8.15.0: resolution: {integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==} engines: {node: '>=0.4.0'} @@ -657,18 +528,18 @@ packages: resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} engines: {node: '>=8'} - ansi-styles@5.2.0: - resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==} - engines: {node: '>=10'} - any-promise@1.3.0: resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==} argparse@2.0.1: resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} - assertion-error@1.1.0: - resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==} + assertion-error@2.0.1: + resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==} + engines: {node: '>=12'} + + ast-v8-to-istanbul@0.3.10: + resolution: {integrity: sha512-p4K7vMz2ZSk3wN8l5o3y2bJAoZXT3VuJI5OLTATY/01CYWumWvwkUw0SqDBnNq6IiTO3qDa1eSQDibAV8g7XOQ==} asynckit@0.4.0: resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} @@ -703,17 +574,14 @@ packages: resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} engines: {node: '>=6'} - chai@4.5.0: - resolution: {integrity: sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw==} - engines: {node: '>=4'} + chai@6.2.2: + resolution: {integrity: sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==} + engines: {node: '>=18'} chalk@4.1.2: resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} engines: {node: '>=10'} - check-error@1.0.3: - resolution: {integrity: sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==} - chokidar@4.0.3: resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==} engines: {node: '>= 14.16.0'} @@ -756,10 +624,6 @@ packages: supports-color: optional: true - deep-eql@4.1.4: - resolution: {integrity: sha512-SUwdGfqdKOwxCPeVYjwSyRpJ7Z+fhpwIAtmCUdZIWZ/YP5R9WAsyuSgpLVDi9bjWoN2LXHNss/dk3urXtdQxGg==} - engines: {node: '>=6'} - deep-is@0.1.4: resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} @@ -767,10 +631,6 @@ packages: resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} engines: {node: '>=0.4.0'} - diff-sequences@29.6.3: - resolution: {integrity: sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==} - engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - dunder-proto@1.0.1: resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} engines: {node: '>= 0.4'} @@ -783,6 +643,9 @@ packages: resolution: {integrity: 
sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} engines: {node: '>= 0.4'} + es-module-lexer@1.7.0: + resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} + es-object-atoms@1.1.1: resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} engines: {node: '>= 0.4'} @@ -791,11 +654,6 @@ packages: resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==} engines: {node: '>= 0.4'} - esbuild@0.21.5: - resolution: {integrity: sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==} - engines: {node: '>=12'} - hasBin: true - esbuild@0.27.2: resolution: {integrity: sha512-HyNQImnsOC7X9PMNaCIeAm4ISCQXs5a5YasTXVliKv4uuBo1dKrG0A+uQS8M5eXjVMnLg3WgXaKvprHlFJQffw==} engines: {node: '>=18'} @@ -850,9 +708,9 @@ packages: resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} engines: {node: '>=0.10.0'} - execa@8.0.1: - resolution: {integrity: sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==} - engines: {node: '>=16.17'} + expect-type@1.3.0: + resolution: {integrity: sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==} + engines: {node: '>=12.0.0'} fast-deep-equal@3.1.3: resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} @@ -903,9 +761,6 @@ packages: resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} engines: {node: '>= 6'} - fs.realpath@1.0.0: - resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} - fsevents@2.3.3: resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} @@ -914,9 +769,6 @@ packages: function-bind@1.1.2: resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} - get-func-name@2.0.2: - resolution: {integrity: sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==} - get-intrinsic@1.3.0: resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} engines: {node: '>= 0.4'} @@ -925,18 +777,10 @@ packages: resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} engines: {node: '>= 0.4'} - get-stream@8.0.1: - resolution: {integrity: sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==} - engines: {node: '>=16'} - glob-parent@6.0.2: resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} engines: {node: '>=10.13.0'} - glob@7.2.3: - resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} - deprecated: Glob versions prior to v9 are no longer supported - globals@14.0.0: resolution: {integrity: sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==} engines: {node: '>=18'} @@ -964,10 +808,6 @@ packages: 
html-escaper@2.0.2: resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} - human-signals@5.0.0: - resolution: {integrity: sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==} - engines: {node: '>=16.17.0'} - ignore@5.3.2: resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} engines: {node: '>= 4'} @@ -984,13 +824,6 @@ packages: resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} engines: {node: '>=0.8.19'} - inflight@1.0.6: - resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} - deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. - - inherits@2.0.4: - resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} - is-extglob@2.1.1: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} @@ -999,10 +832,6 @@ packages: resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} - is-stream@3.0.0: - resolution: {integrity: sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - isexe@2.0.0: resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} @@ -1060,10 +889,6 @@ packages: resolution: {integrity: sha512-IXO6OCs9yg8tMKzfPZ1YmheJbZCiEsnBdcB03l0OcfK9prKnJb96siuHCr5Fl37/yo9DnKU+TLpxzTUspw9shg==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - local-pkg@0.5.1: - resolution: {integrity: sha512-9rrA30MRRP3gBD3HTGnC6cDFpaE1kVDWxWgqWJUN0RvDNAo+Nz/9GxB+nHOH0ifbVFy0hSA1V6vFDvnx54lTEQ==} - engines: {node: '>=14'} - locate-path@6.0.0: resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} engines: {node: '>=10'} @@ -1071,14 +896,11 @@ packages: lodash.merge@4.6.2: resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} - loupe@2.3.7: - resolution: {integrity: sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==} - magic-string@0.30.21: resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} - magicast@0.3.5: - resolution: {integrity: sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==} + magicast@0.5.1: + resolution: {integrity: sha512-xrHS24IxaLrvuo613F719wvOIv9xPHFWQHuvGUBmPnCA/3MQxKI3b+r7n1jAoDHmsbC5bRhTZYR77invLAxVnw==} make-dir@4.0.0: resolution: {integrity: sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==} @@ -1088,9 +910,6 @@ packages: resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} engines: {node: '>= 0.4'} - merge-stream@2.0.0: - resolution: {integrity: 
sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} - mime-db@1.52.0: resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} engines: {node: '>= 0.6'} @@ -1099,10 +918,6 @@ packages: resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} engines: {node: '>= 0.6'} - mimic-fn@4.0.0: - resolution: {integrity: sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==} - engines: {node: '>=12'} - minimatch@3.1.2: resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} @@ -1127,20 +942,12 @@ packages: natural-compare@1.4.0: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} - npm-run-path@5.3.0: - resolution: {integrity: sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - object-assign@4.1.1: resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} engines: {node: '>=0.10.0'} - once@1.4.0: - resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} - - onetime@6.0.0: - resolution: {integrity: sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==} - engines: {node: '>=12'} + obug@2.1.1: + resolution: {integrity: sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==} optionator@0.9.4: resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} @@ -1150,10 +957,6 @@ packages: resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} engines: {node: '>=10'} - p-limit@5.0.0: - resolution: {integrity: sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==} - engines: {node: '>=18'} - p-locate@5.0.0: resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} engines: {node: '>=10'} @@ -1166,27 +969,13 @@ packages: resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} engines: {node: '>=8'} - path-is-absolute@1.0.1: - resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} - engines: {node: '>=0.10.0'} - path-key@3.1.1: resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} engines: {node: '>=8'} - path-key@4.0.0: - resolution: {integrity: sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==} - engines: {node: '>=12'} - - pathe@1.1.2: - resolution: {integrity: sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==} - pathe@2.0.3: resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} - pathval@1.1.1: - resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==} - picocolors@1.1.1: resolution: {integrity: 
sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} @@ -1227,10 +1016,6 @@ packages: resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} engines: {node: '>= 0.8.0'} - pretty-format@29.7.0: - resolution: {integrity: sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==} - engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - proxy-from-env@1.1.0: resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} @@ -1238,9 +1023,6 @@ packages: resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} engines: {node: '>=6'} - react-is@18.3.1: - resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==} - readdirp@4.1.2: resolution: {integrity: sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==} engines: {node: '>= 14.18.0'} @@ -1274,10 +1056,6 @@ packages: siginfo@2.0.0: resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} - signal-exit@4.1.0: - resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==} - engines: {node: '>=14'} - source-map-js@1.2.1: resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} engines: {node: '>=0.10.0'} @@ -1292,17 +1070,10 @@ packages: std-env@3.10.0: resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} - strip-final-newline@3.0.0: - resolution: {integrity: sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==} - engines: {node: '>=12'} - strip-json-comments@3.1.1: resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} engines: {node: '>=8'} - strip-literal@2.1.1: - resolution: {integrity: sha512-631UJ6O00eNGfMiWG78ck80dfBab8X6IVFB51jZK5Icd7XAs60Z5y7QdSd/wGIklnWvRbUNloVzhOKKmutxQ6Q==} - sucrase@3.35.1: resolution: {integrity: sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==} engines: {node: '>=16 || 14 >=14.17'} @@ -1312,10 +1083,6 @@ packages: resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} engines: {node: '>=8'} - test-exclude@6.0.0: - resolution: {integrity: sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==} - engines: {node: '>=8'} - thenify-all@1.6.0: resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==} engines: {node: '>=0.8'} @@ -1329,16 +1096,16 @@ packages: tinyexec@0.3.2: resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} + tinyexec@1.0.2: + resolution: {integrity: sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==} + engines: {node: '>=18'} + tinyglobby@0.2.15: resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} engines: {node: '>=12.0.0'} - tinypool@0.8.4: - resolution: {integrity: 
sha512-i11VH5gS6IFeLY3gMBQ00/MmLncVP7JLXOw1vlgkytLmJK7QnEr7NXf0LBdxfmNPAeyetukOk0bOYrJrFGjYJQ==} - engines: {node: '>=14.0.0'} - - tinyspy@2.2.1: - resolution: {integrity: sha512-KYad6Vy5VDWV4GH3fjpseMQ/XU2BhIYP7Vzd0LG44qRWm/Yt2WCOTicFdvmgo6gWaqooMQCawTtILVQJupKu7A==} + tinyrainbow@3.0.3: + resolution: {integrity: sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==} engines: {node: '>=14.0.0'} tree-kill@1.2.2: @@ -1377,10 +1144,6 @@ packages: resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} engines: {node: '>= 0.8.0'} - type-detect@4.1.0: - resolution: {integrity: sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==} - engines: {node: '>=4'} - typescript@5.9.3: resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} engines: {node: '>=14.17'} @@ -1389,33 +1152,33 @@ packages: ufo@1.6.1: resolution: {integrity: sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==} - undici-types@6.21.0: - resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + undici-types@7.16.0: + resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==} uri-js@4.4.1: resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} - vite-node@1.6.1: - resolution: {integrity: sha512-YAXkfvGtuTzwWbDSACdJSg4A4DZiAqckWe90Zapc/sEX3XvHcw1NdurM/6od8J207tSDqNbSsgdCacBgvJKFuA==} - engines: {node: ^18.0.0 || >=20.0.0} - hasBin: true - - vite@5.4.21: - resolution: {integrity: sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==} - engines: {node: ^18.0.0 || >=20.0.0} + vite@7.3.0: + resolution: {integrity: sha512-dZwN5L1VlUBewiP6H9s2+B3e3Jg96D0vzN+Ry73sOefebhYr9f94wwkMNN/9ouoU8pV1BqA1d1zGk8928cx0rg==} + engines: {node: ^20.19.0 || >=22.12.0} hasBin: true peerDependencies: - '@types/node': ^18.0.0 || >=20.0.0 - less: '*' + '@types/node': ^20.19.0 || >=22.12.0 + jiti: '>=1.21.0' + less: ^4.0.0 lightningcss: ^1.21.0 - sass: '*' - sass-embedded: '*' - stylus: '*' - sugarss: '*' - terser: ^5.4.0 + sass: ^1.70.0 + sass-embedded: ^1.70.0 + stylus: '>=0.54.8' + sugarss: ^5.0.0 + terser: ^5.16.0 + tsx: ^4.8.1 + yaml: ^2.4.2 peerDependenciesMeta: '@types/node': optional: true + jiti: + optional: true less: optional: true lightningcss: @@ -1430,24 +1193,37 @@ packages: optional: true terser: optional: true + tsx: + optional: true + yaml: + optional: true - vitest@1.6.1: - resolution: {integrity: sha512-Ljb1cnSJSivGN0LqXd/zmDbWEM0RNNg2t1QW/XUhYl/qPqyu7CsqeWtqQXHVaJsecLPuDoak2oJcZN2QoRIOag==} - engines: {node: ^18.0.0 || >=20.0.0} + vitest@4.0.16: + resolution: {integrity: sha512-E4t7DJ9pESL6E3I8nFjPa4xGUd3PmiWDLsDztS2qXSJWfHtbQnwAWylaBvSNY48I3vr8PTqIZlyK8TE3V3CA4Q==} + engines: {node: ^20.0.0 || ^22.0.0 || >=24.0.0} hasBin: true peerDependencies: '@edge-runtime/vm': '*' - '@types/node': ^18.0.0 || >=20.0.0 - '@vitest/browser': 1.6.1 - '@vitest/ui': 1.6.1 + '@opentelemetry/api': ^1.9.0 + '@types/node': ^20.0.0 || ^22.0.0 || >=24.0.0 + '@vitest/browser-playwright': 4.0.16 + '@vitest/browser-preview': 4.0.16 + '@vitest/browser-webdriverio': 4.0.16 + '@vitest/ui': 4.0.16 happy-dom: '*' jsdom: '*' peerDependenciesMeta: '@edge-runtime/vm': optional: true + 
'@opentelemetry/api': + optional: true '@types/node': optional: true - '@vitest/browser': + '@vitest/browser-playwright': + optional: true + '@vitest/browser-preview': + optional: true + '@vitest/browser-webdriverio': optional: true '@vitest/ui': optional: true @@ -1470,24 +1246,12 @@ packages: resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} engines: {node: '>=0.10.0'} - wrappy@1.0.2: - resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} - yocto-queue@0.1.0: resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} engines: {node: '>=10'} - yocto-queue@1.2.2: - resolution: {integrity: sha512-4LCcse/U2MHZ63HAJVE+v71o7yOdIe4cZ70Wpf8D/IyjDKYQLV5GD46B+hSTjJsvV5PztjvHoU580EftxjDZFQ==} - engines: {node: '>=12.20'} - snapshots: - '@ampproject/remapping@2.3.0': - dependencies: - '@jridgewell/gen-mapping': 0.3.13 - '@jridgewell/trace-mapping': 0.3.31 - '@babel/helper-string-parser@7.27.1': {} '@babel/helper-validator-identifier@7.28.5': {} @@ -1501,152 +1265,83 @@ snapshots: '@babel/helper-string-parser': 7.27.1 '@babel/helper-validator-identifier': 7.28.5 - '@bcoe/v8-coverage@0.2.3': {} - - '@esbuild/aix-ppc64@0.21.5': - optional: true + '@bcoe/v8-coverage@1.0.2': {} '@esbuild/aix-ppc64@0.27.2': optional: true - '@esbuild/android-arm64@0.21.5': - optional: true - '@esbuild/android-arm64@0.27.2': optional: true - '@esbuild/android-arm@0.21.5': - optional: true - '@esbuild/android-arm@0.27.2': optional: true - '@esbuild/android-x64@0.21.5': - optional: true - '@esbuild/android-x64@0.27.2': optional: true - '@esbuild/darwin-arm64@0.21.5': - optional: true - '@esbuild/darwin-arm64@0.27.2': optional: true - '@esbuild/darwin-x64@0.21.5': - optional: true - '@esbuild/darwin-x64@0.27.2': optional: true - '@esbuild/freebsd-arm64@0.21.5': - optional: true - '@esbuild/freebsd-arm64@0.27.2': optional: true - '@esbuild/freebsd-x64@0.21.5': - optional: true - '@esbuild/freebsd-x64@0.27.2': optional: true - '@esbuild/linux-arm64@0.21.5': - optional: true - '@esbuild/linux-arm64@0.27.2': optional: true - '@esbuild/linux-arm@0.21.5': - optional: true - '@esbuild/linux-arm@0.27.2': optional: true - '@esbuild/linux-ia32@0.21.5': - optional: true - '@esbuild/linux-ia32@0.27.2': optional: true - '@esbuild/linux-loong64@0.21.5': - optional: true - '@esbuild/linux-loong64@0.27.2': optional: true - '@esbuild/linux-mips64el@0.21.5': - optional: true - '@esbuild/linux-mips64el@0.27.2': optional: true - '@esbuild/linux-ppc64@0.21.5': - optional: true - '@esbuild/linux-ppc64@0.27.2': optional: true - '@esbuild/linux-riscv64@0.21.5': - optional: true - '@esbuild/linux-riscv64@0.27.2': optional: true - '@esbuild/linux-s390x@0.21.5': - optional: true - '@esbuild/linux-s390x@0.27.2': optional: true - '@esbuild/linux-x64@0.21.5': - optional: true - '@esbuild/linux-x64@0.27.2': optional: true '@esbuild/netbsd-arm64@0.27.2': optional: true - '@esbuild/netbsd-x64@0.21.5': - optional: true - '@esbuild/netbsd-x64@0.27.2': optional: true '@esbuild/openbsd-arm64@0.27.2': optional: true - '@esbuild/openbsd-x64@0.21.5': - optional: true - '@esbuild/openbsd-x64@0.27.2': optional: true '@esbuild/openharmony-arm64@0.27.2': optional: true - '@esbuild/sunos-x64@0.21.5': - optional: true - '@esbuild/sunos-x64@0.27.2': optional: true - '@esbuild/win32-arm64@0.21.5': - optional: true - '@esbuild/win32-arm64@0.27.2': optional: true - 
'@esbuild/win32-ia32@0.21.5': - optional: true - '@esbuild/win32-ia32@0.27.2': optional: true - '@esbuild/win32-x64@0.21.5': - optional: true - '@esbuild/win32-x64@0.27.2': optional: true @@ -1707,12 +1402,6 @@ snapshots: '@humanwhocodes/retry@0.4.3': {} - '@istanbuljs/schema@0.1.3': {} - - '@jest/schemas@29.6.3': - dependencies: - '@sinclair/typebox': 0.27.8 - '@jridgewell/gen-mapping@0.3.13': dependencies: '@jridgewell/sourcemap-codec': 1.5.5 @@ -1793,15 +1482,22 @@ snapshots: '@rollup/rollup-win32-x64-msvc@4.54.0': optional: true - '@sinclair/typebox@0.27.8': {} + '@standard-schema/spec@1.1.0': {} + + '@types/chai@5.2.3': + dependencies: + '@types/deep-eql': 4.0.2 + assertion-error: 2.0.1 + + '@types/deep-eql@4.0.2': {} '@types/estree@1.0.8': {} '@types/json-schema@7.0.15': {} - '@types/node@20.19.27': + '@types/node@25.0.3': dependencies: - undici-types: 6.21.0 + undici-types: 7.16.0 '@typescript-eslint/eslint-plugin@8.50.1(@typescript-eslint/parser@8.50.1(eslint@9.39.2)(typescript@5.9.3))(eslint@9.39.2)(typescript@5.9.3)': dependencies: @@ -1894,62 +1590,66 @@ snapshots: '@typescript-eslint/types': 8.50.1 eslint-visitor-keys: 4.2.1 - '@vitest/coverage-v8@1.6.1(vitest@1.6.1(@types/node@20.19.27))': + '@vitest/coverage-v8@4.0.16(vitest@4.0.16(@types/node@25.0.3))': dependencies: - '@ampproject/remapping': 2.3.0 - '@bcoe/v8-coverage': 0.2.3 - debug: 4.4.3 + '@bcoe/v8-coverage': 1.0.2 + '@vitest/utils': 4.0.16 + ast-v8-to-istanbul: 0.3.10 istanbul-lib-coverage: 3.2.2 istanbul-lib-report: 3.0.1 istanbul-lib-source-maps: 5.0.6 istanbul-reports: 3.2.0 - magic-string: 0.30.21 - magicast: 0.3.5 - picocolors: 1.1.1 + magicast: 0.5.1 + obug: 2.1.1 std-env: 3.10.0 - strip-literal: 2.1.1 - test-exclude: 6.0.0 - vitest: 1.6.1(@types/node@20.19.27) + tinyrainbow: 3.0.3 + vitest: 4.0.16(@types/node@25.0.3) transitivePeerDependencies: - supports-color - '@vitest/expect@1.6.1': + '@vitest/expect@4.0.16': dependencies: - '@vitest/spy': 1.6.1 - '@vitest/utils': 1.6.1 - chai: 4.5.0 + '@standard-schema/spec': 1.1.0 + '@types/chai': 5.2.3 + '@vitest/spy': 4.0.16 + '@vitest/utils': 4.0.16 + chai: 6.2.2 + tinyrainbow: 3.0.3 - '@vitest/runner@1.6.1': + '@vitest/mocker@4.0.16(vite@7.3.0(@types/node@25.0.3))': dependencies: - '@vitest/utils': 1.6.1 - p-limit: 5.0.0 - pathe: 1.1.2 - - '@vitest/snapshot@1.6.1': - dependencies: - magic-string: 0.30.21 - pathe: 1.1.2 - pretty-format: 29.7.0 - - '@vitest/spy@1.6.1': - dependencies: - tinyspy: 2.2.1 - - '@vitest/utils@1.6.1': - dependencies: - diff-sequences: 29.6.3 + '@vitest/spy': 4.0.16 estree-walker: 3.0.3 - loupe: 2.3.7 - pretty-format: 29.7.0 + magic-string: 0.30.21 + optionalDependencies: + vite: 7.3.0(@types/node@25.0.3) + + '@vitest/pretty-format@4.0.16': + dependencies: + tinyrainbow: 3.0.3 + + '@vitest/runner@4.0.16': + dependencies: + '@vitest/utils': 4.0.16 + pathe: 2.0.3 + + '@vitest/snapshot@4.0.16': + dependencies: + '@vitest/pretty-format': 4.0.16 + magic-string: 0.30.21 + pathe: 2.0.3 + + '@vitest/spy@4.0.16': {} + + '@vitest/utils@4.0.16': + dependencies: + '@vitest/pretty-format': 4.0.16 + tinyrainbow: 3.0.3 acorn-jsx@5.3.2(acorn@8.15.0): dependencies: acorn: 8.15.0 - acorn-walk@8.3.4: - dependencies: - acorn: 8.15.0 - acorn@8.15.0: {} ajv@6.12.6: @@ -1963,13 +1663,17 @@ snapshots: dependencies: color-convert: 2.0.1 - ansi-styles@5.2.0: {} - any-promise@1.3.0: {} argparse@2.0.1: {} - assertion-error@1.1.0: {} + assertion-error@2.0.1: {} + + ast-v8-to-istanbul@0.3.10: + dependencies: + '@jridgewell/trace-mapping': 0.3.31 + estree-walker: 3.0.3 + 
js-tokens: 9.0.1 asynckit@0.4.0: {} @@ -2006,25 +1710,13 @@ snapshots: callsites@3.1.0: {} - chai@4.5.0: - dependencies: - assertion-error: 1.1.0 - check-error: 1.0.3 - deep-eql: 4.1.4 - get-func-name: 2.0.2 - loupe: 2.3.7 - pathval: 1.1.1 - type-detect: 4.1.0 + chai@6.2.2: {} chalk@4.1.2: dependencies: ansi-styles: 4.3.0 supports-color: 7.2.0 - check-error@1.0.3: - dependencies: - get-func-name: 2.0.2 - chokidar@4.0.3: dependencies: readdirp: 4.1.2 @@ -2057,16 +1749,10 @@ snapshots: dependencies: ms: 2.1.3 - deep-eql@4.1.4: - dependencies: - type-detect: 4.1.0 - deep-is@0.1.4: {} delayed-stream@1.0.0: {} - diff-sequences@29.6.3: {} - dunder-proto@1.0.1: dependencies: call-bind-apply-helpers: 1.0.2 @@ -2077,6 +1763,8 @@ snapshots: es-errors@1.3.0: {} + es-module-lexer@1.7.0: {} + es-object-atoms@1.1.1: dependencies: es-errors: 1.3.0 @@ -2088,32 +1776,6 @@ snapshots: has-tostringtag: 1.0.2 hasown: 2.0.2 - esbuild@0.21.5: - optionalDependencies: - '@esbuild/aix-ppc64': 0.21.5 - '@esbuild/android-arm': 0.21.5 - '@esbuild/android-arm64': 0.21.5 - '@esbuild/android-x64': 0.21.5 - '@esbuild/darwin-arm64': 0.21.5 - '@esbuild/darwin-x64': 0.21.5 - '@esbuild/freebsd-arm64': 0.21.5 - '@esbuild/freebsd-x64': 0.21.5 - '@esbuild/linux-arm': 0.21.5 - '@esbuild/linux-arm64': 0.21.5 - '@esbuild/linux-ia32': 0.21.5 - '@esbuild/linux-loong64': 0.21.5 - '@esbuild/linux-mips64el': 0.21.5 - '@esbuild/linux-ppc64': 0.21.5 - '@esbuild/linux-riscv64': 0.21.5 - '@esbuild/linux-s390x': 0.21.5 - '@esbuild/linux-x64': 0.21.5 - '@esbuild/netbsd-x64': 0.21.5 - '@esbuild/openbsd-x64': 0.21.5 - '@esbuild/sunos-x64': 0.21.5 - '@esbuild/win32-arm64': 0.21.5 - '@esbuild/win32-ia32': 0.21.5 - '@esbuild/win32-x64': 0.21.5 - esbuild@0.27.2: optionalDependencies: '@esbuild/aix-ppc64': 0.27.2 @@ -2215,17 +1877,7 @@ snapshots: esutils@2.0.3: {} - execa@8.0.1: - dependencies: - cross-spawn: 7.0.6 - get-stream: 8.0.1 - human-signals: 5.0.0 - is-stream: 3.0.0 - merge-stream: 2.0.0 - npm-run-path: 5.3.0 - onetime: 6.0.0 - signal-exit: 4.1.0 - strip-final-newline: 3.0.0 + expect-type@1.3.0: {} fast-deep-equal@3.1.3: {} @@ -2269,15 +1921,11 @@ snapshots: hasown: 2.0.2 mime-types: 2.1.35 - fs.realpath@1.0.0: {} - fsevents@2.3.3: optional: true function-bind@1.1.2: {} - get-func-name@2.0.2: {} - get-intrinsic@1.3.0: dependencies: call-bind-apply-helpers: 1.0.2 @@ -2296,21 +1944,10 @@ snapshots: dunder-proto: 1.0.1 es-object-atoms: 1.1.1 - get-stream@8.0.1: {} - glob-parent@6.0.2: dependencies: is-glob: 4.0.3 - glob@7.2.3: - dependencies: - fs.realpath: 1.0.0 - inflight: 1.0.6 - inherits: 2.0.4 - minimatch: 3.1.2 - once: 1.4.0 - path-is-absolute: 1.0.1 - globals@14.0.0: {} gopd@1.2.0: {} @@ -2329,8 +1966,6 @@ snapshots: html-escaper@2.0.2: {} - human-signals@5.0.0: {} - ignore@5.3.2: {} ignore@7.0.5: {} @@ -2342,21 +1977,12 @@ snapshots: imurmurhash@0.1.4: {} - inflight@1.0.6: - dependencies: - once: 1.4.0 - wrappy: 1.0.2 - - inherits@2.0.4: {} - is-extglob@2.1.1: {} is-glob@4.0.3: dependencies: is-extglob: 2.1.1 - is-stream@3.0.0: {} - isexe@2.0.0: {} istanbul-lib-coverage@3.2.2: {} @@ -2409,26 +2035,17 @@ snapshots: load-tsconfig@0.2.5: {} - local-pkg@0.5.1: - dependencies: - mlly: 1.8.0 - pkg-types: 1.3.1 - locate-path@6.0.0: dependencies: p-locate: 5.0.0 lodash.merge@4.6.2: {} - loupe@2.3.7: - dependencies: - get-func-name: 2.0.2 - magic-string@0.30.21: dependencies: '@jridgewell/sourcemap-codec': 1.5.5 - magicast@0.3.5: + magicast@0.5.1: dependencies: '@babel/parser': 7.28.5 '@babel/types': 7.28.5 @@ -2440,16 +2057,12 @@ snapshots: 
math-intrinsics@1.1.0: {} - merge-stream@2.0.0: {} - mime-db@1.52.0: {} mime-types@2.1.35: dependencies: mime-db: 1.52.0 - mimic-fn@4.0.0: {} - minimatch@3.1.2: dependencies: brace-expansion: 1.1.12 @@ -2477,19 +2090,9 @@ snapshots: natural-compare@1.4.0: {} - npm-run-path@5.3.0: - dependencies: - path-key: 4.0.0 - object-assign@4.1.1: {} - once@1.4.0: - dependencies: - wrappy: 1.0.2 - - onetime@6.0.0: - dependencies: - mimic-fn: 4.0.0 + obug@2.1.1: {} optionator@0.9.4: dependencies: @@ -2504,10 +2107,6 @@ snapshots: dependencies: yocto-queue: 0.1.0 - p-limit@5.0.0: - dependencies: - yocto-queue: 1.2.2 - p-locate@5.0.0: dependencies: p-limit: 3.1.0 @@ -2518,18 +2117,10 @@ snapshots: path-exists@4.0.0: {} - path-is-absolute@1.0.1: {} - path-key@3.1.1: {} - path-key@4.0.0: {} - - pathe@1.1.2: {} - pathe@2.0.3: {} - pathval@1.1.1: {} - picocolors@1.1.1: {} picomatch@4.0.3: {} @@ -2556,18 +2147,10 @@ snapshots: prelude-ls@1.2.1: {} - pretty-format@29.7.0: - dependencies: - '@jest/schemas': 29.6.3 - ansi-styles: 5.2.0 - react-is: 18.3.1 - proxy-from-env@1.1.0: {} punycode@2.3.1: {} - react-is@18.3.1: {} - readdirp@4.1.2: {} resolve-from@4.0.0: {} @@ -2612,8 +2195,6 @@ snapshots: siginfo@2.0.0: {} - signal-exit@4.1.0: {} - source-map-js@1.2.1: {} source-map@0.7.6: {} @@ -2622,14 +2203,8 @@ snapshots: std-env@3.10.0: {} - strip-final-newline@3.0.0: {} - strip-json-comments@3.1.1: {} - strip-literal@2.1.1: - dependencies: - js-tokens: 9.0.1 - sucrase@3.35.1: dependencies: '@jridgewell/gen-mapping': 0.3.13 @@ -2644,12 +2219,6 @@ snapshots: dependencies: has-flag: 4.0.0 - test-exclude@6.0.0: - dependencies: - '@istanbuljs/schema': 0.1.3 - glob: 7.2.3 - minimatch: 3.1.2 - thenify-all@1.6.0: dependencies: thenify: 3.3.1 @@ -2662,14 +2231,14 @@ snapshots: tinyexec@0.3.2: {} + tinyexec@1.0.2: {} + tinyglobby@0.2.15: dependencies: fdir: 6.5.0(picomatch@4.0.3) picomatch: 4.0.3 - tinypool@0.8.4: {} - - tinyspy@2.2.1: {} + tinyrainbow@3.0.3: {} tree-kill@1.2.2: {} @@ -2711,78 +2280,64 @@ snapshots: dependencies: prelude-ls: 1.2.1 - type-detect@4.1.0: {} - typescript@5.9.3: {} ufo@1.6.1: {} - undici-types@6.21.0: {} + undici-types@7.16.0: {} uri-js@4.4.1: dependencies: punycode: 2.3.1 - vite-node@1.6.1(@types/node@20.19.27): + vite@7.3.0(@types/node@25.0.3): dependencies: - cac: 6.7.14 - debug: 4.4.3 - pathe: 1.1.2 - picocolors: 1.1.1 - vite: 5.4.21(@types/node@20.19.27) - transitivePeerDependencies: - - '@types/node' - - less - - lightningcss - - sass - - sass-embedded - - stylus - - sugarss - - supports-color - - terser - - vite@5.4.21(@types/node@20.19.27): - dependencies: - esbuild: 0.21.5 + esbuild: 0.27.2 + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 postcss: 8.5.6 rollup: 4.54.0 + tinyglobby: 0.2.15 optionalDependencies: - '@types/node': 20.19.27 + '@types/node': 25.0.3 fsevents: 2.3.3 - vitest@1.6.1(@types/node@20.19.27): + vitest@4.0.16(@types/node@25.0.3): dependencies: - '@vitest/expect': 1.6.1 - '@vitest/runner': 1.6.1 - '@vitest/snapshot': 1.6.1 - '@vitest/spy': 1.6.1 - '@vitest/utils': 1.6.1 - acorn-walk: 8.3.4 - chai: 4.5.0 - debug: 4.4.3 - execa: 8.0.1 - local-pkg: 0.5.1 + '@vitest/expect': 4.0.16 + '@vitest/mocker': 4.0.16(vite@7.3.0(@types/node@25.0.3)) + '@vitest/pretty-format': 4.0.16 + '@vitest/runner': 4.0.16 + '@vitest/snapshot': 4.0.16 + '@vitest/spy': 4.0.16 + '@vitest/utils': 4.0.16 + es-module-lexer: 1.7.0 + expect-type: 1.3.0 magic-string: 0.30.21 - pathe: 1.1.2 - picocolors: 1.1.1 + obug: 2.1.1 + pathe: 2.0.3 + picomatch: 4.0.3 std-env: 3.10.0 - strip-literal: 2.1.1 
tinybench: 2.9.0 - tinypool: 0.8.4 - vite: 5.4.21(@types/node@20.19.27) - vite-node: 1.6.1(@types/node@20.19.27) + tinyexec: 1.0.2 + tinyglobby: 0.2.15 + tinyrainbow: 3.0.3 + vite: 7.3.0(@types/node@25.0.3) why-is-node-running: 2.3.0 optionalDependencies: - '@types/node': 20.19.27 + '@types/node': 25.0.3 transitivePeerDependencies: + - jiti - less - lightningcss + - msw - sass - sass-embedded - stylus - sugarss - - supports-color - terser + - tsx + - yaml which@2.0.2: dependencies: @@ -2795,8 +2350,4 @@ snapshots: word-wrap@1.2.5: {} - wrappy@1.0.2: {} - yocto-queue@0.1.0: {} - - yocto-queue@1.2.2: {} diff --git a/sdks/nodejs-client/pnpm-workspace.yaml b/sdks/nodejs-client/pnpm-workspace.yaml new file mode 100644 index 0000000000..efc037aa84 --- /dev/null +++ b/sdks/nodejs-client/pnpm-workspace.yaml @@ -0,0 +1,2 @@ +onlyBuiltDependencies: + - esbuild diff --git a/web/.env.example b/web/.env.example index b488c31057..c06a4fba87 100644 --- a/web/.env.example +++ b/web/.env.example @@ -73,3 +73,6 @@ NEXT_PUBLIC_MAX_TREE_DEPTH=50 # The API key of amplitude NEXT_PUBLIC_AMPLITUDE_API_KEY= + +# number of concurrency +NEXT_PUBLIC_BATCH_CONCURRENCY=5 diff --git a/web/__mocks__/provider-context.ts b/web/__mocks__/provider-context.ts index 3ecb4e9c0e..c69a2ad1d2 100644 --- a/web/__mocks__/provider-context.ts +++ b/web/__mocks__/provider-context.ts @@ -1,6 +1,6 @@ import type { Plan, UsagePlanInfo } from '@/app/components/billing/type' import type { ProviderContextState } from '@/context/provider-context' -import { merge, noop } from 'lodash-es' +import { merge, noop } from 'es-toolkit/compat' import { defaultPlan } from '@/app/components/billing/config' // Avoid being mocked in tests diff --git a/web/__tests__/check-i18n.test.ts b/web/__tests__/check-i18n.test.ts index a6f86d8107..9f573bda10 100644 --- a/web/__tests__/check-i18n.test.ts +++ b/web/__tests__/check-i18n.test.ts @@ -3,7 +3,7 @@ import path from 'node:path' import vm from 'node:vm' import { transpile } from 'typescript' -describe('check-i18n script functionality', () => { +describe('i18n:check script functionality', () => { const testDir = path.join(__dirname, '../i18n-test') const testEnDir = path.join(testDir, 'en-US') const testZhDir = path.join(testDir, 'zh-Hans') diff --git a/web/__tests__/i18n-upload-features.test.ts b/web/__tests__/i18n-upload-features.test.ts index af418262d6..3d0f1d30dd 100644 --- a/web/__tests__/i18n-upload-features.test.ts +++ b/web/__tests__/i18n-upload-features.test.ts @@ -16,7 +16,7 @@ const getSupportedLocales = (): string[] => { // Helper function to load translation file content const loadTranslationContent = (locale: string): string => { - const filePath = path.join(I18N_DIR, locale, 'app-debug.ts') + const filePath = path.join(I18N_DIR, locale, 'app-debug.json') if (!fs.existsSync(filePath)) throw new Error(`Translation file not found: ${filePath}`) @@ -24,14 +24,14 @@ const loadTranslationContent = (locale: string): string => { return fs.readFileSync(filePath, 'utf-8') } -// Helper function to check if upload features exist +// Helper function to check if upload features exist (supports flattened JSON) const hasUploadFeatures = (content: string): { [key: string]: boolean } => { return { - fileUpload: /fileUpload\s*:\s*\{/.test(content), - imageUpload: /imageUpload\s*:\s*\{/.test(content), - documentUpload: /documentUpload\s*:\s*\{/.test(content), - audioUpload: /audioUpload\s*:\s*\{/.test(content), - featureBar: /bar\s*:\s*\{/.test(content), + fileUpload: /"feature\.fileUpload\.title"/.test(content), + 
imageUpload: /"feature\.imageUpload\.title"/.test(content), + documentUpload: /"feature\.documentUpload\.title"/.test(content), + audioUpload: /"feature\.audioUpload\.title"/.test(content), + featureBar: /"feature\.bar\.empty"/.test(content), } } @@ -45,7 +45,7 @@ describe('Upload Features i18n Translations - Issue #23062', () => { it('all locales should have translation files', () => { supportedLocales.forEach((locale) => { - const filePath = path.join(I18N_DIR, locale, 'app-debug.ts') + const filePath = path.join(I18N_DIR, locale, 'app-debug.json') expect(fs.existsSync(filePath)).toBe(true) }) }) @@ -76,12 +76,9 @@ describe('Upload Features i18n Translations - Issue #23062', () => { previouslyMissingLocales.forEach((locale) => { const content = loadTranslationContent(locale) - // Verify audioUpload exists - expect(/audioUpload\s*:\s*\{/.test(content)).toBe(true) - - // Verify it has title and description - expect(/audioUpload[^}]*title\s*:/.test(content)).toBe(true) - expect(/audioUpload[^}]*description\s*:/.test(content)).toBe(true) + // Verify audioUpload exists with title and description (flattened JSON format) + expect(/"feature\.audioUpload\.title"/.test(content)).toBe(true) + expect(/"feature\.audioUpload\.description"/.test(content)).toBe(true) console.log(`āœ… ${locale} - Issue #23062 resolved: audioUpload feature present`) }) @@ -91,28 +88,28 @@ describe('Upload Features i18n Translations - Issue #23062', () => { supportedLocales.forEach((locale) => { const content = loadTranslationContent(locale) - // Check fileUpload has required properties - if (/fileUpload\s*:\s*\{/.test(content)) { - expect(/fileUpload[^}]*title\s*:/.test(content)).toBe(true) - expect(/fileUpload[^}]*description\s*:/.test(content)).toBe(true) + // Check fileUpload has required properties (flattened JSON format) + if (/"feature\.fileUpload\.title"/.test(content)) { + expect(/"feature\.fileUpload\.title"/.test(content)).toBe(true) + expect(/"feature\.fileUpload\.description"/.test(content)).toBe(true) } // Check imageUpload has required properties - if (/imageUpload\s*:\s*\{/.test(content)) { - expect(/imageUpload[^}]*title\s*:/.test(content)).toBe(true) - expect(/imageUpload[^}]*description\s*:/.test(content)).toBe(true) + if (/"feature\.imageUpload\.title"/.test(content)) { + expect(/"feature\.imageUpload\.title"/.test(content)).toBe(true) + expect(/"feature\.imageUpload\.description"/.test(content)).toBe(true) } // Check documentUpload has required properties - if (/documentUpload\s*:\s*\{/.test(content)) { - expect(/documentUpload[^}]*title\s*:/.test(content)).toBe(true) - expect(/documentUpload[^}]*description\s*:/.test(content)).toBe(true) + if (/"feature\.documentUpload\.title"/.test(content)) { + expect(/"feature\.documentUpload\.title"/.test(content)).toBe(true) + expect(/"feature\.documentUpload\.description"/.test(content)).toBe(true) } // Check audioUpload has required properties - if (/audioUpload\s*:\s*\{/.test(content)) { - expect(/audioUpload[^}]*title\s*:/.test(content)).toBe(true) - expect(/audioUpload[^}]*description\s*:/.test(content)).toBe(true) + if (/"feature\.audioUpload\.title"/.test(content)) { + expect(/"feature\.audioUpload\.title"/.test(content)).toBe(true) + expect(/"feature\.audioUpload\.description"/.test(content)).toBe(true) } }) }) diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout-main.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout-main.tsx index c1feb3ea5d..470f4477fa 100644 --- 
a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout-main.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/layout-main.tsx @@ -70,7 +70,7 @@ const AppDetailLayout: FC = (props) => { const navConfig = [ ...(isCurrentWorkspaceEditor ? [{ - name: t('common.appMenus.promptEng'), + name: t('appMenus.promptEng', { ns: 'common' }), href: `/app/${appId}/${(mode === AppModeEnum.WORKFLOW || mode === AppModeEnum.ADVANCED_CHAT) ? 'workflow' : 'configuration'}`, icon: RiTerminalWindowLine, selectedIcon: RiTerminalWindowFill, @@ -78,7 +78,7 @@ const AppDetailLayout: FC = (props) => { : [] ), { - name: t('common.appMenus.apiAccess'), + name: t('appMenus.apiAccess', { ns: 'common' }), href: `/app/${appId}/develop`, icon: RiTerminalBoxLine, selectedIcon: RiTerminalBoxFill, @@ -86,8 +86,8 @@ const AppDetailLayout: FC = (props) => { ...(isCurrentWorkspaceEditor ? [{ name: mode !== AppModeEnum.WORKFLOW - ? t('common.appMenus.logAndAnn') - : t('common.appMenus.logs'), + ? t('appMenus.logAndAnn', { ns: 'common' }) + : t('appMenus.logs', { ns: 'common' }), href: `/app/${appId}/logs`, icon: RiFileList3Line, selectedIcon: RiFileList3Fill, @@ -95,7 +95,7 @@ const AppDetailLayout: FC = (props) => { : [] ), { - name: t('common.appMenus.overview'), + name: t('appMenus.overview', { ns: 'common' }), href: `/app/${appId}/overview`, icon: RiDashboard2Line, selectedIcon: RiDashboard2Fill, @@ -104,7 +104,7 @@ const AppDetailLayout: FC = (props) => { return navConfig }, [t]) - useDocumentTitle(appDetail?.name || t('common.menus.appDetail')) + useDocumentTitle(appDetail?.name || t('menus.appDetail', { ns: 'common' })) useEffect(() => { if (appDetail) { diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx index e9877f1715..939e4e9fe6 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx @@ -4,6 +4,7 @@ import type { IAppCardProps } from '@/app/components/app/overview/app-card' import type { BlockEnum } from '@/app/components/workflow/types' import type { UpdateAppSiteCodeResponse } from '@/models/app' import type { App } from '@/types/app' +import type { I18nKeysByPrefix } from '@/types/i18n' import * as React from 'react' import { useCallback, useMemo } from 'react' import { useTranslation } from 'react-i18next' @@ -62,7 +63,7 @@ const CardView: FC = ({ appId, isInPanel, className }) => { const buildTriggerModeMessage = useCallback((featureName: string) => (
-      {t('appOverview.overview.disableTooltip.triggerMode', { feature: featureName })}
+      {t('overview.disableTooltip.triggerMode', { ns: 'appOverview', feature: featureName })}
= ({ appId, isInPanel, className }) => { window.open(triggerDocUrl, '_blank') }} >
-      {t('appOverview.overview.appInfo.enableTooltip.learnMore')}
+      {t('overview.appInfo.enableTooltip.learnMore', { ns: 'appOverview' })}
), [t, triggerDocUrl]) const disableWebAppTooltip = disableAppCards - ? buildTriggerModeMessage(t('appOverview.overview.appInfo.title')) + ? buildTriggerModeMessage(t('overview.appInfo.title', { ns: 'appOverview' })) : null const disableApiTooltip = disableAppCards - ? buildTriggerModeMessage(t('appOverview.overview.apiInfo.title')) + ? buildTriggerModeMessage(t('overview.apiInfo.title', { ns: 'appOverview' })) : null const disableMcpTooltip = disableAppCards - ? buildTriggerModeMessage(t('tools.mcp.server.title')) + ? buildTriggerModeMessage(t('mcp.server.title', { ns: 'tools' })) : null const updateAppDetail = async () => { @@ -94,7 +95,7 @@ const CardView: FC = ({ appId, isInPanel, className }) => { catch (error) { console.error(error) } } - const handleCallbackResult = (err: Error | null, message?: string) => { + const handleCallbackResult = (err: Error | null, message?: I18nKeysByPrefix<'common', 'actionMsg.'>) => { const type = err ? 'error' : 'success' message ||= (type === 'success' ? 'modifiedSuccessfully' : 'modifiedUnsuccessfully') @@ -104,7 +105,7 @@ const CardView: FC = ({ appId, isInPanel, className }) => { notify({ type, - message: t(`common.actionMsg.${message}`), + message: t(`actionMsg.${message}`, { ns: 'common' }) as string, }) } diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/chart-view.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/chart-view.tsx index e1fca90c5d..b6e902f456 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/chart-view.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/chart-view.tsx @@ -1,5 +1,6 @@ 'use client' import type { PeriodParams } from '@/app/components/app/overview/app-chart' +import type { I18nKeysByPrefix } from '@/types/i18n' import dayjs from 'dayjs' import quarterOfYear from 'dayjs/plugin/quarterOfYear' import * as React from 'react' @@ -16,7 +17,9 @@ dayjs.extend(quarterOfYear) const today = dayjs() -const TIME_PERIOD_MAPPING = [ +type TimePeriodName = I18nKeysByPrefix<'appLog', 'filter.period.'> + +const TIME_PERIOD_MAPPING: { value: number, name: TimePeriodName }[] = [ { value: 0, name: 'today' }, { value: 7, name: 'last7days' }, { value: 30, name: 'last30days' }, @@ -35,8 +38,8 @@ export default function ChartView({ appId, headerRight }: IChartViewProps) { const isChatApp = appDetail?.mode !== 'completion' && appDetail?.mode !== 'workflow' const isWorkflow = appDetail?.mode === 'workflow' const [period, setPeriod] = useState(IS_CLOUD_EDITION - ? { name: t('appLog.filter.period.today'), query: { start: today.startOf('day').format(queryDateFormat), end: today.endOf('day').format(queryDateFormat) } } - : { name: t('appLog.filter.period.last7days'), query: { start: today.subtract(7, 'day').startOf('day').format(queryDateFormat), end: today.endOf('day').format(queryDateFormat) } }, + ? { name: t('filter.period.today', { ns: 'appLog' }), query: { start: today.startOf('day').format(queryDateFormat), end: today.endOf('day').format(queryDateFormat) } } + : { name: t('filter.period.last7days', { ns: 'appLog' }), query: { start: today.subtract(7, 'day').startOf('day').format(queryDateFormat), end: today.endOf('day').format(queryDateFormat) } }, ) if (!appDetail) @@ -45,7 +48,7 @@ export default function ChartView({ appId, headerRight }: IChartViewProps) { return (
-
{t('common.appMenus.overview')}
+
{t('appMenus.overview', { ns: 'common' })}
     {IS_CLOUD_EDITION ? (
diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/long-time-range-picker.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/long-time-range-picker.tsx
index 557b723259..f7178d7ac2 100644
--- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/long-time-range-picker.tsx
+++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/long-time-range-picker.tsx
@@ -2,13 +2,16 @@
 import type { FC } from 'react'
 import type { PeriodParams } from '@/app/components/app/overview/app-chart'
 import type { Item } from '@/app/components/base/select'
+import type { I18nKeysByPrefix } from '@/types/i18n'
 import dayjs from 'dayjs'
 import * as React from 'react'
 import { useTranslation } from 'react-i18next'
 import { SimpleSelect } from '@/app/components/base/select'

+type TimePeriodName = I18nKeysByPrefix<'appLog', 'filter.period.'>
+
 type Props = {
-  periodMapping: { [key: string]: { value: number, name: string } }
+  periodMapping: { [key: string]: { value: number, name: TimePeriodName } }
   onSelect: (payload: PeriodParams) => void
   queryDateFormat: string
 }
@@ -25,9 +28,9 @@ const LongTimeRangePicker: FC = ({
   const handleSelect = React.useCallback((item: Item) => {
     const id = item.value
     const value = periodMapping[id]?.value ?? '-1'
-    const name = item.name || t('appLog.filter.period.allTime')
+    const name = item.name || t('filter.period.allTime', { ns: 'appLog' })
     if (value === -1) {
-      onSelect({ name: t('appLog.filter.period.allTime'), query: undefined })
+      onSelect({ name: t('filter.period.allTime', { ns: 'appLog' }), query: undefined })
     }
     else if (value === 0) {
       const startOfToday = today.startOf('day').format(queryDateFormat)
@@ -53,7 +56,7 @@ const LongTimeRangePicker: FC = ({
   return (
-      items={Object.entries(periodMapping).map(([k, v]) => ({ value: k, name: t(`appLog.filter.period.${v.name}`) }))}
+      items={Object.entries(periodMapping).map(([k, v]) => ({ value: k, name: t(`filter.period.${v.name}`, { ns: 'appLog' }) }))}
       className="mt-0 !w-40"
       notClearable={true}
       onSelect={handleSelect}
diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/date-picker.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/date-picker.tsx
index ab39846a36..004f83afc5 100644
--- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/date-picker.tsx
+++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/date-picker.tsx
@@ -4,7 +4,7 @@
 import type { FC } from 'react'
 import type { TriggerProps } from '@/app/components/base/date-and-time-picker/types'
 import { RiCalendarLine } from '@remixicon/react'
 import dayjs from 'dayjs'
-import { noop } from 'lodash-es'
+import { noop } from 'es-toolkit/compat'
 import * as React from 'react'
 import { useCallback } from 'react'
 import Picker from '@/app/components/base/date-and-time-picker/date-picker'
diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/index.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/index.tsx
index 469bc97737..10209de97b 100644
--- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/index.tsx
+++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/index.tsx
@@ -2,6 +2,7 @@
 import type { Dayjs } from 'dayjs'
 import type { FC } from 'react'
 import type { PeriodParams, PeriodParamsWithTimeRange } from '@/app/components/app/overview/app-chart'
+import type { I18nKeysByPrefix } from '@/types/i18n'
 import dayjs from 'dayjs'
 import * as React from 'react'
 import { useCallback, useState } from 'react'
@@ -13,8 +14,10 @@ import RangeSelector from './range-selector'

 const today = dayjs()

+type TimePeriodName = I18nKeysByPrefix<'appLog', 'filter.period.'>
+
 type Props = {
-  ranges: { value: number, name: string }[]
+  ranges: { value: number, name: TimePeriodName }[]
   onSelect: (payload: PeriodParams) => void
   queryDateFormat: string
 }
diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/range-selector.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/range-selector.tsx
index be7181c759..986170728f 100644
--- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/range-selector.tsx
+++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/range-selector.tsx
@@ -2,6 +2,7 @@
 import type { FC } from 'react'
 import type { PeriodParamsWithTimeRange, TimeRange } from '@/app/components/app/overview/app-chart'
 import type { Item } from '@/app/components/base/select'
+import type { I18nKeysByPrefix } from '@/types/i18n'
 import { RiArrowDownSLine, RiCheckLine } from '@remixicon/react'
 import dayjs from 'dayjs'
 import * as React from 'react'
@@ -12,9 +13,11 @@ import { cn } from '@/utils/classnames'

 const today = dayjs()

+type TimePeriodName = I18nKeysByPrefix<'appLog', 'filter.period.'>
+
 type Props = {
   isCustomRange: boolean
-  ranges: { value: number, name: string }[]
+  ranges: { value: number, name: TimePeriodName }[]
   onSelect: (payload: PeriodParamsWithTimeRange) => void
 }
@@ -42,7 +45,7 @@ const RangeSelector: FC = ({
   const renderTrigger = useCallback((item: Item | null, isOpen: boolean) => {
     return (
-
{isCustomRange ? t('appLog.filter.period.custom') : item?.name}
+
{isCustomRange ? t('filter.period.custom', { ns: 'appLog' }) : item?.name}
     )
@@ -66,7 +69,7 @@ const RangeSelector: FC = ({
   }, [])

   return (
-      items={ranges.map(v => ({ ...v, name: t(`appLog.filter.period.${v.name}`) }))}
+      items={ranges.map(v => ({ ...v, name: t(`filter.period.${v.name}`, { ns: 'appLog' }) }))}
       className="mt-0 !w-40"
       notClearable={true}
       onSelect={handleSelectRange}
diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/config-popup.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/config-popup.tsx
index 35ab8a61ec..4469459b52 100644
--- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/config-popup.tsx
+++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/config-popup.tsx
@@ -15,7 +15,7 @@ import ProviderPanel from './provider-panel'
 import TracingIcon from './tracing-icon'
 import { TracingProvider } from './type'

-const I18N_PREFIX = 'app.tracing'
+const I18N_PREFIX = 'tracing'

 export type PopupProps = {
   appId: string
@@ -327,19 +327,19 @@
-
{t(`${I18N_PREFIX}.tracing`)}
+
{t(`${I18N_PREFIX}.tracing`, { ns: 'app' })}
-          {t(`${I18N_PREFIX}.${enabled ? 'enabled' : 'disabled'}`)}
+          {t(`${I18N_PREFIX}.${enabled ? 'enabled' : 'disabled'}`, { ns: 'app' })}
{!readOnly && ( <> {providerAllNotConfigured ? ( {switchContent} @@ -351,14 +351,14 @@ const ConfigPopup: FC = ({
-        {t(`${I18N_PREFIX}.tracingDescription`)}
+        {t(`${I18N_PREFIX}.tracingDescription`, { ns: 'app' })}
{(providerAllConfigured || providerAllNotConfigured) ? ( <> -
{t(`${I18N_PREFIX}.configProviderTitle.${providerAllConfigured ? 'configured' : 'notConfigured'}`)}
+
{t(`${I18N_PREFIX}.configProviderTitle.${providerAllConfigured ? 'configured' : 'notConfigured'}`, { ns: 'app' })}
{langfusePanel} {langSmithPanel} @@ -375,11 +375,11 @@ const ConfigPopup: FC = ({ ) : ( <> -
{t(`${I18N_PREFIX}.configProviderTitle.configured`)}
+
{t(`${I18N_PREFIX}.configProviderTitle.configured`, { ns: 'app' })}
{configuredProviderPanel()}
-
{t(`${I18N_PREFIX}.configProviderTitle.moreProvider`)}
+
{t(`${I18N_PREFIX}.configProviderTitle.moreProvider`, { ns: 'app' })}
{moreProviderPanel()}
diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/panel.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/panel.tsx
index 438dbddfb8..5e7d98d191 100644
--- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/panel.tsx
+++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/panel.tsx
@@ -23,7 +23,7 @@ import ConfigButton from './config-button'
 import TracingIcon from './tracing-icon'
 import { TracingProvider } from './type'

-const I18N_PREFIX = 'app.tracing'
+const I18N_PREFIX = 'tracing'

 const Panel: FC = () => {
   const { t } = useTranslation()
@@ -45,7 +45,7 @@ const Panel: FC = () => {
     if (!noToast) {
       Toast.notify({
         type: 'success',
-        message: t('common.api.success'),
+        message: t('api.success', { ns: 'common' }),
       })
     }
   }
@@ -254,7 +254,7 @@ const Panel: FC = () => {
       )}
     >
-
{t(`${I18N_PREFIX}.title`)}
+
{t(`${I18N_PREFIX}.title`, { ns: 'app' })}
@@ -295,7 +295,7 @@ const Panel: FC = () => {
-          {t(`${I18N_PREFIX}.${enabled ? 'enabled' : 'disabled'}`)}
+          {t(`${I18N_PREFIX}.${enabled ? 'enabled' : 'disabled'}`, { ns: 'app' })}
         {InUseProviderIcon && }
diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx
index 0ef97e6970..ff78712c3c 100644
--- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx
+++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx
@@ -30,7 +30,7 @@ type Props = {
   onChosen: (provider: TracingProvider) => void
 }

-const I18N_PREFIX = 'app.tracing.configProvider'
+const I18N_PREFIX = 'tracing.configProvider'

 const arizeConfigTemplate = {
   api_key: '',
@@ -157,7 +157,7 @@ const ProviderConfigModal: FC = ({
       })
       Toast.notify({
         type: 'success',
-        message: t('common.api.remove'),
+        message: t('api.remove', { ns: 'common' }),
       })
       onRemoved()
       hideRemoveConfirm()
@@ -177,37 +177,37 @@ const ProviderConfigModal: FC = ({
     if (type === TracingProvider.arize) {
       const postData = config as ArizeConfig
       if (!postData.api_key)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'API Key' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'API Key' })
       if (!postData.space_id)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'Space ID' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'Space ID' })
       if (!errorMessage && !postData.project)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: t(`${I18N_PREFIX}.project`) })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: t(`${I18N_PREFIX}.project`, { ns: 'app' }) })
     }

     if (type === TracingProvider.phoenix) {
       const postData = config as PhoenixConfig
       if (!postData.api_key)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'API Key' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'API Key' })
       if (!errorMessage && !postData.project)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: t(`${I18N_PREFIX}.project`) })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: t(`${I18N_PREFIX}.project`, { ns: 'app' }) })
     }

     if (type === TracingProvider.langSmith) {
       const postData = config as LangSmithConfig
       if (!postData.api_key)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'API Key' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'API Key' })
       if (!errorMessage && !postData.project)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: t(`${I18N_PREFIX}.project`) })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: t(`${I18N_PREFIX}.project`, { ns: 'app' }) })
     }

     if (type === TracingProvider.langfuse) {
       const postData = config as LangFuseConfig
       if (!errorMessage && !postData.secret_key)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: t(`${I18N_PREFIX}.secretKey`) })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: t(`${I18N_PREFIX}.secretKey`, { ns: 'app' }) })
       if (!errorMessage && !postData.public_key)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: t(`${I18N_PREFIX}.publicKey`) })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: t(`${I18N_PREFIX}.publicKey`, { ns: 'app' }) })
       if (!errorMessage && !postData.host)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'Host' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'Host' })
     }

     if (type === TracingProvider.opik) {
@@ -218,43 +218,43 @@
     if (type === TracingProvider.weave) {
       const postData = config as WeaveConfig
       if (!errorMessage && !postData.api_key)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'API Key' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'API Key' })
       if (!errorMessage && !postData.project)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: t(`${I18N_PREFIX}.project`) })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: t(`${I18N_PREFIX}.project`, { ns: 'app' }) })
     }

     if (type === TracingProvider.aliyun) {
       const postData = config as AliyunConfig
       if (!errorMessage && !postData.app_name)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'App Name' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'App Name' })
       if (!errorMessage && !postData.license_key)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'License Key' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'License Key' })
       if (!errorMessage && !postData.endpoint)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'Endpoint' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'Endpoint' })
     }

     if (type === TracingProvider.mlflow) {
       const postData = config as MLflowConfig
       if (!errorMessage && !postData.tracking_uri)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'Tracking URI' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'Tracking URI' })
     }

     if (type === TracingProvider.databricks) {
       const postData = config as DatabricksConfig
       if (!errorMessage && !postData.experiment_id)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'Experiment ID' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'Experiment ID' })
       if (!errorMessage && !postData.host)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'Host' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'Host' })
     }

     if (type === TracingProvider.tencent) {
       const postData = config as TencentConfig
       if (!errorMessage && !postData.token)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'Token' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'Token' })
       if (!errorMessage && !postData.endpoint)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'Endpoint' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'Endpoint' })
       if (!errorMessage && !postData.service_name)
-        errorMessage = t('common.errorMsg.fieldRequired', { field: 'Service Name' })
+        errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: 'Service Name' })
     }

     return errorMessage
@@ -281,7 +281,7 @@ const ProviderConfigModal: FC = ({
       })
       Toast.notify({
         type: 'success',
-        message: t('common.api.success'),
+        message: t('api.success', { ns: 'common' }),
       })
       onSaved(config)
       if (isAdd)
@@ -303,8 +303,8 @@ const ProviderConfigModal: FC = ({
-        {t(`${I18N_PREFIX}.title`)}
-        {t(`app.tracing.${type}.title`)}
+        {t(`${I18N_PREFIX}.title`, { ns: 'app' })}
+        {t(`tracing.${type}.title`, { ns: 'app' })}
@@ -317,7 +317,7 @@ const ProviderConfigModal: FC = ({ isRequired value={(config as ArizeConfig).api_key} onChange={handleConfigChange('api_key')} - placeholder={t(`${I18N_PREFIX}.placeholder`, { key: 'API Key' })!} + placeholder={t(`${I18N_PREFIX}.placeholder`, { ns: 'app', key: 'API Key' })!} /> = ({ isRequired value={(config as ArizeConfig).space_id} onChange={handleConfigChange('space_id')} - placeholder={t(`${I18N_PREFIX}.placeholder`, { key: 'Space ID' })!} + placeholder={t(`${I18N_PREFIX}.placeholder`, { ns: 'app', key: 'Space ID' })!} /> = ({ isRequired value={(config as PhoenixConfig).api_key} onChange={handleConfigChange('api_key')} - placeholder={t(`${I18N_PREFIX}.placeholder`, { key: 'API Key' })!} + placeholder={t(`${I18N_PREFIX}.placeholder`, { ns: 'app', key: 'API Key' })!} /> = ({ isRequired value={(config as AliyunConfig).license_key} onChange={handleConfigChange('license_key')} - placeholder={t(`${I18N_PREFIX}.placeholder`, { key: 'License Key' })!} + placeholder={t(`${I18N_PREFIX}.placeholder`, { ns: 'app', key: 'License Key' })!} /> = ({ isRequired value={(config as TencentConfig).token} onChange={handleConfigChange('token')} - placeholder={t(`${I18N_PREFIX}.placeholder`, { key: 'Token' })!} + placeholder={t(`${I18N_PREFIX}.placeholder`, { ns: 'app', key: 'Token' })!} /> = ({ isRequired value={(config as WeaveConfig).api_key} onChange={handleConfigChange('api_key')} - placeholder={t(`${I18N_PREFIX}.placeholder`, { key: 'API Key' })!} + placeholder={t(`${I18N_PREFIX}.placeholder`, { ns: 'app', key: 'API Key' })!} /> = ({ isRequired value={(config as LangSmithConfig).api_key} onChange={handleConfigChange('api_key')} - placeholder={t(`${I18N_PREFIX}.placeholder`, { key: 'API Key' })!} + placeholder={t(`${I18N_PREFIX}.placeholder`, { ns: 'app', key: 'API Key' })!} /> = ({ {type === TracingProvider.langfuse && ( <> = ({ labelClassName="!text-sm" value={(config as OpikConfig).api_key} onChange={handleConfigChange('api_key')} - placeholder={t(`${I18N_PREFIX}.placeholder`, { key: 'API Key' })!} + placeholder={t(`${I18N_PREFIX}.placeholder`, { ns: 'app', key: 'API Key' })!} /> = ({ {type === TracingProvider.mlflow && ( <> = ({ placeholder="http://localhost:5000" /> )} {type === TracingProvider.databricks && ( <> )} @@ -634,7 +634,7 @@ const ProviderConfigModal: FC = ({ target="_blank" href={docURL[type]} > - {t(`${I18N_PREFIX}.viewDocsLink`, { key: t(`app.tracing.${type}.title`) })} + {t(`${I18N_PREFIX}.viewDocsLink`, { ns: 'app', key: t(`tracing.${type}.title`, { ns: 'app' }) })}
@@ -644,7 +644,7 @@ const ProviderConfigModal: FC = ({ className="h-9 text-sm font-medium text-text-secondary" onClick={showRemoveConfirm} > - {t('common.operation.remove')} + {t('operation.remove', { ns: 'common' })} @@ -653,7 +653,7 @@ const ProviderConfigModal: FC = ({ className="mr-2 h-9 text-sm font-medium text-text-secondary" onClick={onCancel} > - {t('common.operation.cancel')} + {t('operation.cancel', { ns: 'common' })}
@@ -670,7 +670,7 @@ const ProviderConfigModal: FC = ({
- {t('common.modelProvider.encrypted.front')} + {t('modelProvider.encrypted.front', { ns: 'common' })} = ({ > PKCS1_OAEP - {t('common.modelProvider.encrypted.back')} + {t('modelProvider.encrypted.back', { ns: 'common' })}
@@ -691,8 +691,8 @@ const ProviderConfigModal: FC = ({ diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-panel.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-panel.tsx index 6c66b19ad3..dc0fe2fbbc 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-panel.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-panel.tsx @@ -11,7 +11,7 @@ import { Eye as View } from '@/app/components/base/icons/src/vender/solid/genera import { cn } from '@/utils/classnames' import { TracingProvider } from './type' -const I18N_PREFIX = 'app.tracing' +const I18N_PREFIX = 'tracing' type Props = { type: TracingProvider @@ -82,14 +82,14 @@ const ProviderPanel: FC = ({
- {isChosen &&
{t(`${I18N_PREFIX}.inUse`)}
} + {isChosen &&
{t(`${I18N_PREFIX}.inUse`, { ns: 'app' })}
}
{!readOnly && (
{hasConfigured && (
-
{t(`${I18N_PREFIX}.view`)}
+
{t(`${I18N_PREFIX}.view`, { ns: 'app' })}
)}
= ({ onClick={handleConfigBtnClick} > -
{t(`${I18N_PREFIX}.config`)}
+
{t(`${I18N_PREFIX}.config`, { ns: 'app' })}
)}
-        {t(`${I18N_PREFIX}.${type}.description`)}
+        {t(`${I18N_PREFIX}.${type}.description`, { ns: 'app' })}
) diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/layout.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/layout.tsx index 4135482dd9..a918ae2786 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/layout.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/layout.tsx @@ -15,7 +15,7 @@ const AppDetail: FC = ({ children }) => { const router = useRouter() const { isCurrentWorkspaceDatasetOperator } = useAppContext() const { t } = useTranslation() - useDocumentTitle(t('common.menus.appDetail')) + useDocumentTitle(t('menus.appDetail', { ns: 'common' })) useEffect(() => { if (isCurrentWorkspaceDatasetOperator) diff --git a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout-main.tsx b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout-main.tsx index 10a12d75e1..1c5434924f 100644 --- a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout-main.tsx +++ b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/layout-main.tsx @@ -70,14 +70,14 @@ const DatasetDetailLayout: FC = (props) => { const navigation = useMemo(() => { const baseNavigation = [ { - name: t('common.datasetMenus.hitTesting'), + name: t('datasetMenus.hitTesting', { ns: 'common' }), href: `/datasets/${datasetId}/hitTesting`, icon: RiFocus2Line, selectedIcon: RiFocus2Fill, disabled: isButtonDisabledWithPipeline, }, { - name: t('common.datasetMenus.settings'), + name: t('datasetMenus.settings', { ns: 'common' }), href: `/datasets/${datasetId}/settings`, icon: RiEqualizer2Line, selectedIcon: RiEqualizer2Fill, @@ -87,14 +87,14 @@ const DatasetDetailLayout: FC = (props) => { if (datasetRes?.provider !== 'external') { baseNavigation.unshift({ - name: t('common.datasetMenus.pipeline'), + name: t('datasetMenus.pipeline', { ns: 'common' }), href: `/datasets/${datasetId}/pipeline`, icon: PipelineLine as RemixiconComponentType, selectedIcon: PipelineFill as RemixiconComponentType, disabled: false, }) baseNavigation.unshift({ - name: t('common.datasetMenus.documents'), + name: t('datasetMenus.documents', { ns: 'common' }), href: `/datasets/${datasetId}/documents`, icon: RiFileTextLine, selectedIcon: RiFileTextFill, @@ -105,7 +105,7 @@ const DatasetDetailLayout: FC = (props) => { return baseNavigation }, [t, datasetId, isButtonDisabledWithPipeline, datasetRes?.provider]) - useDocumentTitle(datasetRes?.name || t('common.menus.datasets')) + useDocumentTitle(datasetRes?.name || t('menus.datasets', { ns: 'common' })) const setAppSidebarExpand = useStore(state => state.setAppSidebarExpand) diff --git a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/settings/page.tsx b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/settings/page.tsx index 9dfeaef528..8080b565cd 100644 --- a/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/settings/page.tsx +++ b/web/app/(commonLayout)/datasets/(datasetDetailLayout)/[datasetId]/settings/page.tsx @@ -1,10 +1,11 @@ +/* eslint-disable dify-i18n/require-ns-option */ import * as React from 'react' import Form from '@/app/components/datasets/settings/form' -import { getLocaleOnServer, useTranslation as translate } from '@/i18n-config/server' +import { getLocaleOnServer, getTranslation } from '@/i18n-config/server' const Settings = async () => { const locale = await getLocaleOnServer() - const { t } = await translate(locale, 'dataset-settings') + const { t } = await getTranslation(locale, 'dataset-settings') return (
diff --git a/web/app/(commonLayout)/explore/layout.tsx b/web/app/(commonLayout)/explore/layout.tsx
index 5928308cdc..7d59d397f9 100644
--- a/web/app/(commonLayout)/explore/layout.tsx
+++ b/web/app/(commonLayout)/explore/layout.tsx
@@ -7,7 +7,7 @@ import useDocumentTitle from '@/hooks/use-document-title'
 const ExploreLayout: FC = ({ children }) => {
   const { t } = useTranslation()
-  useDocumentTitle(t('common.menus.explore'))
+  useDocumentTitle(t('menus.explore', { ns: 'common' }))

   return (
       {children}
diff --git a/web/app/(commonLayout)/layout.tsx b/web/app/(commonLayout)/layout.tsx
index 91bdb3f99a..a0ccde957d 100644
--- a/web/app/(commonLayout)/layout.tsx
+++ b/web/app/(commonLayout)/layout.tsx
@@ -1,5 +1,6 @@
 import type { ReactNode } from 'react'
 import * as React from 'react'
+import { AppInitializer } from '@/app/components/app-initializer'
 import AmplitudeProvider from '@/app/components/base/amplitude'
 import GA, { GaType } from '@/app/components/base/ga'
 import Zendesk from '@/app/components/base/zendesk'
@@ -7,7 +8,6 @@
 import GotoAnything from '@/app/components/goto-anything'
 import Header from '@/app/components/header'
 import HeaderWrapper from '@/app/components/header/header-wrapper'
 import ReadmePanel from '@/app/components/plugins/readme-panel'
-import SwrInitializer from '@/app/components/swr-initializer'
 import { AppContextProvider } from '@/context/app-context'
 import { EventEmitterContextProvider } from '@/context/event-emitter'
 import { ModalContextProvider } from '@/context/modal-context'
@@ -20,7 +20,7 @@ const Layout = ({ children }: { children: ReactNode }) => {
     <>
-
+
@@ -38,7 +38,7 @@ const Layout = ({ children }: { children: ReactNode }) => {
-
+
   )
 }
diff --git a/web/app/(commonLayout)/tools/page.tsx b/web/app/(commonLayout)/tools/page.tsx
index 2d5c1a8e44..3e88050eba 100644
--- a/web/app/(commonLayout)/tools/page.tsx
+++ b/web/app/(commonLayout)/tools/page.tsx
@@ -12,7 +12,7 @@ const ToolsList: FC = () => {
   const router = useRouter()
   const { isCurrentWorkspaceDatasetOperator } = useAppContext()
   const { t } = useTranslation()
-  useDocumentTitle(t('common.menus.tools'))
+  useDocumentTitle(t('menus.tools', { ns: 'common' }))

   useEffect(() => {
     if (isCurrentWorkspaceDatasetOperator)
diff --git a/web/app/(shareLayout)/components/authenticated-layout.tsx b/web/app/(shareLayout)/components/authenticated-layout.tsx
index 00288b7a61..113f3b5680 100644
--- a/web/app/(shareLayout)/components/authenticated-layout.tsx
+++ b/web/app/(shareLayout)/components/authenticated-layout.tsx
@@ -81,7 +81,7 @@ const AuthenticatedLayout = ({ children }: { children: React.ReactNode }) => {
   return (
-        {t('common.userProfile.logout')}
+        {t('userProfile.logout', { ns: 'common' })}
   )
 }
diff --git a/web/app/(shareLayout)/components/splash.tsx b/web/app/(shareLayout)/components/splash.tsx
index b8ea1b2e56..9f89a03993 100644
--- a/web/app/(shareLayout)/components/splash.tsx
+++ b/web/app/(shareLayout)/components/splash.tsx
@@ -94,8 +94,8 @@ const Splash: FC = ({ children }) => {
   if (message) {
     return (
-
-          {code === '403' ? t('common.userProfile.logout') : t('share.login.backToHome')}
+
+          {code === '403' ? t('userProfile.logout', { ns: 'common' }) : t('login.backToHome', { ns: 'share' })}
     )
   }
diff --git a/web/app/(shareLayout)/webapp-reset-password/check-code/page.tsx b/web/app/(shareLayout)/webapp-reset-password/check-code/page.tsx
index 9ce058340c..ac15f1df6d 100644
--- a/web/app/(shareLayout)/webapp-reset-password/check-code/page.tsx
+++ b/web/app/(shareLayout)/webapp-reset-password/check-code/page.tsx
@@ -26,14 +26,14 @@ export default function CheckCode() {
     if (!code.trim()) {
       Toast.notify({
         type: 'error',
-        message: t('login.checkCode.emptyCode'),
+        message: t('checkCode.emptyCode', { ns: 'login' }),
       })
       return
     }
     if (!/\d{6}/.test(code)) {
       Toast.notify({
         type: 'error',
-        message: t('login.checkCode.invalidCode'),
+        message: t('checkCode.invalidCode', { ns: 'login' }),
       })
       return
     }
@@ -69,22 +69,22 @@
-

{t('login.checkCode.checkYourEmail')}

+

{t('checkCode.checkYourEmail', { ns: 'login' })}

- {t('login.checkCode.tipsPrefix')} + {t('checkCode.tipsPrefix', { ns: 'login' })} {email}
- {t('login.checkCode.validTime')} + {t('checkCode.validTime', { ns: 'login' })}

- - setVerifyCode(e.target.value)} maxLength={6} className="mt-1" placeholder={t('login.checkCode.verificationCodePlaceholder') || ''} /> - + + setVerifyCode(e.target.value)} maxLength={6} className="mt-1" placeholder={t('checkCode.verificationCodePlaceholder', { ns: 'login' }) || ''} /> +
@@ -94,7 +94,7 @@ export default function CheckCode() {
-        {t('login.back')}
+        {t('back', { ns: 'login' })}
   )
diff --git a/web/app/(shareLayout)/webapp-reset-password/page.tsx b/web/app/(shareLayout)/webapp-reset-password/page.tsx
index 92c39eb729..6acd8d08f4 100644
--- a/web/app/(shareLayout)/webapp-reset-password/page.tsx
+++ b/web/app/(shareLayout)/webapp-reset-password/page.tsx
@@ -1,6 +1,6 @@
 'use client'
 import { RiArrowLeftLine, RiLockPasswordLine } from '@remixicon/react'
-import { noop } from 'lodash-es'
+import { noop } from 'es-toolkit/compat'
 import Link from 'next/link'
 import { useRouter, useSearchParams } from 'next/navigation'
 import { useState } from 'react'
@@ -27,14 +27,14 @@ export default function CheckCode() {
   const handleGetEMailVerificationCode = async () => {
     try {
       if (!email) {
-        Toast.notify({ type: 'error', message: t('login.error.emailEmpty') })
+        Toast.notify({ type: 'error', message: t('error.emailEmpty', { ns: 'login' }) })
         return
       }
       if (!emailRegex.test(email)) {
         Toast.notify({
           type: 'error',
-          message: t('login.error.emailInValid'),
+          message: t('error.emailInValid', { ns: 'login' }),
         })
         return
       }
@@ -50,7 +50,7 @@ export default function CheckCode() {
       else if (res.code === 'account_not_found') {
         Toast.notify({
           type: 'error',
-          message: t('login.error.registrationNotAllowed'),
+          message: t('error.registrationNotAllowed', { ns: 'login' }),
         })
       }
       else {
@@ -74,21 +74,21 @@
-

{t('login.resetPassword')}

+

{t('resetPassword', { ns: 'login' })}

- {t('login.resetPasswordDesc')} + {t('resetPasswordDesc', { ns: 'login' })}

- +
- setEmail(e.target.value)} /> + setEmail(e.target.value)} />
- +
@@ -99,7 +99,7 @@ export default function CheckCode() {
- {t('login.backToLogin')} + {t('backToLogin', { ns: 'login' })}
) diff --git a/web/app/(shareLayout)/webapp-reset-password/set-password/page.tsx b/web/app/(shareLayout)/webapp-reset-password/set-password/page.tsx index bfb71d9c6f..9f59e8f9eb 100644 --- a/web/app/(shareLayout)/webapp-reset-password/set-password/page.tsx +++ b/web/app/(shareLayout)/webapp-reset-password/set-password/page.tsx @@ -45,15 +45,15 @@ const ChangePasswordForm = () => { const valid = useCallback(() => { if (!password.trim()) { - showErrorMessage(t('login.error.passwordEmpty')) + showErrorMessage(t('error.passwordEmpty', { ns: 'login' })) return false } if (!validPassword.test(password)) { - showErrorMessage(t('login.error.passwordInvalid')) + showErrorMessage(t('error.passwordInvalid', { ns: 'login' })) return false } if (password !== confirmPassword) { - showErrorMessage(t('common.account.notEqual')) + showErrorMessage(t('account.notEqual', { ns: 'common' })) return false } return true @@ -92,10 +92,10 @@ const ChangePasswordForm = () => {

- {t('login.changePassword')} + {t('changePassword', { ns: 'login' })}

- {t('login.changePasswordTip')} + {t('changePasswordTip', { ns: 'login' })}

@@ -104,7 +104,7 @@ const ChangePasswordForm = () => { {/* Password */}
{ type={showPassword ? 'text' : 'password'} value={password} onChange={e => setPassword(e.target.value)} - placeholder={t('login.passwordPlaceholder') || ''} + placeholder={t('passwordPlaceholder', { ns: 'login' }) || ''} />
@@ -125,12 +125,12 @@ const ChangePasswordForm = () => {
-
{t('login.error.passwordInvalid')}
+
{t('error.passwordInvalid', { ns: 'login' })}
{/* Confirm Password */}
{ type={showConfirmPassword ? 'text' : 'password'} value={confirmPassword} onChange={e => setConfirmPassword(e.target.value)} - placeholder={t('login.confirmPasswordPlaceholder') || ''} + placeholder={t('confirmPasswordPlaceholder', { ns: 'login' }) || ''} />
@@ -171,7 +171,7 @@ const ChangePasswordForm = () => {

- {t('login.passwordChangedTip')} + {t('passwordChangedTip', { ns: 'login' })}

@@ -183,7 +183,7 @@ const ChangePasswordForm = () => { router.replace(getSignInUrl()) }} > - {t('login.passwordChanged')} + {t('passwordChanged', { ns: 'login' })} {' '} ( {Math.round(countdown / 1000)} diff --git a/web/app/(shareLayout)/webapp-signin/check-code/page.tsx b/web/app/(shareLayout)/webapp-signin/check-code/page.tsx index ee7fc22bea..0ef63dcbd2 100644 --- a/web/app/(shareLayout)/webapp-signin/check-code/page.tsx +++ b/web/app/(shareLayout)/webapp-signin/check-code/page.tsx @@ -45,21 +45,21 @@ export default function CheckCode() { if (!code.trim()) { Toast.notify({ type: 'error', - message: t('login.checkCode.emptyCode'), + message: t('checkCode.emptyCode', { ns: 'login' }), }) return } if (!/\d{6}/.test(code)) { Toast.notify({ type: 'error', - message: t('login.checkCode.invalidCode'), + message: t('checkCode.invalidCode', { ns: 'login' }), }) return } if (!redirectUrl || !appCode) { Toast.notify({ type: 'error', - message: t('login.error.redirectUrlMissing'), + message: t('error.redirectUrlMissing', { ns: 'login' }), }) return } @@ -108,19 +108,19 @@ export default function CheckCode() {
-

{t('login.checkCode.checkYourEmail')}

+

{t('checkCode.checkYourEmail', { ns: 'login' })}

- {t('login.checkCode.tipsPrefix')} + {t('checkCode.tipsPrefix', { ns: 'login' })} {email}
- {t('login.checkCode.validTime')} + {t('checkCode.validTime', { ns: 'login' })}

- + setVerifyCode(e.target.value)} maxLength={6} className="mt-1" - placeholder={t('login.checkCode.verificationCodePlaceholder') || ''} + placeholder={t('checkCode.verificationCodePlaceholder', { ns: 'login' }) || ''} /> - +
@@ -140,7 +140,7 @@ export default function CheckCode() {
-        {t('login.back')}
+        {t('back', { ns: 'login' })}
) diff --git a/web/app/(shareLayout)/webapp-signin/components/mail-and-code-auth.tsx b/web/app/(shareLayout)/webapp-signin/components/mail-and-code-auth.tsx index 9020858347..f3e018a1fa 100644 --- a/web/app/(shareLayout)/webapp-signin/components/mail-and-code-auth.tsx +++ b/web/app/(shareLayout)/webapp-signin/components/mail-and-code-auth.tsx @@ -1,4 +1,4 @@ -import { noop } from 'lodash-es' +import { noop } from 'es-toolkit/compat' import { useRouter, useSearchParams } from 'next/navigation' import { useState } from 'react' import { useTranslation } from 'react-i18next' @@ -23,14 +23,14 @@ export default function MailAndCodeAuth() { const handleGetEMailVerificationCode = async () => { try { if (!email) { - Toast.notify({ type: 'error', message: t('login.error.emailEmpty') }) + Toast.notify({ type: 'error', message: t('error.emailEmpty', { ns: 'login' }) }) return } if (!emailRegex.test(email)) { Toast.notify({ type: 'error', - message: t('login.error.emailInValid'), + message: t('error.emailInValid', { ns: 'login' }), }) return } @@ -56,12 +56,12 @@ export default function MailAndCodeAuth() {
- +
- setEmail(e.target.value)} /> + setEmail(e.target.value)} />
- +
diff --git a/web/app/(shareLayout)/webapp-signin/components/mail-and-password-auth.tsx b/web/app/(shareLayout)/webapp-signin/components/mail-and-password-auth.tsx index 67e4bf7af2..7e76a87250 100644 --- a/web/app/(shareLayout)/webapp-signin/components/mail-and-password-auth.tsx +++ b/web/app/(shareLayout)/webapp-signin/components/mail-and-password-auth.tsx @@ -1,5 +1,5 @@ 'use client' -import { noop } from 'lodash-es' +import { noop } from 'es-toolkit/compat' import Link from 'next/link' import { useRouter, useSearchParams } from 'next/navigation' import { useCallback, useState } from 'react' @@ -46,25 +46,25 @@ export default function MailAndPasswordAuth({ isEmailSetup }: MailAndPasswordAut const appCode = getAppCodeFromRedirectUrl() const handleEmailPasswordLogin = async () => { if (!email) { - Toast.notify({ type: 'error', message: t('login.error.emailEmpty') }) + Toast.notify({ type: 'error', message: t('error.emailEmpty', { ns: 'login' }) }) return } if (!emailRegex.test(email)) { Toast.notify({ type: 'error', - message: t('login.error.emailInValid'), + message: t('error.emailInValid', { ns: 'login' }), }) return } if (!password?.trim()) { - Toast.notify({ type: 'error', message: t('login.error.passwordEmpty') }) + Toast.notify({ type: 'error', message: t('error.passwordEmpty', { ns: 'login' }) }) return } if (!redirectUrl || !appCode) { Toast.notify({ type: 'error', - message: t('login.error.redirectUrlMissing'), + message: t('error.redirectUrlMissing', { ns: 'login' }), }) return } @@ -111,7 +111,7 @@ export default function MailAndPasswordAuth({ isEmailSetup }: MailAndPasswordAut
@@ -128,14 +128,14 @@ export default function MailAndPasswordAuth({ isEmailSetup }: MailAndPasswordAut
@@ -149,7 +149,7 @@ export default function MailAndPasswordAuth({ isEmailSetup }: MailAndPasswordAut }} type={showPassword ? 'text' : 'password'} autoComplete="current-password" - placeholder={t('login.passwordPlaceholder') || ''} + placeholder={t('passwordPlaceholder', { ns: 'login' }) || ''} tabIndex={2} />
@@ -172,7 +172,7 @@ export default function MailAndPasswordAuth({ isEmailSetup }: MailAndPasswordAut disabled={isLoading || !email || !password} className="w-full" > - {t('login.signBtn')} + {t('signBtn', { ns: 'login' })}
diff --git a/web/app/(shareLayout)/webapp-signin/components/sso-auth.tsx b/web/app/(shareLayout)/webapp-signin/components/sso-auth.tsx index 472952c2c8..d8f3854868 100644 --- a/web/app/(shareLayout)/webapp-signin/components/sso-auth.tsx +++ b/web/app/(shareLayout)/webapp-signin/components/sso-auth.tsx @@ -82,7 +82,7 @@ const SSOAuth: FC = ({ className="w-full" > - {t('login.withSSO')} + {t('withSSO', { ns: 'login' })} ) } diff --git a/web/app/(shareLayout)/webapp-signin/layout.tsx b/web/app/(shareLayout)/webapp-signin/layout.tsx index dd4510a541..21cb0e1f57 100644 --- a/web/app/(shareLayout)/webapp-signin/layout.tsx +++ b/web/app/(shareLayout)/webapp-signin/layout.tsx @@ -9,7 +9,7 @@ import { cn } from '@/utils/classnames' export default function SignInLayout({ children }: PropsWithChildren) { const { t } = useTranslation() const systemFeatures = useGlobalPublicStore(s => s.systemFeatures) - useDocumentTitle(t('login.webapp.login')) + useDocumentTitle(t('webapp.login', { ns: 'login' })) return ( <>
diff --git a/web/app/(shareLayout)/webapp-signin/normalForm.tsx b/web/app/(shareLayout)/webapp-signin/normalForm.tsx index 40d34dcaf5..b15145346f 100644 --- a/web/app/(shareLayout)/webapp-signin/normalForm.tsx +++ b/web/app/(shareLayout)/webapp-signin/normalForm.tsx @@ -60,8 +60,8 @@ const NormalForm = () => {
-

{t('login.licenseLost')}

-

{t('login.licenseLostTip')}

+

{t('licenseLost', { ns: 'login' })}

+

{t('licenseLostTip', { ns: 'login' })}

@@ -76,8 +76,8 @@ const NormalForm = () => {
-

{t('login.licenseExpired')}

-

{t('login.licenseExpiredTip')}

+

{t('licenseExpired', { ns: 'login' })}

+

{t('licenseExpiredTip', { ns: 'login' })}

@@ -92,8 +92,8 @@ const NormalForm = () => { -

{t('login.licenseInactive')}

-

{t('login.licenseInactiveTip')}

+

{t('licenseInactive', { ns: 'login' })}

+

{t('licenseInactiveTip', { ns: 'login' })}

@@ -104,8 +104,8 @@ const NormalForm = () => { <>
-

{systemFeatures.branding.enabled ? t('login.pageTitleForE') : t('login.pageTitle')}

-

{t('login.welcome')}

+

{systemFeatures.branding.enabled ? t('pageTitleForE', { ns: 'login' }) : t('pageTitle', { ns: 'login' })}

+

{t('welcome', { ns: 'login' })}

@@ -122,7 +122,7 @@ const NormalForm = () => {
-          {t('login.or')}
+          {t('or', { ns: 'login' })}
)} @@ -134,7 +134,7 @@ const NormalForm = () => { {systemFeatures.enable_email_password_login && (
{ updateAuthType('password') }}> - {t('login.usePassword')} + {t('usePassword', { ns: 'login' })}
)} @@ -144,7 +144,7 @@ const NormalForm = () => { {systemFeatures.enable_email_code_login && (
{ updateAuthType('code') }}> - {t('login.useVerificationCode')} + {t('useVerificationCode', { ns: 'login' })}
)} @@ -158,8 +158,8 @@ const NormalForm = () => {
-

{t('login.noLoginMethod')}

-

{t('login.noLoginMethodTip')}

+

{t('noLoginMethod', { ns: 'login' })}

+

{t('noLoginMethodTip', { ns: 'login' })}

{step === STEP.start && ( <> -
{t('common.account.changeEmail.title')}
+
{t('account.changeEmail.title', { ns: 'common' })}
-
{t('common.account.changeEmail.authTip')}
+
{t('account.changeEmail.authTip', { ns: 'common' })}
{ variant="primary" onClick={sendCodeToOriginEmail} > - {t('common.account.changeEmail.sendVerifyCode')} + {t('account.changeEmail.sendVerifyCode', { ns: 'common' })}
)} {step === STEP.verifyOrigin && ( <> -
{t('common.account.changeEmail.verifyEmail')}
+
{t('account.changeEmail.verifyEmail', { ns: 'common' })}
{
-
{t('common.account.changeEmail.codeLabel')}
+
{t('account.changeEmail.codeLabel', { ns: 'common' })}
setCode(e.target.value)} maxLength={6} @@ -267,46 +267,46 @@ const EmailChangeModal = ({ onClose, email, show }: Props) => { variant="primary" onClick={handleVerifyOriginEmail} > - {t('common.account.changeEmail.continue')} + {t('account.changeEmail.continue', { ns: 'common' })}
- {t('common.account.changeEmail.resendTip')} + {t('account.changeEmail.resendTip', { ns: 'common' })} {time > 0 && ( - {t('common.account.changeEmail.resendCount', { count: time })} + {t('account.changeEmail.resendCount', { ns: 'common', count: time })} )} {!time && ( - {t('common.account.changeEmail.resend')} + {t('account.changeEmail.resend', { ns: 'common' })} )}
)} {step === STEP.newEmail && ( <> -
{t('common.account.changeEmail.newEmail')}
+
{t('account.changeEmail.newEmail', { ns: 'common' })}
-
{t('common.account.changeEmail.content3')}
+
{t('account.changeEmail.content3', { ns: 'common' })}
-
{t('common.account.changeEmail.emailLabel')}
+
{t('account.changeEmail.emailLabel', { ns: 'common' })}
handleNewEmailValueChange(e.target.value)} destructive={newEmailExited || unAvailableEmail} /> {newEmailExited && ( -
{t('common.account.changeEmail.existingEmail')}
+
{t('account.changeEmail.existingEmail', { ns: 'common' })}
)} {unAvailableEmail && ( -
{t('common.account.changeEmail.unAvailableEmail')}
+
{t('account.changeEmail.unAvailableEmail', { ns: 'common' })}
)}
@@ -316,20 +316,20 @@ const EmailChangeModal = ({ onClose, email, show }: Props) => { variant="primary" onClick={sendCodeToNewEmail} > - {t('common.account.changeEmail.sendVerifyCode')} + {t('account.changeEmail.sendVerifyCode', { ns: 'common' })}
)} {step === STEP.verifyNew && ( <> -
{t('common.account.changeEmail.verifyNew')}
+
{t('account.changeEmail.verifyNew', { ns: 'common' })}
{
-
{t('common.account.changeEmail.codeLabel')}
+
{t('account.changeEmail.codeLabel', { ns: 'common' })}
setCode(e.target.value)} maxLength={6} @@ -356,22 +356,22 @@ const EmailChangeModal = ({ onClose, email, show }: Props) => { variant="primary" onClick={submitNewEmail} > - {t('common.account.changeEmail.changeTo', { email: mail })} + {t('account.changeEmail.changeTo', { ns: 'common', email: mail })}
- {t('common.account.changeEmail.resendTip')} + {t('account.changeEmail.resendTip', { ns: 'common' })} {time > 0 && ( - {t('common.account.changeEmail.resendCount', { count: time })} + {t('account.changeEmail.resendCount', { ns: 'common', count: time })} )} {!time && ( - {t('common.account.changeEmail.resend')} + {t('account.changeEmail.resend', { ns: 'common' })} )}
diff --git a/web/app/account/(commonLayout)/account-page/index.tsx b/web/app/account/(commonLayout)/account-page/index.tsx index baa9759ffe..f01efc002c 100644 --- a/web/app/account/(commonLayout)/account-page/index.tsx +++ b/web/app/account/(commonLayout)/account-page/index.tsx @@ -61,7 +61,7 @@ export default function AccountPage() { try { setEditing(true) await updateUserProfile({ url: 'account/name', body: { name: editName } }) - notify({ type: 'success', message: t('common.actionMsg.modifiedSuccessfully') }) + notify({ type: 'success', message: t('actionMsg.modifiedSuccessfully', { ns: 'common' }) }) mutateUserProfile() setEditNameModalVisible(false) setEditing(false) @@ -80,15 +80,15 @@ export default function AccountPage() { } const valid = () => { if (!password.trim()) { - showErrorMessage(t('login.error.passwordEmpty')) + showErrorMessage(t('error.passwordEmpty', { ns: 'login' })) return false } if (!validPassword.test(password)) { - showErrorMessage(t('login.error.passwordInvalid')) + showErrorMessage(t('error.passwordInvalid', { ns: 'login' })) return false } if (password !== confirmPassword) { - showErrorMessage(t('common.account.notEqual')) + showErrorMessage(t('account.notEqual', { ns: 'common' })) return false } @@ -112,7 +112,7 @@ export default function AccountPage() { repeat_new_password: confirmPassword, }, }) - notify({ type: 'success', message: t('common.actionMsg.modifiedSuccessfully') }) + notify({ type: 'success', message: t('actionMsg.modifiedSuccessfully', { ns: 'common' }) }) mutateUserProfile() setEditPasswordModalVisible(false) resetPasswordForm() @@ -146,7 +146,7 @@ export default function AccountPage() { return ( <>
-

{t('common.account.myAccount')}

+

{t('account.myAccount', { ns: 'common' })}

@@ -164,25 +164,25 @@ export default function AccountPage() {
-
{t('common.account.name')}
+
{t('account.name', { ns: 'common' })}
{userProfile.name}
- {t('common.operation.edit')} + {t('operation.edit', { ns: 'common' })}
-
{t('common.account.email')}
+
{t('account.email', { ns: 'common' })}
{userProfile.email}
{systemFeatures.enable_change_email && (
setShowUpdateEmail(true)}> - {t('common.operation.change')} + {t('operation.change', { ns: 'common' })}
)}
@@ -191,26 +191,26 @@ export default function AccountPage() { systemFeatures.enable_email_password_login && (
-
{t('common.account.password')}
-
{t('common.account.passwordTip')}
+
{t('account.password', { ns: 'common' })}
+
{t('account.passwordTip', { ns: 'common' })}
- +
) }
-
{t('common.account.langGeniusAccount')}
-
{t('common.account.langGeniusAccountTip')}
+
{t('account.langGeniusAccount', { ns: 'common' })}
+
{t('account.langGeniusAccountTip', { ns: 'common' })}
{!!apps.length && ( ({ ...app, key: app.id, name: app.name }))} renderItem={renderAppItem} wrapperClassName="mt-2" /> )} - {!IS_CE_EDITION && } + {!IS_CE_EDITION && }
{ editNameModalVisible && ( @@ -219,21 +219,21 @@ export default function AccountPage() { onClose={() => setEditNameModalVisible(false)} className="!w-[420px] !p-6" > -
{t('common.account.editName')}
-
{t('common.account.name')}
+
{t('account.editName', { ns: 'common' })}
+
{t('account.name', { ns: 'common' })}
setEditName(e.target.value)} />
- +
@@ -249,10 +249,10 @@ export default function AccountPage() { }} className="!w-[420px] !p-6" > -
{userProfile.is_password_set ? t('common.account.resetPassword') : t('common.account.setPassword')}
+
{userProfile.is_password_set ? t('account.resetPassword', { ns: 'common' }) : t('account.setPassword', { ns: 'common' })}
{userProfile.is_password_set && ( <> -
{t('common.account.currentPassword')}
+
{t('account.currentPassword', { ns: 'common' })}
)}
- {userProfile.is_password_set ? t('common.account.newPassword') : t('common.account.password')} + {userProfile.is_password_set ? t('account.newPassword', { ns: 'common' }) : t('account.password', { ns: 'common' })}
-
{t('common.account.confirmPassword')}
+
{t('account.confirmPassword', { ns: 'common' })}
- {t('common.operation.cancel')} + {t('operation.cancel', { ns: 'common' })}
diff --git a/web/app/account/(commonLayout)/avatar.tsx b/web/app/account/(commonLayout)/avatar.tsx index 3cbbb47a76..8ea29e8e45 100644 --- a/web/app/account/(commonLayout)/avatar.tsx +++ b/web/app/account/(commonLayout)/avatar.tsx @@ -94,7 +94,7 @@ export default function AppSelector() { className="group flex h-9 cursor-pointer items-center justify-start rounded-lg px-3 hover:bg-state-base-hover" > -
{t('common.userProfile.logout')}
+
{t('userProfile.logout', { ns: 'common' })}
diff --git a/web/app/account/(commonLayout)/delete-account/components/check-email.tsx b/web/app/account/(commonLayout)/delete-account/components/check-email.tsx index c0bbf4422a..65a58c936e 100644 --- a/web/app/account/(commonLayout)/delete-account/components/check-email.tsx +++ b/web/app/account/(commonLayout)/delete-account/components/check-email.tsx @@ -31,22 +31,22 @@ export default function CheckEmail(props: DeleteAccountProps) { return ( <>
-        {t('common.account.deleteTip')}
+        {t('account.deleteTip', { ns: 'common' })}
-        {t('common.account.deletePrivacyLinkTip')}
-        {t('common.account.deletePrivacyLink')}
+        {t('account.deletePrivacyLinkTip', { ns: 'common' })}
+        {t('account.deletePrivacyLink', { ns: 'common' })}
- + { setUserInputEmail(e.target.value) }} />
- - + +
) diff --git a/web/app/account/(commonLayout)/delete-account/components/feed-back.tsx b/web/app/account/(commonLayout)/delete-account/components/feed-back.tsx index 4a1b41cb20..67fea3c141 100644 --- a/web/app/account/(commonLayout)/delete-account/components/feed-back.tsx +++ b/web/app/account/(commonLayout)/delete-account/components/feed-back.tsx @@ -28,7 +28,7 @@ export default function FeedBack(props: DeleteAccountProps) { await logout() // Tokens are now stored in cookies and cleared by backend router.push('/signin') - Toast.notify({ type: 'info', message: t('common.account.deleteSuccessTip') }) + Toast.notify({ type: 'info', message: t('account.deleteSuccessTip', { ns: 'common' }) }) } catch (error) { console.error(error) } }, [router, t]) @@ -50,22 +50,22 @@ export default function FeedBack(props: DeleteAccountProps) { - + @@ -116,10 +116,10 @@ const MCPServerModal = ({ {latestParams.length > 0 && (
-
{t('tools.mcp.server.modal.parameters')}
+
{t('mcp.server.modal.parameters', { ns: 'tools' })}
-
{t('tools.mcp.server.modal.parametersTip')}
+
{t('mcp.server.modal.parametersTip', { ns: 'tools' })}
{latestParams.map(paramItem => (
- - + +
) diff --git a/web/app/components/tools/mcp/mcp-server-param-item.tsx b/web/app/components/tools/mcp/mcp-server-param-item.tsx index d951f19caa..db27cfdf98 100644 --- a/web/app/components/tools/mcp/mcp-server-param-item.tsx +++ b/web/app/components/tools/mcp/mcp-server-param-item.tsx @@ -27,7 +27,7 @@ const MCPServerParamItem = ({ diff --git a/web/app/components/tools/mcp/mcp-service-card.tsx b/web/app/components/tools/mcp/mcp-service-card.tsx index 07aa5e0168..fc6de3bc3d 100644 --- a/web/app/components/tools/mcp/mcp-service-card.tsx +++ b/web/app/components/tools/mcp/mcp-service-card.tsx @@ -172,7 +172,7 @@ function MCPServiceCard({
- {t('tools.mcp.server.title')} + {t('mcp.server.title', { ns: 'tools' })}
@@ -180,8 +180,8 @@ function MCPServiceCard({
{serverActivated - ? t('appOverview.overview.status.running') - : t('appOverview.overview.status.disable')} + ? t('overview.status.running', { ns: 'appOverview' }) + : t('overview.status.disable', { ns: 'appOverview' })}
- {t('appOverview.overview.appInfo.enableTooltip.description')} + {t('overview.appInfo.enableTooltip.description', { ns: 'appOverview' })}
window.open(docLink('/guides/workflow/node/user-input'), '_blank')} > - {t('appOverview.overview.appInfo.enableTooltip.learnMore')} + {t('overview.appInfo.enableTooltip.learnMore', { ns: 'appOverview' })}
) @@ -222,7 +222,7 @@ function MCPServiceCard({ {!isMinimalState && (
- {t('tools.mcp.server.url')} + {t('mcp.server.url', { ns: 'tools' })}
@@ -239,7 +239,7 @@ function MCPServiceCard({ {isCurrentWorkspaceManager && (
-
{serverPublished ? t('tools.mcp.server.edit') : t('tools.mcp.server.addDescription')}
+
{serverPublished ? t('mcp.server.edit', { ns: 'tools' }) : t('mcp.server.addDescription', { ns: 'tools' })}
@@ -287,8 +287,8 @@ function MCPServiceCard({ {showConfirmDelete && ( { onGenCode() diff --git a/web/app/components/tools/mcp/modal.tsx b/web/app/components/tools/mcp/modal.tsx index eb8f280484..9bf4b351b4 100644 --- a/web/app/components/tools/mcp/modal.tsx +++ b/web/app/components/tools/mcp/modal.tsx @@ -5,7 +5,7 @@ import type { ToolWithProvider } from '@/app/components/workflow/types' import type { AppIconType } from '@/types/app' import { RiCloseLine, RiEditLine } from '@remixicon/react' import { useHover } from 'ahooks' -import { noop } from 'lodash-es' +import { noop } from 'es-toolkit/compat' import * as React from 'react' import { useCallback, useRef, useState } from 'react' import { useTranslation } from 'react-i18next' @@ -81,15 +81,15 @@ const MCPModal = ({ const authMethods = [ { - text: t('tools.mcp.modal.authentication'), + text: t('mcp.modal.authentication', { ns: 'tools' }), value: MCPAuthMethod.authentication, }, { - text: t('tools.mcp.modal.headers'), + text: t('mcp.modal.headers', { ns: 'tools' }), value: MCPAuthMethod.headers, }, { - text: t('tools.mcp.modal.configurations'), + text: t('mcp.modal.configurations', { ns: 'tools' }), value: MCPAuthMethod.configurations, }, ] @@ -231,33 +231,33 @@ const MCPModal = ({
-
{!isCreate ? t('tools.mcp.modal.editTitle') : t('tools.mcp.modal.title')}
+
{!isCreate ? t('mcp.modal.editTitle', { ns: 'tools' }) : t('mcp.modal.title', { ns: 'tools' })}
- {t('tools.mcp.modal.serverUrl')} + {t('mcp.modal.serverUrl', { ns: 'tools' })}
setUrl(e.target.value)} onBlur={e => handleBlur(e.target.value.trim())} - placeholder={t('tools.mcp.modal.serverUrlPlaceholder')} + placeholder={t('mcp.modal.serverUrlPlaceholder', { ns: 'tools' })} /> {originalServerUrl && originalServerUrl !== url && (
- {t('tools.mcp.modal.serverUrlWarning')} + {t('mcp.modal.serverUrlWarning', { ns: 'tools' })}
)}
- {t('tools.mcp.modal.name')} + {t('mcp.modal.name', { ns: 'tools' })}
setName(e.target.value)} - placeholder={t('tools.mcp.modal.namePlaceholder')} + placeholder={t('mcp.modal.namePlaceholder', { ns: 'tools' })} />
@@ -284,17 +284,17 @@ const MCPModal = ({
- {t('tools.mcp.modal.serverIdentifier')} + {t('mcp.modal.serverIdentifier', { ns: 'tools' })}
-
{t('tools.mcp.modal.serverIdentifierTip')}
+
{t('mcp.modal.serverIdentifierTip', { ns: 'tools' })}
setServerIdentifier(e.target.value)} - placeholder={t('tools.mcp.modal.serverIdentifierPlaceholder')} + placeholder={t('mcp.modal.serverIdentifierPlaceholder', { ns: 'tools' })} /> {originalServerID && originalServerID !== serverIdentifier && (
- {t('tools.mcp.modal.serverIdentifierWarning')} + {t('mcp.modal.serverIdentifierWarning', { ns: 'tools' })}
)}
@@ -317,13 +317,13 @@ const MCPModal = ({ defaultValue={isDynamicRegistration} onChange={setIsDynamicRegistration} /> - {t('tools.mcp.modal.useDynamicClientRegistration')} + {t('mcp.modal.useDynamicClientRegistration', { ns: 'tools' })}
{!isDynamicRegistration && (
-
{t('tools.mcp.modal.redirectUrlWarning')}
+
{t('mcp.modal.redirectUrlWarning', { ns: 'tools' })}
{`${API_PREFIX}/mcp/oauth/callback`} @@ -333,25 +333,25 @@ const MCPModal = ({
- {t('tools.mcp.modal.clientID')} + {t('mcp.modal.clientID', { ns: 'tools' })}
setClientID(e.target.value)} onBlur={e => handleBlur(e.target.value.trim())} - placeholder={t('tools.mcp.modal.clientID')} + placeholder={t('mcp.modal.clientID', { ns: 'tools' })} disabled={isDynamicRegistration} />
- {t('tools.mcp.modal.clientSecret')} + {t('mcp.modal.clientSecret', { ns: 'tools' })}
setCredentials(e.target.value)} onBlur={e => handleBlur(e.target.value.trim())} - placeholder={t('tools.mcp.modal.clientSecretPlaceholder')} + placeholder={t('mcp.modal.clientSecretPlaceholder', { ns: 'tools' })} disabled={isDynamicRegistration} />
@@ -362,9 +362,9 @@ const MCPModal = ({ authMethod === MCPAuthMethod.headers && (
- {t('tools.mcp.modal.headers')} + {t('mcp.modal.headers', { ns: 'tools' })}
-
{t('tools.mcp.modal.headersTip')}
+
{t('mcp.modal.headersTip', { ns: 'tools' })}
- {t('tools.mcp.modal.timeout')} + {t('mcp.modal.timeout', { ns: 'tools' })}
setMcpTimeout(Number(e.target.value))} onBlur={e => handleBlur(e.target.value.trim())} - placeholder={t('tools.mcp.modal.timeoutPlaceholder')} + placeholder={t('mcp.modal.timeoutPlaceholder', { ns: 'tools' })} />
- {t('tools.mcp.modal.sseReadTimeout')} + {t('mcp.modal.sseReadTimeout', { ns: 'tools' })}
setSseReadTimeout(Number(e.target.value))} onBlur={e => handleBlur(e.target.value.trim())} - placeholder={t('tools.mcp.modal.timeoutPlaceholder')} + placeholder={t('mcp.modal.timeoutPlaceholder', { ns: 'tools' })} />
@@ -406,8 +406,8 @@ const MCPModal = ({ }
- - + +
{showAppIconPicker && ( diff --git a/web/app/components/tools/mcp/provider-card.tsx b/web/app/components/tools/mcp/provider-card.tsx index a7b092e0c0..d8a8e71a82 100644 --- a/web/app/components/tools/mcp/provider-card.tsx +++ b/web/app/components/tools/mcp/provider-card.tsx @@ -96,19 +96,19 @@ const MCPCard = ({
{data.tools.length > 0 && ( -
{t('tools.mcp.toolsCount', { count: data.tools.length })}
+
{t('mcp.toolsCount', { ns: 'tools', count: data.tools.length })}
)} {!data.tools.length && ( -
{t('tools.mcp.noTools')}
+
{t('mcp.noTools', { ns: 'tools' })}
)}
/
-
{`${t('tools.mcp.updateTime')} ${formatTimeFromNow(data.updated_at! * 1000)}`}
+
{`${t('mcp.updateTime', { ns: 'tools' })} ${formatTimeFromNow(data.updated_at! * 1000)}`}
{data.is_team_authorization && data.tools.length > 0 && } {(!data.is_team_authorization || !data.tools.length) && (
-          {t('tools.mcp.noConfigured')}
+          {t('mcp.noConfigured', { ns: 'tools' })}
)} @@ -134,10 +134,10 @@ const MCPCard = ({ {isShowDeleteConfirm && ( - {t('tools.mcp.deleteConfirmTitle', { mcp: data.name })} + {t('mcp.deleteConfirmTitle', { ns: 'tools', mcp: data.name })}
      )}
          onCancel={hideDeleteConfirm}
diff --git a/web/app/components/tools/provider-list.tsx b/web/app/components/tools/provider-list.tsx
index 648ecb9802..48fd4ef29d 100644
--- a/web/app/components/tools/provider-list.tsx
+++ b/web/app/components/tools/provider-list.tsx
@@ -1,5 +1,6 @@
 'use client'
 import type { Collection } from './types'
+import { useQueryState } from 'nuqs'
 import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
 import { useTranslation } from 'react-i18next'
 import Input from '@/app/components/base/input'
@@ -14,7 +15,6 @@ import CustomCreateCard from '@/app/components/tools/provider/custom-create-card'
 import ProviderDetail from '@/app/components/tools/provider/detail'
 import WorkflowToolEmpty from '@/app/components/tools/provider/empty'
 import { useGlobalPublicStore } from '@/context/global-public-context'
-import { useTabSearchParams } from '@/hooks/use-tab-searchparams'
 import { useCheckInstalled, useInvalidateInstalledPluginList } from '@/service/use-plugins'
 import { useAllToolProviders } from '@/service/use-tools'
 import { cn } from '@/utils/classnames'
@@ -45,13 +45,13 @@ const ProviderList = () => {
   const { enable_marketplace } = useGlobalPublicStore(s => s.systemFeatures)
   const containerRef = useRef(null)
-  const [activeTab, setActiveTab] = useTabSearchParams({
-    defaultTab: 'builtin',
+  const [activeTab, setActiveTab] = useQueryState('category', {
+    defaultValue: 'builtin',
   })
   const options = [
-    { value: 'builtin', text: t('tools.type.builtIn') },
-    { value: 'api', text: t('tools.type.custom') },
-    { value: 'workflow', text: t('tools.type.workflow') },
+    { value: 'builtin', text: t('type.builtIn', { ns: 'tools' }) },
+    { value: 'api', text: t('type.custom', { ns: 'tools' }) },
+    { value: 'workflow', text: t('type.workflow', { ns: 'tools' }) },
     { value: 'mcp', text: 'MCP' },
   ]
   const [tagFilterValue, setTagFilterValue] = useState([])
@@ -194,7 +194,7 @@ const ProviderList = () => {
        )}
        {!filteredCollectionList.length && activeTab === 'builtin' && (
-
+
        )}
        {enable_marketplace && activeTab === 'builtin' && (
diff --git a/web/app/components/tools/provider/custom-create-card.tsx b/web/app/components/tools/provider/custom-create-card.tsx
index ba0c9e6449..56ce3845f2 100644
--- a/web/app/components/tools/provider/custom-create-card.tsx
+++ b/web/app/components/tools/provider/custom-create-card.tsx
@@ -37,7 +37,7 @@ const Contribute = ({ onRefreshData }: Props) => {
     await createCustomCollection(data)
     Toast.notify({
       type: 'success',
-      message: t('common.api.actionSuccess'),
+      message: t('api.actionSuccess', { ns: 'common' }),
     })
     setIsShowEditCustomCollectionModal(false)
     onRefreshData()
@@ -52,13 +52,13 @@ const Contribute = ({ onRefreshData }: Props) => {
-       {t('tools.createCustomTool')}
+       {t('createCustomTool', { ns: 'tools' })}
diff --git a/web/app/components/tools/provider/detail.tsx b/web/app/components/tools/provider/detail.tsx
index c4b65f353d..70d65f02bc 100644
--- a/web/app/components/tools/provider/detail.tsx
+++ b/web/app/components/tools/provider/detail.tsx
@@ -100,24 +100,7 @@ const ProviderDetail = ({
   const [isShowEditCollectionToolModal, setIsShowEditCustomCollectionModal] = useState(false)
   const [showConfirmDelete, setShowConfirmDelete] = useState(false)
   const [deleteAction, setDeleteAction] = useState('')
-  const doUpdateCustomToolCollection = async (data: CustomCollectionBackend) => {
-    await updateCustomCollection(data)
-    onRefreshData()
-    Toast.notify({
-      type: 'success',
-      message: t('common.api.actionSuccess'),
-    })
-    setIsShowEditCustomCollectionModal(false)
-  }
-  const doRemoveCustomToolCollection = async () => {
-    await removeCustomCollection(collection?.name as string)
-    onRefreshData()
-    Toast.notify({
-      type: 'success',
-      message: t('common.api.actionSuccess'),
-    })
-    setIsShowEditCustomCollectionModal(false)
-  }
+
   const getCustomProvider = useCallback(async () => {
     setIsDetailLoading(true)
     const res = await fetchCustomCollection(collection.name)
@@ -132,6 +115,28 @@ const ProviderDetail = ({
     })
     setIsDetailLoading(false)
   }, [collection.labels, collection.name])
+
+  const doUpdateCustomToolCollection = async (data: CustomCollectionBackend) => {
+    await updateCustomCollection(data)
+    onRefreshData()
+    await getCustomProvider()
+    // Use fresh data from form submission to avoid race condition with collection.labels
+    setCustomCollection(prev => prev ? { ...prev, labels: data.labels } : null)
+    Toast.notify({
+      type: 'success',
+      message: t('api.actionSuccess', { ns: 'common' }),
+    })
+    setIsShowEditCustomCollectionModal(false)
+  }
+  const doRemoveCustomToolCollection = async () => {
+    await removeCustomCollection(collection?.name as string)
+    onRefreshData()
+    Toast.notify({
+      type: 'success',
+      message: t('api.actionSuccess', { ns: 'common' }),
+    })
+    setIsShowEditCustomCollectionModal(false)
+  }
   // workflow provider
   const [isShowEditWorkflowToolModal, setIsShowEditWorkflowToolModal] = useState(false)
   const getWorkflowToolProvider = useCallback(async () => {
@@ -158,7 +163,7 @@ const ProviderDetail = ({
     onRefreshData()
     Toast.notify({
       type: 'success',
-      message: t('common.api.actionSuccess'),
+      message: t('api.actionSuccess', { ns: 'common' }),
     })
     setIsShowEditWorkflowToolModal(false)
   }
@@ -172,7 +177,7 @@ const ProviderDetail = ({
     getWorkflowToolProvider()
     Toast.notify({
       type: 'success',
-      message: t('common.api.actionSuccess'),
+      message: t('api.actionSuccess', { ns: 'common' }),
     })
     setIsShowEditWorkflowToolModal(false)
   }
@@ -270,7 +275,7 @@ const ProviderDetail = ({
            onClick={() => setIsShowEditCustomCollectionModal(true)}
          >
-           {t('tools.createTool.editAction')}
+           {t('createTool.editAction', { ns: 'tools' })}
        )}
        {collection.type === CollectionType.workflow && !isDetailLoading && customCollection && (
@@ -280,7 +285,7 @@ const ProviderDetail = ({
            className={cn('my-3 w-[183px] shrink-0')}
          >
-           {t('tools.openInStudio')}
+           {t('openInStudio', { ns: 'tools' })}
@@ -289,7 +294,7 @@ const ProviderDetail = ({
            onClick={() => setIsShowEditWorkflowToolModal(true)}
            disabled={!isCurrentWorkspaceManager}
          >
-           {t('tools.createTool.editAction')}
+           {t('createTool.editAction', { ns: 'tools' })}
        )}
@@ -301,7 +306,7 @@ const ProviderDetail = ({
        {(collection.type === CollectionType.builtIn || collection.type === CollectionType.model) && isAuthed && (
-           {t('plugin.detailPanel.actionNum', { num: toolList.length, action: toolList.length > 1 ? 'actions' : 'action' })}
+           {t('detailPanel.actionNum', { ns: 'plugin', num: toolList.length, action: toolList.length > 1 ? 'actions' : 'action' })}
          {needAuth && (
          )}
@@ -321,9 +326,9 @@ const ProviderDetail = ({
        {(collection.type === CollectionType.builtIn || collection.type === CollectionType.model) && needAuth && !isAuthed && (
          <>
-           {t('tools.includeToolNum', { num: toolList.length, action: toolList.length > 1 ? 'actions' : 'action' }).toLocaleUpperCase()}
+           {t('includeToolNum', { ns: 'tools', num: toolList.length, action: toolList.length > 1 ? 'actions' : 'action' }).toLocaleUpperCase()}
            ·
-           {t('tools.auth.setup').toLocaleUpperCase()}
+           {t('auth.setup', { ns: 'tools' }).toLocaleUpperCase()}
        )}
        {(collection.type === CollectionType.custom) && (
-           {t('tools.includeToolNum', { num: toolList.length, action: toolList.length > 1 ? 'actions' : 'action' }).toLocaleUpperCase()}
+           {t('includeToolNum', { ns: 'tools', num: toolList.length, action: toolList.length > 1 ? 'actions' : 'action' }).toLocaleUpperCase()}
        )}
        {(collection.type === CollectionType.workflow) && (
-           {t('tools.createTool.toolInput.title').toLocaleUpperCase()}
+           {t('createTool.toolInput.title', { ns: 'tools' }).toLocaleUpperCase()}
        )}
@@ -365,7 +370,7 @@ const ProviderDetail = ({
                {item.name}
                {item.type}
-               {item.required ? t('tools.createTool.toolInput.required') : ''}
+               {item.required ? t('createTool.toolInput.required', { ns: 'tools' }) : ''}
              {item.llm_description}
@@ -382,7 +387,7 @@ const ProviderDetail = ({
     await updateBuiltInToolCredential(collection.name, value)
     Toast.notify({
       type: 'success',
-      message: t('common.api.actionSuccess'),
+      message: t('api.actionSuccess', { ns: 'common' }),
     })
     await onRefreshData()
     setShowSettingAuth(false)
@@ -391,7 +396,7 @@ const ProviderDetail = ({
     await removeBuiltInToolCredential(collection.name)
     Toast.notify({
       type: 'success',
-      message: t('common.api.actionSuccess'),
+      message: t('api.actionSuccess', { ns: 'common' }),
     })
     await onRefreshData()
     setShowSettingAuth(false)
@@ -416,8 +421,8 @@ const ProviderDetail = ({
       )}
       {showConfirmDelete && (
          onCancel={() => setShowConfirmDelete(false)}
diff --git a/web/app/components/tools/provider/empty.tsx b/web/app/components/tools/provider/empty.tsx
index 7e916ba62f..4940dd6fc5 100644
--- a/web/app/components/tools/provider/empty.tsx
+++ b/web/app/components/tools/provider/empty.tsx
@@ -32,18 +32,18 @@ const Empty = ({
   const hasLink = type && [ToolTypeEnum.Custom, ToolTypeEnum.MCP].includes(type)
   const Comp = (hasLink ? Link : 'div') as any
   const linkProps = hasLink ? { href: getLink(type), target: '_blank' } : {}
-  const renderType = isAgent ? 'agent' : type
-  const hasTitle = t(`tools.addToolModal.${renderType}.title`) !== `tools.addToolModal.${renderType}.title`
+  const renderType = isAgent ? 'agent' as const : type
+  const hasTitle = renderType && t(`addToolModal.${renderType}.title`, { ns: 'tools' }) !== `addToolModal.${renderType}.title`
   return (
-       {hasTitle ? t(`tools.addToolModal.${renderType}.title`) : 'No tools available'}
+       {(hasTitle && renderType) ? t(`addToolModal.${renderType}.title`, { ns: 'tools' }) : 'No tools available'}
-       {(!isAgent && hasTitle) && (
+       {(!isAgent && hasTitle && renderType) && (
-         {t(`tools.addToolModal.${renderType}.tip`)}
+         {t(`addToolModal.${renderType}.tip`, { ns: 'tools' })}
          {' '}
          {hasLink && }
diff --git a/web/app/components/tools/setting/build-in/config-credentials.tsx b/web/app/components/tools/setting/build-in/config-credentials.tsx
index 43383cdb51..cb11c5cf16 100644
--- a/web/app/components/tools/setting/build-in/config-credentials.tsx
+++ b/web/app/components/tools/setting/build-in/config-credentials.tsx
@@ -1,7 +1,7 @@
 'use client'
 import type { FC } from 'react'
 import type { Collection } from '../../types'
-import { noop } from 'lodash-es'
+import { noop } from 'es-toolkit/compat'
 import * as React from 'react'
 import { useEffect, useState } from 'react'
 import { useTranslation } from 'react-i18next'
@@ -53,7 +53,7 @@ const ConfigCredential: FC = ({
   const handleSave = async () => {
     for (const field of credentialSchema) {
       if (field.required && !tempCredential[field.name]) {
-        Toast.notify({ type: 'error', message: t('common.errorMsg.fieldRequired', { field: field.label[language] || field.label.en_US }) })
+        Toast.notify({ type: 'error', message: t('errorMsg.fieldRequired', { ns: 'common', field: field.label[language] || field.label.en_US }) })
         return
       }
     }
@@ -71,8 +71,8 @@ const ConfigCredential: FC = ({
            rel="noopener noreferrer"
            className="inline-flex items-center text-xs text-text-accent"
          >
-           {t('tools.howToGet')}
+           {t('howToGet', { ns: 'tools' })}
        )
@@ -111,12 +111,12 @@ const ConfigCredential: FC = ({
        { (collection.is_team_authorization && !isHideRemoveBtn) && (
-
+
        ) }
-
-
+
+
diff --git a/web/app/components/tools/workflow-tool/configure-button.tsx b/web/app/components/tools/workflow-tool/configure-button.tsx
index f142989ff6..6526722b63 100644
--- a/web/app/components/tools/workflow-tool/configure-button.tsx
+++ b/web/app/components/tools/workflow-tool/configure-button.tsx
@@ -166,7 +166,7 @@ const WorkflowToolConfigureButton = ({
     getDetail(workflowAppId)
     Toast.notify({
       type: 'success',
-      message: t('common.api.actionSuccess'),
+      message: t('api.actionSuccess', { ns: 'common' }),
     })
     setShowModal(false)
   }
@@ -187,7 +187,7 @@ const WorkflowToolConfigureButton = ({
     getDetail(workflowAppId)
     Toast.notify({
       type: 'success',
-      message: t('common.api.actionSuccess'),
+      message: t('api.actionSuccess', { ns: 'common' }),
     })
     setShowModal(false)
   }
@@ -214,14 +214,14 @@ const WorkflowToolConfigureButton = ({
          >
-           {t('workflow.common.workflowAsTool')}
+           {t('common.workflowAsTool', { ns: 'workflow' })}
          {!published && (
-             {t('workflow.common.configureRequired')}
+             {t('common.configureRequired', { ns: 'workflow' })}
          )}
@@ -232,10 +232,10 @@ const WorkflowToolConfigureButton = ({
          >
-           {t('workflow.common.workflowAsTool')}
+           {t('common.workflowAsTool', { ns: 'workflow' })}
        )}
@@ -253,7 +253,7 @@ const WorkflowToolConfigureButton = ({
            onClick={() => setShowModal(true)}
            disabled={!isCurrentWorkspaceManager || disabled}
          >
-           {t('workflow.common.configure')}
+           {t('common.configure', { ns: 'workflow' })}
        {outdated && }
        {outdated && (
-           {t('workflow.common.workflowAsToolTip')}
+           {t('common.workflowAsToolTip', { ns: 'workflow' })}
        )}
diff --git a/web/app/components/tools/workflow-tool/confirm-modal/index.tsx b/web/app/components/tools/workflow-tool/confirm-modal/index.tsx
index f90774cb32..2abee055bb 100644
--- a/web/app/components/tools/workflow-tool/confirm-modal/index.tsx
+++ b/web/app/components/tools/workflow-tool/confirm-modal/index.tsx
@@ -1,7 +1,7 @@
 'use client'
 import { RiCloseLine } from '@remixicon/react'
-import { noop } from 'lodash-es'
+import { noop } from 'es-toolkit/compat'
 import { useTranslation } from 'react-i18next'
 import Button from '@/app/components/base/button'
 import { AlertTriangle } from '@/app/components/base/icons/src/vender/solid/alertsAndFeedback'
@@ -29,14 +29,14 @@ const ConfirmModal = ({ show, onConfirm, onClose }: ConfirmModalProps) => {
-         {t('tools.createTool.confirmTitle')}
+         {t('createTool.confirmTitle', { ns: 'tools' })}
-         {t('tools.createTool.confirmTip')}
+         {t('createTool.confirmTip', { ns: 'tools' })}
-
-
+
+
diff --git a/web/app/components/tools/workflow-tool/index.tsx b/web/app/components/tools/workflow-tool/index.tsx
index 8804a4128d..9a2c6a4c4c 100644
--- a/web/app/components/tools/workflow-tool/index.tsx
+++ b/web/app/components/tools/workflow-tool/index.tsx
@@ -55,19 +55,19 @@ const WorkflowToolAsModal: FC = ({
   const reservedOutputParameters: WorkflowToolProviderOutputParameter[] = [
     {
       name: 'text',
-      description: t('workflow.nodes.tool.outputVars.text'),
+      description: t('nodes.tool.outputVars.text', { ns: 'workflow' }),
       type: VarType.string,
       reserved: true,
     },
     {
       name: 'files',
-      description: t('workflow.nodes.tool.outputVars.files.title'),
+      description: t('nodes.tool.outputVars.files.title', { ns: 'workflow' }),
       type: VarType.arrayFile,
       reserved: true,
     },
     {
       name: 'json',
-      description: t('workflow.nodes.tool.outputVars.json'),
+      description: t('nodes.tool.outputVars.json', { ns: 'workflow' }),
       type: VarType.arrayObject,
       reserved: true,
     },
@@ -104,13 +104,13 @@ const WorkflowToolAsModal: FC = ({
   const onConfirm = () => {
     let errorMessage = ''
     if (!label)
-      errorMessage = t('common.errorMsg.fieldRequired', { field: t('tools.createTool.name') })
+      errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: t('createTool.name', { ns: 'tools' }) })
     if (!name)
-      errorMessage = t('common.errorMsg.fieldRequired', { field: t('tools.createTool.nameForToolCall') })
+      errorMessage = t('errorMsg.fieldRequired', { ns: 'common', field: t('createTool.nameForToolCall', { ns: 'tools' }) })
     if (!isNameValid(name))
-      errorMessage = t('tools.createTool.nameForToolCall') + t('tools.createTool.nameForToolCallTip')
+      errorMessage = t('createTool.nameForToolCall', { ns: 'tools' }) + t('createTool.nameForToolCallTip', { ns: 'tools' })
     if (errorMessage) {
       Toast.notify({
@@ -152,7 +152,7 @@ const WorkflowToolAsModal: FC = ({
        {/* name & icon */}
-           {t('tools.createTool.name')}
+           {t('createTool.name', { ns: 'tools' })}
            {' '}
            *
@@ -171,7 +171,7 @@ const WorkflowToolAsModal: FC = ({
          onClick={() => { setShowEmojiPicker(true) }}
          className="cursor-pointer"
          iconType="emoji"
          icon={emoji.content}
          background={emoji.background}
        />
          onChange={e => setLabel(e.target.value)}
        />
@@ -180,46 +180,46 @@ const WorkflowToolAsModal: FC = ({
        {/* name for tool call */}
-           {t('tools.createTool.nameForToolCall')}
+           {t('createTool.nameForToolCall', { ns: 'tools' })}
            {' '}
            *
-             {t('tools.createTool.nameForToolCallPlaceHolder')}
+             {t('createTool.nameForToolCallPlaceHolder', { ns: 'tools' })}
        )}
      />
          onChange={e => setName(e.target.value)}
        />
        {!isNameValid(name) && (
-           {t('tools.createTool.nameForToolCallTip')}
+           {t('createTool.nameForToolCallTip', { ns: 'tools' })}
        )}
        {/* description */}
-         {t('tools.createTool.description')}
+         {t('createTool.description', { ns: 'tools' })}