diff --git a/.agent/skills b/.agent/skills
new file mode 120000
index 0000000000..454b8427cd
--- /dev/null
+++ b/.agent/skills
@@ -0,0 +1 @@
+../.claude/skills
\ No newline at end of file
diff --git a/.claude/settings.json b/.claude/settings.json
index 72dcb5ec73..f9e1016d02 100644
--- a/.claude/settings.json
+++ b/.claude/settings.json
@@ -1,11 +1,4 @@
{
- "enabledPlugins": {
- "feature-dev@claude-plugins-official": true,
- "context7@claude-plugins-official": true,
- "typescript-lsp@claude-plugins-official": true,
- "pyright-lsp@claude-plugins-official": true,
- "ralph-loop@claude-plugins-official": true
- },
"hooks": {
"PreToolUse": [
{
@@ -18,5 +11,10 @@
]
}
]
+ },
+ "enabledPlugins": {
+ "feature-dev@claude-plugins-official": true,
+ "context7@claude-plugins-official": true,
+ "ralph-loop@claude-plugins-official": true
}
}
diff --git a/.claude/skills/frontend-testing/SKILL.md b/.claude/skills/frontend-testing/SKILL.md
index dd9677a78e..0716c81ef7 100644
--- a/.claude/skills/frontend-testing/SKILL.md
+++ b/.claude/skills/frontend-testing/SKILL.md
@@ -83,6 +83,9 @@ vi.mock('next/navigation', () => ({
usePathname: () => '/test',
}))
+// ✅ Zustand stores: Use real stores (auto-mocked globally)
+// Set test state with: useAppStore.setState({ ... })
+
// Shared state for mocks (if needed)
let mockSharedState = false
@@ -296,7 +299,7 @@ For each test file generated, aim for:
For more detailed information, refer to:
- `references/workflow.md` - **Incremental testing workflow** (MUST READ for multi-file testing)
-- `references/mocking.md` - Mock patterns and best practices
+- `references/mocking.md` - Mock patterns, Zustand store testing, and best practices
- `references/async-testing.md` - Async operations and API calls
- `references/domain-components.md` - Workflow, Dataset, Configuration testing
- `references/common-patterns.md` - Frequently used testing patterns
diff --git a/.claude/skills/frontend-testing/references/mocking.md b/.claude/skills/frontend-testing/references/mocking.md
index c70bcf0ae5..86bd375987 100644
--- a/.claude/skills/frontend-testing/references/mocking.md
+++ b/.claude/skills/frontend-testing/references/mocking.md
@@ -37,16 +37,36 @@ Only mock these categories:
1. **Third-party libraries with side effects** - `next/navigation`, external SDKs
1. **i18n** - Always mock to return keys
+### Zustand Stores - DO NOT Mock Manually
+
+**Zustand is globally mocked** in `web/vitest.setup.ts`. Use real stores with `setState()`:
+
+```typescript
+// ✅ CORRECT: Use real store, set test state
+import { useAppStore } from '@/app/components/app/store'
+
+useAppStore.setState({ appDetail: { id: 'test', name: 'Test' } })
+render(<MyComponent />)
+
+// ❌ WRONG: Don't mock the store module
+vi.mock('@/app/components/app/store', () => ({ ... }))
+```
+
+See [Zustand Store Testing](#zustand-store-testing) section for full details.
+
## Mock Placement
| Location | Purpose |
|----------|---------|
-| `web/vitest.setup.ts` | Global mocks shared by all tests (for example `react-i18next`, `next/image`) |
+| `web/vitest.setup.ts` | Global mocks shared by all tests (`react-i18next`, `next/image`, `zustand`) |
+| `web/__mocks__/zustand.ts` | Zustand mock implementation (auto-resets stores after each test) |
| `web/__mocks__/` | Reusable mock factories shared across multiple test files |
| Test file | Test-specific mocks, inline with `vi.mock()` |
Modules are not mocked automatically. Use `vi.mock` in test files, or add global mocks in `web/vitest.setup.ts`.
+**Note**: Zustand is special - it's globally mocked but you should NOT mock store modules manually. See [Zustand Store Testing](#zustand-store-testing).
+
## Essential Mocks
### 1. i18n (Auto-loaded via Global Mock)
@@ -276,6 +296,7 @@ const renderWithQueryClient = (ui: React.ReactElement) => {
1. **Use real base components** - Import from `@/app/components/base/` directly
1. **Use real project components** - Prefer importing over mocking
+1. **Use real Zustand stores** - Set test state via `store.setState()`
1. **Reset mocks in `beforeEach`**, not `afterEach`
1. **Match actual component behavior** in mocks (when mocking is necessary)
1. **Use factory functions** for complex mock data
@@ -285,6 +306,7 @@ const renderWithQueryClient = (ui: React.ReactElement) => {
### ❌ DON'T
1. **Don't mock base components** (`Loading`, `Button`, `Tooltip`, etc.)
+1. **Don't mock Zustand store modules** - Use real stores with `setState()`
1. Don't mock components you can import directly
1. Don't create overly simplified mocks that miss conditional logic
1. Don't forget to clean up nock after each test
@@ -308,10 +330,151 @@ Need to use a component in test?
├─ Is it a third-party lib with side effects?
│ └─ YES → Mock it (next/navigation, external SDKs)
│
+├─ Is it a Zustand store?
+│ └─ YES → DO NOT mock the module!
+│ Use real store + setState() to set test state
+│ (Global mock handles auto-reset)
+│
└─ Is it i18n?
└─ YES → Uses shared mock (auto-loaded). Override only for custom translations
```
+## Zustand Store Testing
+
+### Global Zustand Mock (Auto-loaded)
+
+Zustand is globally mocked in `web/vitest.setup.ts` following the [official Zustand testing guide](https://zustand.docs.pmnd.rs/guides/testing). The mock in `web/__mocks__/zustand.ts` provides:
+
+- Real store behavior with `getState()`, `setState()`, `subscribe()` methods
+- Automatic store reset after each test via `afterEach`
+- Proper test isolation between tests
+
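+For orientation, a condensed sketch of what a `__mocks__/zustand.ts` following the official guide typically looks like (the project's actual `web/__mocks__/zustand.ts` may differ in details):
+
+```typescript
+// Sketch adapted from the official Zustand testing guide (Vitest variant)
+import { act } from '@testing-library/react'
+import { afterEach, vi } from 'vitest'
+import type * as zustand from 'zustand'
+
+export * from 'zustand'
+
+const { create: actualCreate } = await vi.importActual<typeof zustand>('zustand')
+
+// Reset functions for every store created in the app
+export const storeResetFns = new Set<() => void>()
+
+const createUncurried = <T>(stateCreator: zustand.StateCreator<T>) => {
+  const store = actualCreate(stateCreator)
+  const initialState = store.getInitialState()
+  storeResetFns.add(() => store.setState(initialState, true))
+  return store
+}
+
+// Wrap `create` so every store registers a reset function
+// (the full version wraps `createStore` the same way)
+export const create = (<T>(stateCreator: zustand.StateCreator<T>) =>
+  typeof stateCreator === 'function' ? createUncurried(stateCreator) : createUncurried
+) as typeof zustand.create
+
+// Reset all stores after each test for isolation
+afterEach(() => {
+  act(() => {
+    storeResetFns.forEach(resetFn => resetFn())
+  })
+})
+```
+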
+### ✅ Recommended: Use Real Stores (Official Best Practice)
+
+**DO NOT mock store modules manually.** Import and use the real store, then use `setState()` to set test state:
+
+```typescript
+// ✅ CORRECT: Use real store with setState
+import { useAppStore } from '@/app/components/app/store'
+
+describe('MyComponent', () => {
+ it('should render app details', () => {
+ // Arrange: Set test state via setState
+ useAppStore.setState({
+ appDetail: {
+ id: 'test-app',
+ name: 'Test App',
+ mode: 'chat',
+ },
+ })
+
+ // Act
+    render(<MyComponent />)
+
+ // Assert
+ expect(screen.getByText('Test App')).toBeInTheDocument()
+ // Can also verify store state directly
+ expect(useAppStore.getState().appDetail?.name).toBe('Test App')
+ })
+
+ // No cleanup needed - global mock auto-resets after each test
+})
+```
+
+### ❌ Avoid: Manual Store Module Mocking
+
+Manual mocking conflicts with the global Zustand mock and loses store functionality:
+
+```typescript
+// ❌ WRONG: Don't mock the store module
+vi.mock('@/app/components/app/store', () => ({
+ useStore: (selector) => mockSelector(selector), // Missing getState, setState!
+}))
+
+// ❌ WRONG: This conflicts with global zustand mock
+vi.mock('@/app/components/workflow/store', () => ({
+ useWorkflowStore: vi.fn(() => mockState),
+}))
+```
+
+**Problems with manual mocking:**
+
+1. Loses `getState()`, `setState()`, `subscribe()` methods
+1. Conflicts with global Zustand mock behavior
+1. Requires manual maintenance of store API
+1. Tests don't reflect actual store behavior
+
+### When Manual Store Mocking is Necessary
+
+In rare cases where the store has complex initialization or side effects, you can mock it, but ensure you provide the full store API:
+
+```typescript
+// If you MUST mock (rare), include full store API
+const mockStore = {
+ appDetail: { id: 'test', name: 'Test' },
+ setAppDetail: vi.fn(),
+}
+
+vi.mock('@/app/components/app/store', () => ({
+ useStore: Object.assign(
+ (selector: (state: typeof mockStore) => unknown) => selector(mockStore),
+ {
+ getState: () => mockStore,
+ setState: vi.fn(),
+ subscribe: vi.fn(),
+ },
+ ),
+}))
+```
+
+### Store Testing Decision Tree
+
+```
+Need to test a component using Zustand store?
+│
+├─ Can you use the real store?
+│ └─ YES → Use real store + setState (RECOMMENDED)
+│ useAppStore.setState({ ... })
+│
+├─ Does the store have complex initialization/side effects?
+│ └─ YES → Consider mocking, but include full API
+│ (getState, setState, subscribe)
+│
+└─ Are you testing the store itself (not a component)?
+ └─ YES → Test store directly with getState/setState
+ const store = useMyStore
+ store.setState({ count: 0 })
+ store.getState().increment()
+ expect(store.getState().count).toBe(1)
+```
+
+### Example: Testing Store Actions
+
+```typescript
+import { useCounterStore } from '@/stores/counter'
+
+describe('Counter Store', () => {
+ it('should increment count', () => {
+ // Initial state (auto-reset by global mock)
+ expect(useCounterStore.getState().count).toBe(0)
+
+ // Call action
+ useCounterStore.getState().increment()
+
+ // Verify state change
+ expect(useCounterStore.getState().count).toBe(1)
+ })
+
+ it('should reset to initial state', () => {
+ // Set some state
+ useCounterStore.setState({ count: 100 })
+ expect(useCounterStore.getState().count).toBe(100)
+
+ // After this test, global mock will reset to initial state
+ })
+})
+```
+
## Factory Function Pattern
```typescript
diff --git a/.claude/skills/orpc-contract-first/SKILL.md b/.claude/skills/orpc-contract-first/SKILL.md
new file mode 100644
index 0000000000..4e3bfc7a37
--- /dev/null
+++ b/.claude/skills/orpc-contract-first/SKILL.md
@@ -0,0 +1,46 @@
+---
+name: orpc-contract-first
+description: Guide for implementing oRPC contract-first API patterns in Dify frontend. Triggers when creating new API contracts, adding service endpoints, integrating TanStack Query with typed contracts, or migrating legacy service calls to oRPC. Use for all API layer work in web/contract and web/service directories.
+---
+
+# oRPC Contract-First Development
+
+## Project Structure
+
+```
+web/contract/
+├── base.ts # Base contract (inputStructure: 'detailed')
+├── router.ts # Router composition & type exports
+├── marketplace.ts # Marketplace contracts
+└── console/ # Console contracts by domain
+ ├── system.ts
+ └── billing.ts
+```
+
+## Workflow
+
+1. **Create contract** in `web/contract/console/{domain}.ts`
+ - Import `base` from `../base` and `type` from `@orpc/contract`
+ - Define route with `path`, `method`, `input`, `output`
+
+2. **Register in router** at `web/contract/router.ts`
+ - Import directly from domain file (no barrel files)
+ - Nest by API prefix: `billing: { invoices, bindPartnerStack }`
+
+3. **Create hooks** in `web/service/use-{domain}.ts`
+ - Use `consoleQuery.{group}.{contract}.queryKey()` for query keys
+ - Use `consoleClient.{group}.{contract}()` for API calls
+
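+A minimal sketch of steps 1 and 2 for a hypothetical `billing.invoices` endpoint (file names, schemas, and the exact builder exported by `base.ts` are assumptions; follow the existing contract files for the real shape):
+
+```typescript
+// web/contract/console/billing.ts (hypothetical)
+import { z } from 'zod'
+import { base } from '../base'
+
+export const invoices = base
+  .route({ path: '/billing/invoices', method: 'GET' })
+  .input(z.object({
+    query: z.object({ page: z.number().optional() }),
+  }))
+  .output(z.object({
+    items: z.array(z.object({ id: z.string(), url: z.string() })),
+  }))
+
+// web/contract/router.ts (excerpt): import directly, no barrel files
+import { invoices } from './console/billing'
+
+export const consoleContract = {
+  billing: {
+    invoices,
+  },
+}
+```
+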
+## Key Rules
+
+- **Input structure**: Always use `{ params, query?, body? }` format
+- **Path params**: Use `{paramName}` in path, match in `params` object
+- **Router nesting**: Group by API prefix (e.g., `/billing/*` → `billing: {}`)
+- **No barrel files**: Import directly from specific files
+- **Types**: Import from `@/types/`, use `type()` helper
+
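+A matching hook sketch for step 3 (the `./client` import path and the exact `queryKey`/`queryFn` wiring are assumptions; mirror the existing `use-*.ts` hooks):
+
+```typescript
+// web/service/use-billing.ts (hypothetical)
+import { useQuery } from '@tanstack/react-query'
+import { consoleClient, consoleQuery } from './client'
+
+export const useInvoices = (page?: number) => {
+  return useQuery({
+    queryKey: consoleQuery.billing.invoices.queryKey({ input: { query: { page } } }),
+    queryFn: () => consoleClient.billing.invoices({ query: { page } }),
+  })
+}
+```
+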
+## Type Export
+
+```typescript
+export type ConsoleInputs = InferContractRouterInputs<typeof consoleContract>
+```
diff --git a/.claude/skills/vercel-react-best-practices/AGENTS.md b/.claude/skills/vercel-react-best-practices/AGENTS.md
new file mode 100644
index 0000000000..f9b9e99c44
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/AGENTS.md
@@ -0,0 +1,2410 @@
+# React Best Practices
+
+**Version 1.0.0**
+Vercel Engineering
+January 2026
+
+> **Note:**
+> This document is mainly for agents and LLMs to follow when maintaining,
+> generating, or refactoring React and Next.js codebases at Vercel. Humans
+> may also find it useful, but guidance here is optimized for automation
+> and consistency by AI-assisted workflows.
+
+---
+
+## Abstract
+
+Comprehensive performance optimization guide for React and Next.js applications, designed for AI agents and LLMs. Contains 40+ rules across 8 categories, prioritized by impact from critical (eliminating waterfalls, reducing bundle size) to incremental (advanced patterns). Each rule includes detailed explanations, real-world examples comparing incorrect vs. correct implementations, and specific impact metrics to guide automated refactoring and code generation.
+
+---
+
+## Table of Contents
+
+1. [Eliminating Waterfalls](#1-eliminating-waterfalls) — **CRITICAL**
+ - 1.1 [Defer Await Until Needed](#11-defer-await-until-needed)
+ - 1.2 [Dependency-Based Parallelization](#12-dependency-based-parallelization)
+ - 1.3 [Prevent Waterfall Chains in API Routes](#13-prevent-waterfall-chains-in-api-routes)
+ - 1.4 [Promise.all() for Independent Operations](#14-promiseall-for-independent-operations)
+ - 1.5 [Strategic Suspense Boundaries](#15-strategic-suspense-boundaries)
+2. [Bundle Size Optimization](#2-bundle-size-optimization) — **CRITICAL**
+ - 2.1 [Avoid Barrel File Imports](#21-avoid-barrel-file-imports)
+ - 2.2 [Conditional Module Loading](#22-conditional-module-loading)
+ - 2.3 [Defer Non-Critical Third-Party Libraries](#23-defer-non-critical-third-party-libraries)
+ - 2.4 [Dynamic Imports for Heavy Components](#24-dynamic-imports-for-heavy-components)
+ - 2.5 [Preload Based on User Intent](#25-preload-based-on-user-intent)
+3. [Server-Side Performance](#3-server-side-performance) — **HIGH**
+ - 3.1 [Cross-Request LRU Caching](#31-cross-request-lru-caching)
+ - 3.2 [Minimize Serialization at RSC Boundaries](#32-minimize-serialization-at-rsc-boundaries)
+ - 3.3 [Parallel Data Fetching with Component Composition](#33-parallel-data-fetching-with-component-composition)
+ - 3.4 [Per-Request Deduplication with React.cache()](#34-per-request-deduplication-with-reactcache)
+ - 3.5 [Use after() for Non-Blocking Operations](#35-use-after-for-non-blocking-operations)
+4. [Client-Side Data Fetching](#4-client-side-data-fetching) — **MEDIUM-HIGH**
+ - 4.1 [Deduplicate Global Event Listeners](#41-deduplicate-global-event-listeners)
+ - 4.2 [Use Passive Event Listeners for Scrolling Performance](#42-use-passive-event-listeners-for-scrolling-performance)
+ - 4.3 [Use SWR for Automatic Deduplication](#43-use-swr-for-automatic-deduplication)
+ - 4.4 [Version and Minimize localStorage Data](#44-version-and-minimize-localstorage-data)
+5. [Re-render Optimization](#5-re-render-optimization) — **MEDIUM**
+ - 5.1 [Defer State Reads to Usage Point](#51-defer-state-reads-to-usage-point)
+ - 5.2 [Extract to Memoized Components](#52-extract-to-memoized-components)
+ - 5.3 [Narrow Effect Dependencies](#53-narrow-effect-dependencies)
+ - 5.4 [Subscribe to Derived State](#54-subscribe-to-derived-state)
+ - 5.5 [Use Functional setState Updates](#55-use-functional-setstate-updates)
+ - 5.6 [Use Lazy State Initialization](#56-use-lazy-state-initialization)
+ - 5.7 [Use Transitions for Non-Urgent Updates](#57-use-transitions-for-non-urgent-updates)
+6. [Rendering Performance](#6-rendering-performance) — **MEDIUM**
+ - 6.1 [Animate SVG Wrapper Instead of SVG Element](#61-animate-svg-wrapper-instead-of-svg-element)
+ - 6.2 [CSS content-visibility for Long Lists](#62-css-content-visibility-for-long-lists)
+ - 6.3 [Hoist Static JSX Elements](#63-hoist-static-jsx-elements)
+ - 6.4 [Optimize SVG Precision](#64-optimize-svg-precision)
+ - 6.5 [Prevent Hydration Mismatch Without Flickering](#65-prevent-hydration-mismatch-without-flickering)
+ - 6.6 [Use Activity Component for Show/Hide](#66-use-activity-component-for-showhide)
+ - 6.7 [Use Explicit Conditional Rendering](#67-use-explicit-conditional-rendering)
+7. [JavaScript Performance](#7-javascript-performance) — **LOW-MEDIUM**
+ - 7.1 [Batch DOM CSS Changes](#71-batch-dom-css-changes)
+ - 7.2 [Build Index Maps for Repeated Lookups](#72-build-index-maps-for-repeated-lookups)
+ - 7.3 [Cache Property Access in Loops](#73-cache-property-access-in-loops)
+ - 7.4 [Cache Repeated Function Calls](#74-cache-repeated-function-calls)
+ - 7.5 [Cache Storage API Calls](#75-cache-storage-api-calls)
+ - 7.6 [Combine Multiple Array Iterations](#76-combine-multiple-array-iterations)
+ - 7.7 [Early Length Check for Array Comparisons](#77-early-length-check-for-array-comparisons)
+ - 7.8 [Early Return from Functions](#78-early-return-from-functions)
+ - 7.9 [Hoist RegExp Creation](#79-hoist-regexp-creation)
+ - 7.10 [Use Loop for Min/Max Instead of Sort](#710-use-loop-for-minmax-instead-of-sort)
+ - 7.11 [Use Set/Map for O(1) Lookups](#711-use-setmap-for-o1-lookups)
+ - 7.12 [Use toSorted() Instead of sort() for Immutability](#712-use-tosorted-instead-of-sort-for-immutability)
+8. [Advanced Patterns](#8-advanced-patterns) — **LOW**
+ - 8.1 [Store Event Handlers in Refs](#81-store-event-handlers-in-refs)
+ - 8.2 [useLatest for Stable Callback Refs](#82-uselatest-for-stable-callback-refs)
+
+---
+
+## 1. Eliminating Waterfalls
+
+**Impact: CRITICAL**
+
+Waterfalls are the #1 performance killer. Each sequential await adds full network latency. Eliminating them yields the largest gains.
+
+### 1.1 Defer Await Until Needed
+
+**Impact: HIGH (avoids blocking unused code paths)**
+
+Move `await` operations into the branches where they're actually used to avoid blocking code paths that don't need them.
+
+**Incorrect: blocks both branches**
+
+```typescript
+async function handleRequest(userId: string, skipProcessing: boolean) {
+ const userData = await fetchUserData(userId)
+
+ if (skipProcessing) {
+ // Returns immediately but still waited for userData
+ return { skipped: true }
+ }
+
+ // Only this branch uses userData
+ return processUserData(userData)
+}
+```
+
+**Correct: only blocks when needed**
+
+```typescript
+async function handleRequest(userId: string, skipProcessing: boolean) {
+ if (skipProcessing) {
+ // Returns immediately without waiting
+ return { skipped: true }
+ }
+
+ // Fetch only when needed
+ const userData = await fetchUserData(userId)
+ return processUserData(userData)
+}
+```
+
+**Another example: early return optimization**
+
+```typescript
+// Incorrect: always fetches permissions
+async function updateResource(resourceId: string, userId: string) {
+ const permissions = await fetchPermissions(userId)
+ const resource = await getResource(resourceId)
+
+ if (!resource) {
+ return { error: 'Not found' }
+ }
+
+ if (!permissions.canEdit) {
+ return { error: 'Forbidden' }
+ }
+
+ return await updateResourceData(resource, permissions)
+}
+
+// Correct: fetches only when needed
+async function updateResource(resourceId: string, userId: string) {
+ const resource = await getResource(resourceId)
+
+ if (!resource) {
+ return { error: 'Not found' }
+ }
+
+ const permissions = await fetchPermissions(userId)
+
+ if (!permissions.canEdit) {
+ return { error: 'Forbidden' }
+ }
+
+ return await updateResourceData(resource, permissions)
+}
+```
+
+This optimization is especially valuable when the skipped branch is frequently taken, or when the deferred operation is expensive.
+
+### 1.2 Dependency-Based Parallelization
+
+**Impact: CRITICAL (2-10× improvement)**
+
+For operations with partial dependencies, use `better-all` to maximize parallelism. It automatically starts each task at the earliest possible moment.
+
+**Incorrect: profile waits for config unnecessarily**
+
+```typescript
+const [user, config] = await Promise.all([
+ fetchUser(),
+ fetchConfig()
+])
+const profile = await fetchProfile(user.id)
+```
+
+**Correct: config and profile run in parallel**
+
+```typescript
+import { all } from 'better-all'
+
+const { user, config, profile } = await all({
+ async user() { return fetchUser() },
+ async config() { return fetchConfig() },
+ async profile() {
+ return fetchProfile((await this.$.user).id)
+ }
+})
+```
+
+Reference: [https://github.com/shuding/better-all](https://github.com/shuding/better-all)
+
+### 1.3 Prevent Waterfall Chains in API Routes
+
+**Impact: CRITICAL (2-10× improvement)**
+
+In API routes and Server Actions, start independent operations immediately, even if you don't await them yet.
+
+**Incorrect: config waits for auth, data waits for both**
+
+```typescript
+export async function GET(request: Request) {
+ const session = await auth()
+ const config = await fetchConfig()
+ const data = await fetchData(session.user.id)
+ return Response.json({ data, config })
+}
+```
+
+**Correct: auth and config start immediately**
+
+```typescript
+export async function GET(request: Request) {
+ const sessionPromise = auth()
+ const configPromise = fetchConfig()
+ const session = await sessionPromise
+ const [config, data] = await Promise.all([
+ configPromise,
+ fetchData(session.user.id)
+ ])
+ return Response.json({ data, config })
+}
+```
+
+For operations with more complex dependency chains, use `better-all` to automatically maximize parallelism (see Dependency-Based Parallelization).
+
+### 1.4 Promise.all() for Independent Operations
+
+**Impact: CRITICAL (2-10× improvement)**
+
+When async operations have no interdependencies, execute them concurrently using `Promise.all()`.
+
+**Incorrect: sequential execution, 3 round trips**
+
+```typescript
+const user = await fetchUser()
+const posts = await fetchPosts()
+const comments = await fetchComments()
+```
+
+**Correct: parallel execution, 1 round trip**
+
+```typescript
+const [user, posts, comments] = await Promise.all([
+ fetchUser(),
+ fetchPosts(),
+ fetchComments()
+])
+```
+
+### 1.5 Strategic Suspense Boundaries
+
+**Impact: HIGH (faster initial paint)**
+
+Instead of awaiting data in async components before returning JSX, use Suspense boundaries to show the wrapper UI faster while data loads.
+
+**Incorrect: wrapper blocked by data fetching**
+
+```tsx
+async function Page() {
+  const data = await fetchData() // Blocks entire page
+
+  return (
+    <div>
+      <div>Sidebar</div>
+      <div>Header</div>
+      <div>
+        <DataDisplay data={data} />
+      </div>
+      <div>Footer</div>
+    </div>
+  )
+}
+```
+
+The entire layout waits for data even though only the middle section needs it.
+
+**Correct: wrapper shows immediately, data streams in**
+
+```tsx
+function Page() {
+  return (
+    <div>
+      <div>Sidebar</div>
+      <div>Header</div>
+      <Suspense fallback={<Skeleton />}>
+        <DataDisplay />
+      </Suspense>
+      <div>Footer</div>
+    </div>
+  )
+}
+
+async function DataDisplay() {
+  const data = await fetchData() // Only blocks this component
+  return <div>{data.content}</div>
+}
+```
+
+Sidebar, Header, and Footer render immediately. Only DataDisplay waits for data.
+
+**Alternative: share promise across components**
+
+```tsx
+function Page() {
+  // Start fetch immediately, but don't await
+  const dataPromise = fetchData()
+
+  return (
+    <Suspense fallback={<Skeleton />}>
+      <DataDisplay dataPromise={dataPromise} />
+      <DataSummary dataPromise={dataPromise} />
+    </Suspense>
+  )
+}
+
+function DataSummary({ dataPromise }: { dataPromise: Promise<Data> }) {
+  const data = use(dataPromise) // Reuses the same promise
+  return <div>{data.summary}</div>
+}
+```
+
+Both components share the same promise, so only one fetch occurs. Layout renders immediately while both components wait together.
+
+**When NOT to use this pattern:**
+
+- Critical data needed for layout decisions (affects positioning)
+
+- SEO-critical content above the fold
+
+- Small, fast queries where suspense overhead isn't worth it
+
+- When you want to avoid layout shift (loading → content jump)
+
+**Trade-off:** Faster initial paint vs potential layout shift. Choose based on your UX priorities.
+
+---
+
+## 2. Bundle Size Optimization
+
+**Impact: CRITICAL**
+
+Reducing initial bundle size improves Time to Interactive and Largest Contentful Paint.
+
+### 2.1 Avoid Barrel File Imports
+
+**Impact: CRITICAL (200-800ms import cost, slow builds)**
+
+Import directly from source files instead of barrel files to avoid loading thousands of unused modules. **Barrel files** are entry points that re-export multiple modules (e.g., `index.js` that does `export * from './module'`).
+
+Popular icon and component libraries can have **up to 10,000 re-exports** in their entry file. For many React packages, **it takes 200-800ms just to import them**, affecting both development speed and production cold starts.
+
+**Why tree-shaking doesn't help:** When a library is marked as external (not bundled), the bundler can't optimize it. If you bundle it to enable tree-shaking, builds become substantially slower analyzing the entire module graph.
+
+**Incorrect: imports entire library**
+
+```tsx
+import { Check, X, Menu } from 'lucide-react'
+// Loads 1,583 modules, takes ~2.8s extra in dev
+// Runtime cost: 200-800ms on every cold start
+
+import { Button, TextField } from '@mui/material'
+// Loads 2,225 modules, takes ~4.2s extra in dev
+```
+
+**Correct: imports only what you need**
+
+```tsx
+import Check from 'lucide-react/dist/esm/icons/check'
+import X from 'lucide-react/dist/esm/icons/x'
+import Menu from 'lucide-react/dist/esm/icons/menu'
+// Loads only 3 modules (~2KB vs ~1MB)
+
+import Button from '@mui/material/Button'
+import TextField from '@mui/material/TextField'
+// Loads only what you use
+```
+
+**Alternative: Next.js 13.5+**
+
+```js
+// next.config.js - use optimizePackageImports
+module.exports = {
+ experimental: {
+ optimizePackageImports: ['lucide-react', '@mui/material']
+ }
+}
+
+// Then you can keep the ergonomic barrel imports:
+import { Check, X, Menu } from 'lucide-react'
+// Automatically transformed to direct imports at build time
+```
+
+Direct imports provide 15-70% faster dev boot, 28% faster builds, 40% faster cold starts, and significantly faster HMR.
+
+Libraries commonly affected: `lucide-react`, `@mui/material`, `@mui/icons-material`, `@tabler/icons-react`, `react-icons`, `@headlessui/react`, `@radix-ui/react-*`, `lodash`, `ramda`, `date-fns`, `rxjs`, `react-use`.
+
+Reference: [https://vercel.com/blog/how-we-optimized-package-imports-in-next-js](https://vercel.com/blog/how-we-optimized-package-imports-in-next-js)
+
+### 2.2 Conditional Module Loading
+
+**Impact: HIGH (loads large data only when needed)**
+
+Load large data or modules only when a feature is activated.
+
+**Example: lazy-load animation frames**
+
+```tsx
+function AnimationPlayer({ enabled, setEnabled }: { enabled: boolean; setEnabled: React.Dispatch<React.SetStateAction<boolean>> }) {
+  const [frames, setFrames] = useState<Frame[] | null>(null)
+
+  useEffect(() => {
+    if (enabled && !frames && typeof window !== 'undefined') {
+      import('./animation-frames.js')
+        .then(mod => setFrames(mod.frames))
+        .catch(() => setEnabled(false))
+    }
+  }, [enabled, frames, setEnabled])
+
+  if (!frames) return null
+  return <Animation frames={frames} />
+}
+```
+
+The `typeof window !== 'undefined'` check prevents bundling this module for SSR, optimizing server bundle size and build speed.
+
+### 2.3 Defer Non-Critical Third-Party Libraries
+
+**Impact: MEDIUM (loads after hydration)**
+
+Analytics, logging, and error tracking don't block user interaction. Load them after hydration.
+
+**Incorrect: blocks initial bundle**
+
+```tsx
+import { Analytics } from '@vercel/analytics/react'
+
+export default function RootLayout({ children }) {
+  return (
+    <html>
+      <body>
+        {children}
+        <Analytics />
+      </body>
+    </html>
+  )
+}
+```
+
+**Correct: loads after hydration**
+
+```tsx
+import dynamic from 'next/dynamic'
+
+const Analytics = dynamic(
+ () => import('@vercel/analytics/react').then(m => m.Analytics),
+ { ssr: false }
+)
+
+export default function RootLayout({ children }) {
+  return (
+    <html>
+      <body>
+        {children}
+        <Analytics />
+      </body>
+    </html>
+  )
+}
+```
+
+### 2.4 Dynamic Imports for Heavy Components
+
+**Impact: CRITICAL (directly affects TTI and LCP)**
+
+Use `next/dynamic` to lazy-load large components not needed on initial render.
+
+**Incorrect: Monaco bundles with main chunk ~300KB**
+
+```tsx
+import { MonacoEditor } from './monaco-editor'
+
+function CodePanel({ code }: { code: string }) {
+  return <MonacoEditor value={code} />
+}
+```
+
+**Correct: Monaco loads on demand**
+
+```tsx
+import dynamic from 'next/dynamic'
+
+const MonacoEditor = dynamic(
+ () => import('./monaco-editor').then(m => m.MonacoEditor),
+ { ssr: false }
+)
+
+function CodePanel({ code }: { code: string }) {
+  return <MonacoEditor value={code} />
+}
+```
+
+### 2.5 Preload Based on User Intent
+
+**Impact: MEDIUM (reduces perceived latency)**
+
+Preload heavy bundles before they're needed to reduce perceived latency.
+
+**Example: preload on hover/focus**
+
+```tsx
+function EditorButton({ onClick }: { onClick: () => void }) {
+ const preload = () => {
+ if (typeof window !== 'undefined') {
+ void import('./monaco-editor')
+ }
+ }
+
+ return (
+    <button onClick={onClick} onMouseEnter={preload} onFocus={preload}>
+      Open editor
+    </button>
+ )
+}
+```
+
+**Example: preload when feature flag is enabled**
+
+```tsx
+function FlagsProvider({ children, flags }: Props) {
+ useEffect(() => {
+ if (flags.editorEnabled && typeof window !== 'undefined') {
+ void import('./monaco-editor').then(mod => mod.init())
+ }
+ }, [flags.editorEnabled])
+
+  return (
+    <FlagsContext.Provider value={flags}>
+      {children}
+    </FlagsContext.Provider>
+  )
+}
+```
+
+The `typeof window !== 'undefined'` check prevents bundling preloaded modules for SSR, optimizing server bundle size and build speed.
+
+---
+
+## 3. Server-Side Performance
+
+**Impact: HIGH**
+
+Optimizing server-side rendering and data fetching eliminates server-side waterfalls and reduces response times.
+
+### 3.1 Cross-Request LRU Caching
+
+**Impact: HIGH (caches across requests)**
+
+`React.cache()` only works within one request. For data shared across sequential requests (user clicks button A then button B), use an LRU cache.
+
+**Implementation:**
+
+```typescript
+import { LRUCache } from 'lru-cache'
+
+const cache = new LRUCache<string, User>({
+ max: 1000,
+ ttl: 5 * 60 * 1000 // 5 minutes
+})
+
+export async function getUser(id: string) {
+ const cached = cache.get(id)
+ if (cached) return cached
+
+ const user = await db.user.findUnique({ where: { id } })
+ cache.set(id, user)
+ return user
+}
+
+// Request 1: DB query, result cached
+// Request 2: cache hit, no DB query
+```
+
+Use when sequential user actions hit multiple endpoints needing the same data within seconds.
+
+**With Vercel's [Fluid Compute](https://vercel.com/docs/fluid-compute):** LRU caching is especially effective because multiple concurrent requests can share the same function instance and cache. This means the cache persists across requests without needing external storage like Redis.
+
+**In traditional serverless:** Each invocation runs in isolation, so consider Redis for cross-process caching.
+
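+In that setup, a hedged sketch of the same `getUser` helper backed by Redis (assuming an `ioredis` client and the same `db` accessor as the example above):
+
+```typescript
+import Redis from 'ioredis'
+
+const redis = new Redis(process.env.REDIS_URL!)
+
+export async function getUser(id: string) {
+  const cached = await redis.get(`user:${id}`)
+  if (cached) return JSON.parse(cached)
+
+  const user = await db.user.findUnique({ where: { id } })
+  // Same 5-minute TTL as the in-memory example
+  await redis.set(`user:${id}`, JSON.stringify(user), 'EX', 5 * 60)
+  return user
+}
+```
+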
+Reference: [https://github.com/isaacs/node-lru-cache](https://github.com/isaacs/node-lru-cache)
+
+### 3.2 Minimize Serialization at RSC Boundaries
+
+**Impact: HIGH (reduces data transfer size)**
+
+The React Server/Client boundary serializes all object properties into strings and embeds them in the HTML response and subsequent RSC requests. This serialized data directly impacts page weight and load time, so **size matters a lot**. Only pass fields that the client actually uses.
+
+**Incorrect: serializes all 50 fields**
+
+```tsx
+async function Page() {
+ const user = await fetchUser() // 50 fields
+  return <Profile user={user} />
+}
+
+'use client'
+function Profile({ user }: { user: User }) {
+  return <div>{user.name}</div> // uses 1 field
+}
+```
+
+**Correct: serializes only 1 field**
+
+```tsx
+async function Page() {
+ const user = await fetchUser()
+  return <Profile name={user.name} />
+}
+
+'use client'
+function Profile({ name }: { name: string }) {
+  return <div>{name}</div>
+}
+```
+
+### 3.3 Parallel Data Fetching with Component Composition
+
+**Impact: CRITICAL (eliminates server-side waterfalls)**
+
+React Server Components execute sequentially within a tree. Restructure with composition to parallelize data fetching.
+
+**Incorrect: Sidebar waits for Page's fetch to complete**
+
+```tsx
+export default async function Page() {
+  const header = await fetchHeader()
+  return (
+    <div>
+      <div>{header}</div>
+      <Sidebar />
+    </div>
+  )
+}
+
+async function Sidebar() {
+  const items = await fetchSidebarItems()
+  return <Nav items={items} />
+}
+```
+
+**Correct: both fetch simultaneously**
+
+```tsx
+async function Header() {
+  const data = await fetchHeader()
+  return <div>{data}</div>
+}
+
+// Sidebar stays the same; Page composes both, so they fetch in parallel
+export default function Page() {
+  return (
+    <div>
+      <Header />
+      <Sidebar />
+    </div>
+  )
+}
+```
+
+This is especially helpful for large and static SVG nodes, which can be expensive to recreate on every render.
+
+**Note:** If your project has [React Compiler](https://react.dev/learn/react-compiler) enabled, the compiler automatically hoists static JSX elements and optimizes component re-renders, making manual hoisting unnecessary.
+
+### 6.4 Optimize SVG Precision
+
+**Impact: LOW (reduces file size)**
+
+Reduce SVG coordinate precision to decrease file size. The optimal precision depends on the viewBox size, but in general reducing precision should be considered.
+
+**Incorrect: excessive precision**
+
+```svg
+<path d="M12.48347263 7.29463812 L45.12938475 23.84756291 L78.93847562 41.29384756" />
+```
+
+**Correct: 1 decimal place**
+
+```svg
+<path d="M12.5 7.3 L45.1 23.8 L78.9 41.3" />
+```
+
+**Automate with SVGO:**
+
+```bash
+npx svgo --precision=1 --multipass icon.svg
+```
+
+### 6.5 Prevent Hydration Mismatch Without Flickering
+
+**Impact: MEDIUM (avoids visual flicker and hydration errors)**
+
+When rendering content that depends on client-side storage (localStorage, cookies), avoid both SSR breakage and post-hydration flickering by injecting a synchronous script that updates the DOM before React hydrates.
+
+**Incorrect: breaks SSR**
+
+```tsx
+function ThemeWrapper({ children }: { children: ReactNode }) {
+ // localStorage is not available on server - throws error
+ const theme = localStorage.getItem('theme') || 'light'
+
+  return (
+    <div data-theme={theme}>
+      {children}
+    </div>
+  )
+}
+```
+
+A common workaround, reading localStorage in a `useEffect` after mount, first renders the default value (`light`) and then updates after hydration, causing a visible flash of incorrect content.
+
+**Correct: no flicker, no hydration mismatch**
+
+```tsx
+function ThemeWrapper({ children }: { children: ReactNode }) {
+  return (
+    <>
+      <div id="theme-wrapper" data-theme="light">
+        {children}
+      </div>
+      {/* Inline script runs before hydration; the element id is illustrative */}
+      <script
+        dangerouslySetInnerHTML={{
+          __html: `document.getElementById('theme-wrapper').dataset.theme = localStorage.getItem('theme') || 'light'`,
+        }}
+      />
+    </>
+  )
+}
+```
+
+The inline script executes synchronously before showing the element, ensuring the DOM already has the correct value. No flickering, no hydration mismatch.
+
+This pattern is especially useful for theme toggles, user preferences, authentication states, and any client-only data that should render immediately without flashing default values.
+
+### 6.6 Use Activity Component for Show/Hide
+
+**Impact: MEDIUM (preserves state/DOM)**
+
+Use React's `` to preserve state/DOM for expensive components that frequently toggle visibility.
+
+**Usage:**
+
+```tsx
+import { Activity } from 'react'
+
+function Dropdown({ isOpen }: Props) {
+ return (
+    <Activity mode={isOpen ? 'visible' : 'hidden'}>
+      <DropdownContent />
+    </Activity>
+ )
+}
+```
+
+Avoids expensive re-renders and state loss.
+
+### 6.7 Use Explicit Conditional Rendering
+
+**Impact: LOW (prevents rendering 0 or NaN)**
+
+Use explicit ternary operators (`? :`) instead of `&&` for conditional rendering when the condition can be `0`, `NaN`, or other falsy values that render.
+
+**Incorrect: renders "0" when count is 0**
+
+```tsx
+function Badge({ count }: { count: number }) {
+  return (
+    <div>
+      {count && <span>{count}</span>}
+    </div>
+  )
+}
+
+// When count = 0, renders: <div>0</div>
+// When count = 5, renders: <div><span>5</span></div>
+```
+
+**Correct: renders nothing when count is 0**
+
+```tsx
+function Badge({ count }: { count: number }) {
+  return (
+    <div>
+      {count > 0 ? <span>{count}</span> : null}
+    </div>
+  )
+}
+
+// When count = 0, renders: <div></div>
+// When count = 5, renders: <div><span>5</span></div>
+```
+
+---
+
+## 7. JavaScript Performance
+
+**Impact: LOW-MEDIUM**
+
+Micro-optimizations for hot paths can add up to meaningful improvements.
+
+### 7.1 Batch DOM CSS Changes
+
+**Impact: MEDIUM (reduces reflows/repaints)**
+
+Avoid changing styles one property at a time. Group multiple CSS changes together via classes or `cssText` to minimize browser reflows.
+
+**Incorrect: multiple reflows**
+
+```typescript
+function updateElementStyles(element: HTMLElement) {
+ // Each line triggers a reflow
+ element.style.width = '100px'
+ element.style.height = '200px'
+ element.style.backgroundColor = 'blue'
+ element.style.border = '1px solid black'
+}
+```
+
+**Correct: add class - single reflow**
+
+```typescript
+// CSS file
+.highlighted-box {
+ width: 100px;
+ height: 200px;
+ background-color: blue;
+ border: 1px solid black;
+}
+
+// JavaScript
+function updateElementStyles(element: HTMLElement) {
+ element.classList.add('highlighted-box')
+}
+```
+
+**Correct: change cssText - single reflow**
+
+```typescript
+function updateElementStyles(element: HTMLElement) {
+ element.style.cssText = `
+ width: 100px;
+ height: 200px;
+ background-color: blue;
+ border: 1px solid black;
+ `
+}
+```
+
+**React example:**
+
+```tsx
+// Incorrect: changing styles one by one
+function Box({ isHighlighted }: { isHighlighted: boolean }) {
+  const ref = useRef<HTMLDivElement>(null)
+
+ useEffect(() => {
+ if (ref.current && isHighlighted) {
+ ref.current.style.width = '100px'
+ ref.current.style.height = '200px'
+ ref.current.style.backgroundColor = 'blue'
+ }
+ }, [isHighlighted])
+
+  return <div ref={ref} />
+}
+```
+
+**Why this matters in React:**
+
+1. Props/state mutations break React's immutability model - React expects props and state to be treated as read-only
+
+2. Causes stale closure bugs - Mutating arrays inside closures (callbacks, effects) can lead to unexpected behavior
+
+**Browser support: fallback for older browsers**
+
+```typescript
+// Fallback for older browsers
+const sorted = [...items].sort((a, b) => a.value - b.value)
+```
+
+`.toSorted()` is available in all modern browsers (Chrome 110+, Safari 16+, Firefox 115+, Node.js 20+). For older environments, use the spread-operator fallback shown above.
+
+**Other immutable array methods:**
+
+- `.toSorted()` - immutable sort
+
+- `.toReversed()` - immutable reverse
+
+- `.toSpliced()` - immutable splice
+
+- `.with()` - immutable element replacement
+
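+A quick illustration of these methods (values are arbitrary):
+
+```typescript
+const items = [3, 1, 2]
+
+const sorted = items.toSorted((a, b) => a - b) // [1, 2, 3]
+const reversed = items.toReversed() // [2, 1, 3]
+const spliced = items.toSpliced(1, 1) // [3, 2] (one element removed at index 1)
+const replaced = items.with(0, 9) // [9, 1, 2]
+
+// `items` is still [3, 1, 2]; none of these mutate the original
+```
+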
+---
+
+## 8. Advanced Patterns
+
+**Impact: LOW**
+
+Advanced patterns for specific cases that require careful implementation.
+
+### 8.1 Store Event Handlers in Refs
+
+**Impact: LOW (stable subscriptions)**
+
+Store callbacks in refs when used in effects that shouldn't re-subscribe on callback changes.
+
+**Incorrect: re-subscribes on every render**
+
+```tsx
+function useWindowEvent(event: string, handler: () => void) {
+ useEffect(() => {
+ window.addEventListener(event, handler)
+ return () => window.removeEventListener(event, handler)
+ }, [event, handler])
+}
+```
+
+**Correct: stable subscription**
+
+```tsx
+import { useEffect, useRef } from 'react'
+
+function useWindowEvent(event: string, handler: () => void) {
+  const handlerRef = useRef(handler)
+  useEffect(() => {
+    handlerRef.current = handler
+  }, [handler])
+
+  useEffect(() => {
+    const listener = () => handlerRef.current()
+    window.addEventListener(event, listener)
+    return () => window.removeEventListener(event, listener)
+  }, [event])
+}
+```
+
+**Alternative: use `useEffectEvent` if you're on latest React:**
+
+`useEffectEvent` provides a cleaner API for the same pattern: it creates a stable function reference that always calls the latest version of the handler.
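+
+A sketch mirroring the standalone rule file:
+
+```tsx
+import { useEffect, useEffectEvent } from 'react'
+
+function useWindowEvent(event: string, handler: () => void) {
+  const onEvent = useEffectEvent(handler)
+
+  useEffect(() => {
+    window.addEventListener(event, onEvent)
+    return () => window.removeEventListener(event, onEvent)
+  }, [event])
+}
+```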
+
+### 8.2 useLatest for Stable Callback Refs
+
+**Impact: LOW (prevents effect re-runs)**
+
+Access latest values in callbacks without adding them to dependency arrays. Prevents effect re-runs while avoiding stale closures.
+
+**Implementation:**
+
+```typescript
+function useLatest<T>(value: T) {
+ const ref = useRef(value)
+ useEffect(() => {
+ ref.current = value
+ }, [value])
+ return ref
+}
+```
+
+**Incorrect: effect re-runs on every callback change**
+
+```tsx
+function SearchInput({ onSearch }: { onSearch: (q: string) => void }) {
+ const [query, setQuery] = useState('')
+
+ useEffect(() => {
+ const timeout = setTimeout(() => onSearch(query), 300)
+ return () => clearTimeout(timeout)
+ }, [query, onSearch])
+}
+```
+
+**Correct: stable effect, fresh callback**
+
+```tsx
+function SearchInput({ onSearch }: { onSearch: (q: string) => void }) {
+ const [query, setQuery] = useState('')
+ const onSearchRef = useLatest(onSearch)
+
+ useEffect(() => {
+ const timeout = setTimeout(() => onSearchRef.current(query), 300)
+ return () => clearTimeout(timeout)
+ }, [query])
+}
+```
+
+---
+
+## References
+
+1. [https://react.dev](https://react.dev)
+2. [https://nextjs.org](https://nextjs.org)
+3. [https://swr.vercel.app](https://swr.vercel.app)
+4. [https://github.com/shuding/better-all](https://github.com/shuding/better-all)
+5. [https://github.com/isaacs/node-lru-cache](https://github.com/isaacs/node-lru-cache)
+6. [https://vercel.com/blog/how-we-optimized-package-imports-in-next-js](https://vercel.com/blog/how-we-optimized-package-imports-in-next-js)
+7. [https://vercel.com/blog/how-we-made-the-vercel-dashboard-twice-as-fast](https://vercel.com/blog/how-we-made-the-vercel-dashboard-twice-as-fast)
diff --git a/.claude/skills/vercel-react-best-practices/SKILL.md b/.claude/skills/vercel-react-best-practices/SKILL.md
new file mode 100644
index 0000000000..b064716f60
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/SKILL.md
@@ -0,0 +1,125 @@
+---
+name: vercel-react-best-practices
+description: React and Next.js performance optimization guidelines from Vercel Engineering. This skill should be used when writing, reviewing, or refactoring React/Next.js code to ensure optimal performance patterns. Triggers on tasks involving React components, Next.js pages, data fetching, bundle optimization, or performance improvements.
+license: MIT
+metadata:
+ author: vercel
+ version: "1.0.0"
+---
+
+# Vercel React Best Practices
+
+Comprehensive performance optimization guide for React and Next.js applications, maintained by Vercel. Contains 45 rules across 8 categories, prioritized by impact to guide automated refactoring and code generation.
+
+## When to Apply
+
+Reference these guidelines when:
+- Writing new React components or Next.js pages
+- Implementing data fetching (client or server-side)
+- Reviewing code for performance issues
+- Refactoring existing React/Next.js code
+- Optimizing bundle size or load times
+
+## Rule Categories by Priority
+
+| Priority | Category | Impact | Prefix |
+|----------|----------|--------|--------|
+| 1 | Eliminating Waterfalls | CRITICAL | `async-` |
+| 2 | Bundle Size Optimization | CRITICAL | `bundle-` |
+| 3 | Server-Side Performance | HIGH | `server-` |
+| 4 | Client-Side Data Fetching | MEDIUM-HIGH | `client-` |
+| 5 | Re-render Optimization | MEDIUM | `rerender-` |
+| 6 | Rendering Performance | MEDIUM | `rendering-` |
+| 7 | JavaScript Performance | LOW-MEDIUM | `js-` |
+| 8 | Advanced Patterns | LOW | `advanced-` |
+
+## Quick Reference
+
+### 1. Eliminating Waterfalls (CRITICAL)
+
+- `async-defer-await` - Move await into branches where actually used
+- `async-parallel` - Use Promise.all() for independent operations
+- `async-dependencies` - Use better-all for partial dependencies
+- `async-api-routes` - Start promises early, await late in API routes
+- `async-suspense-boundaries` - Use Suspense to stream content
+
+### 2. Bundle Size Optimization (CRITICAL)
+
+- `bundle-barrel-imports` - Import directly, avoid barrel files
+- `bundle-dynamic-imports` - Use next/dynamic for heavy components
+- `bundle-defer-third-party` - Load analytics/logging after hydration
+- `bundle-conditional` - Load modules only when feature is activated
+- `bundle-preload` - Preload on hover/focus for perceived speed
+
+### 3. Server-Side Performance (HIGH)
+
+- `server-cache-react` - Use React.cache() for per-request deduplication
+- `server-cache-lru` - Use LRU cache for cross-request caching
+- `server-serialization` - Minimize data passed to client components
+- `server-parallel-fetching` - Restructure components to parallelize fetches
+- `server-after-nonblocking` - Use after() for non-blocking operations
+
+### 4. Client-Side Data Fetching (MEDIUM-HIGH)
+
+- `client-swr-dedup` - Use SWR for automatic request deduplication
+- `client-event-listeners` - Deduplicate global event listeners
+
+### 5. Re-render Optimization (MEDIUM)
+
+- `rerender-defer-reads` - Don't subscribe to state only used in callbacks
+- `rerender-memo` - Extract expensive work into memoized components
+- `rerender-dependencies` - Use primitive dependencies in effects
+- `rerender-derived-state` - Subscribe to derived booleans, not raw values
+- `rerender-functional-setstate` - Use functional setState for stable callbacks
+- `rerender-lazy-state-init` - Pass function to useState for expensive values
+- `rerender-transitions` - Use startTransition for non-urgent updates
+
+### 6. Rendering Performance (MEDIUM)
+
+- `rendering-animate-svg-wrapper` - Animate div wrapper, not SVG element
+- `rendering-content-visibility` - Use content-visibility for long lists
+- `rendering-hoist-jsx` - Extract static JSX outside components
+- `rendering-svg-precision` - Reduce SVG coordinate precision
+- `rendering-hydration-no-flicker` - Use inline script for client-only data
+- `rendering-activity` - Use Activity component for show/hide
+- `rendering-conditional-render` - Use ternary, not && for conditionals
+
+### 7. JavaScript Performance (LOW-MEDIUM)
+
+- `js-batch-dom-css` - Group CSS changes via classes or cssText
+- `js-index-maps` - Build Map for repeated lookups
+- `js-cache-property-access` - Cache object properties in loops
+- `js-cache-function-results` - Cache function results in module-level Map
+- `js-cache-storage` - Cache localStorage/sessionStorage reads
+- `js-combine-iterations` - Combine multiple filter/map into one loop
+- `js-length-check-first` - Check array length before expensive comparison
+- `js-early-exit` - Return early from functions
+- `js-hoist-regexp` - Hoist RegExp creation outside loops
+- `js-min-max-loop` - Use loop for min/max instead of sort
+- `js-set-map-lookups` - Use Set/Map for O(1) lookups
+- `js-tosorted-immutable` - Use toSorted() for immutability
+
+### 8. Advanced Patterns (LOW)
+
+- `advanced-event-handler-refs` - Store event handlers in refs
+- `advanced-use-latest` - useLatest for stable callback refs
+
+## How to Use
+
+Read individual rule files for detailed explanations and code examples:
+
+```
+rules/async-parallel.md
+rules/bundle-barrel-imports.md
+rules/_sections.md
+```
+
+Each rule file contains:
+- Brief explanation of why it matters
+- Incorrect code example with explanation
+- Correct code example with explanation
+- Additional context and references
+
+## Full Compiled Document
+
+For the complete guide with all rules expanded: `AGENTS.md`
diff --git a/.claude/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md b/.claude/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md
new file mode 100644
index 0000000000..97e7ade243
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md
@@ -0,0 +1,55 @@
+---
+title: Store Event Handlers in Refs
+impact: LOW
+impactDescription: stable subscriptions
+tags: advanced, hooks, refs, event-handlers, optimization
+---
+
+## Store Event Handlers in Refs
+
+Store callbacks in refs when used in effects that shouldn't re-subscribe on callback changes.
+
+**Incorrect (re-subscribes on every render):**
+
+```tsx
+function useWindowEvent(event: string, handler: (e) => void) {
+ useEffect(() => {
+ window.addEventListener(event, handler)
+ return () => window.removeEventListener(event, handler)
+ }, [event, handler])
+}
+```
+
+**Correct (stable subscription):**
+
+```tsx
+function useWindowEvent(event: string, handler: (e) => void) {
+ const handlerRef = useRef(handler)
+ useEffect(() => {
+ handlerRef.current = handler
+ }, [handler])
+
+ useEffect(() => {
+ const listener = (e) => handlerRef.current(e)
+ window.addEventListener(event, listener)
+ return () => window.removeEventListener(event, listener)
+ }, [event])
+}
+```
+
+**Alternative: use `useEffectEvent` if you're on latest React:**
+
+```tsx
+import { useEffectEvent } from 'react'
+
+function useWindowEvent(event: string, handler: (e) => void) {
+ const onEvent = useEffectEvent(handler)
+
+ useEffect(() => {
+ window.addEventListener(event, onEvent)
+ return () => window.removeEventListener(event, onEvent)
+ }, [event])
+}
+```
+
+`useEffectEvent` provides a cleaner API for the same pattern: it creates a stable function reference that always calls the latest version of the handler.
diff --git a/.claude/skills/vercel-react-best-practices/rules/advanced-use-latest.md b/.claude/skills/vercel-react-best-practices/rules/advanced-use-latest.md
new file mode 100644
index 0000000000..483c2ef7da
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/advanced-use-latest.md
@@ -0,0 +1,49 @@
+---
+title: useLatest for Stable Callback Refs
+impact: LOW
+impactDescription: prevents effect re-runs
+tags: advanced, hooks, useLatest, refs, optimization
+---
+
+## useLatest for Stable Callback Refs
+
+Access latest values in callbacks without adding them to dependency arrays. Prevents effect re-runs while avoiding stale closures.
+
+**Implementation:**
+
+```typescript
+function useLatest<T>(value: T) {
+ const ref = useRef(value)
+ useLayoutEffect(() => {
+ ref.current = value
+ }, [value])
+ return ref
+}
+```
+
+**Incorrect (effect re-runs on every callback change):**
+
+```tsx
+function SearchInput({ onSearch }: { onSearch: (q: string) => void }) {
+ const [query, setQuery] = useState('')
+
+ useEffect(() => {
+ const timeout = setTimeout(() => onSearch(query), 300)
+ return () => clearTimeout(timeout)
+ }, [query, onSearch])
+}
+```
+
+**Correct (stable effect, fresh callback):**
+
+```tsx
+function SearchInput({ onSearch }: { onSearch: (q: string) => void }) {
+ const [query, setQuery] = useState('')
+ const onSearchRef = useLatest(onSearch)
+
+ useEffect(() => {
+ const timeout = setTimeout(() => onSearchRef.current(query), 300)
+ return () => clearTimeout(timeout)
+ }, [query])
+}
+```
diff --git a/.claude/skills/vercel-react-best-practices/rules/async-api-routes.md b/.claude/skills/vercel-react-best-practices/rules/async-api-routes.md
new file mode 100644
index 0000000000..6feda1ef0a
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/async-api-routes.md
@@ -0,0 +1,38 @@
+---
+title: Prevent Waterfall Chains in API Routes
+impact: CRITICAL
+impactDescription: 2-10× improvement
+tags: api-routes, server-actions, waterfalls, parallelization
+---
+
+## Prevent Waterfall Chains in API Routes
+
+In API routes and Server Actions, start independent operations immediately, even if you don't await them yet.
+
+**Incorrect (config waits for auth, data waits for both):**
+
+```typescript
+export async function GET(request: Request) {
+ const session = await auth()
+ const config = await fetchConfig()
+ const data = await fetchData(session.user.id)
+ return Response.json({ data, config })
+}
+```
+
+**Correct (auth and config start immediately):**
+
+```typescript
+export async function GET(request: Request) {
+ const sessionPromise = auth()
+ const configPromise = fetchConfig()
+ const session = await sessionPromise
+ const [config, data] = await Promise.all([
+ configPromise,
+ fetchData(session.user.id)
+ ])
+ return Response.json({ data, config })
+}
+```
+
+For operations with more complex dependency chains, use `better-all` to automatically maximize parallelism (see Dependency-Based Parallelization).
diff --git a/.claude/skills/vercel-react-best-practices/rules/async-defer-await.md b/.claude/skills/vercel-react-best-practices/rules/async-defer-await.md
new file mode 100644
index 0000000000..ea7082a362
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/async-defer-await.md
@@ -0,0 +1,80 @@
+---
+title: Defer Await Until Needed
+impact: HIGH
+impactDescription: avoids blocking unused code paths
+tags: async, await, conditional, optimization
+---
+
+## Defer Await Until Needed
+
+Move `await` operations into the branches where they're actually used to avoid blocking code paths that don't need them.
+
+**Incorrect (blocks both branches):**
+
+```typescript
+async function handleRequest(userId: string, skipProcessing: boolean) {
+ const userData = await fetchUserData(userId)
+
+ if (skipProcessing) {
+ // Returns immediately but still waited for userData
+ return { skipped: true }
+ }
+
+ // Only this branch uses userData
+ return processUserData(userData)
+}
+```
+
+**Correct (only blocks when needed):**
+
+```typescript
+async function handleRequest(userId: string, skipProcessing: boolean) {
+ if (skipProcessing) {
+ // Returns immediately without waiting
+ return { skipped: true }
+ }
+
+ // Fetch only when needed
+ const userData = await fetchUserData(userId)
+ return processUserData(userData)
+}
+```
+
+**Another example (early return optimization):**
+
+```typescript
+// Incorrect: always fetches permissions
+async function updateResource(resourceId: string, userId: string) {
+ const permissions = await fetchPermissions(userId)
+ const resource = await getResource(resourceId)
+
+ if (!resource) {
+ return { error: 'Not found' }
+ }
+
+ if (!permissions.canEdit) {
+ return { error: 'Forbidden' }
+ }
+
+ return await updateResourceData(resource, permissions)
+}
+
+// Correct: fetches only when needed
+async function updateResource(resourceId: string, userId: string) {
+ const resource = await getResource(resourceId)
+
+ if (!resource) {
+ return { error: 'Not found' }
+ }
+
+ const permissions = await fetchPermissions(userId)
+
+ if (!permissions.canEdit) {
+ return { error: 'Forbidden' }
+ }
+
+ return await updateResourceData(resource, permissions)
+}
+```
+
+This optimization is especially valuable when the skipped branch is frequently taken, or when the deferred operation is expensive.
diff --git a/.claude/skills/vercel-react-best-practices/rules/async-dependencies.md b/.claude/skills/vercel-react-best-practices/rules/async-dependencies.md
new file mode 100644
index 0000000000..fb90d861ac
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/async-dependencies.md
@@ -0,0 +1,36 @@
+---
+title: Dependency-Based Parallelization
+impact: CRITICAL
+impactDescription: 2-10× improvement
+tags: async, parallelization, dependencies, better-all
+---
+
+## Dependency-Based Parallelization
+
+For operations with partial dependencies, use `better-all` to maximize parallelism. It automatically starts each task at the earliest possible moment.
+
+**Incorrect (profile waits for config unnecessarily):**
+
+```typescript
+const [user, config] = await Promise.all([
+ fetchUser(),
+ fetchConfig()
+])
+const profile = await fetchProfile(user.id)
+```
+
+**Correct (config and profile run in parallel):**
+
+```typescript
+import { all } from 'better-all'
+
+const { user, config, profile } = await all({
+ async user() { return fetchUser() },
+ async config() { return fetchConfig() },
+ async profile() {
+ return fetchProfile((await this.$.user).id)
+ }
+})
+```
+
+Reference: [https://github.com/shuding/better-all](https://github.com/shuding/better-all)
diff --git a/.claude/skills/vercel-react-best-practices/rules/async-parallel.md b/.claude/skills/vercel-react-best-practices/rules/async-parallel.md
new file mode 100644
index 0000000000..64133f6c31
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/async-parallel.md
@@ -0,0 +1,28 @@
+---
+title: Promise.all() for Independent Operations
+impact: CRITICAL
+impactDescription: 2-10× improvement
+tags: async, parallelization, promises, waterfalls
+---
+
+## Promise.all() for Independent Operations
+
+When async operations have no interdependencies, execute them concurrently using `Promise.all()`.
+
+**Incorrect (sequential execution, 3 round trips):**
+
+```typescript
+const user = await fetchUser()
+const posts = await fetchPosts()
+const comments = await fetchComments()
+```
+
+**Correct (parallel execution, 1 round trip):**
+
+```typescript
+const [user, posts, comments] = await Promise.all([
+ fetchUser(),
+ fetchPosts(),
+ fetchComments()
+])
+```
diff --git a/.claude/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md b/.claude/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md
new file mode 100644
index 0000000000..1fbc05b04e
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md
@@ -0,0 +1,99 @@
+---
+title: Strategic Suspense Boundaries
+impact: HIGH
+impactDescription: faster initial paint
+tags: async, suspense, streaming, layout-shift
+---
+
+## Strategic Suspense Boundaries
+
+Instead of awaiting data in async components before returning JSX, use Suspense boundaries to show the wrapper UI faster while data loads.
+
+**Incorrect (wrapper blocked by data fetching):**
+
+```tsx
+async function Page() {
+  const data = await fetchData() // Blocks entire page
+
+  return (
+    <div>
+      <div>Sidebar</div>
+      <div>Header</div>
+      <div>
+        <DataDisplay data={data} />
+      </div>
+      <div>Footer</div>
+    </div>
+  )
+}
+```
+
+The entire layout waits for data even though only the middle section needs it.
+
+**Correct (wrapper shows immediately, data streams in):**
+
+```tsx
+function Page() {
+  return (
+    <div>
+      <div>Sidebar</div>
+      <div>Header</div>
+      <Suspense fallback={<Skeleton />}>
+        <DataDisplay />
+      </Suspense>
+      <div>Footer</div>
+    </div>
+  )
+}
+
+async function DataDisplay() {
+  const data = await fetchData() // Only blocks this component
+  return <div>{data.content}</div>
+}
+```
+
+Sidebar, Header, and Footer render immediately. Only DataDisplay waits for data.
+
+**Alternative (share promise across components):**
+
+```tsx
+function Page() {
+ // Start fetch immediately, but don't await
+ const dataPromise = fetchData()
+
+  return (
+    <Suspense fallback={<Skeleton />}>
+      <DataDisplay dataPromise={dataPromise} />
+      <DataSummary dataPromise={dataPromise} />
+    </Suspense>
+  )
+}
+```
+
+This applies to all CSS transforms and transitions (`transform`, `opacity`, `translate`, `scale`, `rotate`). The wrapper div allows browsers to use GPU acceleration for smoother animations.
diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-conditional-render.md b/.claude/skills/vercel-react-best-practices/rules/rendering-conditional-render.md
new file mode 100644
index 0000000000..7e866f5852
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/rendering-conditional-render.md
@@ -0,0 +1,40 @@
+---
+title: Use Explicit Conditional Rendering
+impact: LOW
+impactDescription: prevents rendering 0 or NaN
+tags: rendering, conditional, jsx, falsy-values
+---
+
+## Use Explicit Conditional Rendering
+
+Use explicit ternary operators (`? :`) instead of `&&` for conditional rendering when the condition can be `0`, `NaN`, or other falsy values that render.
+
+**Incorrect (renders "0" when count is 0):**
+
+```tsx
+function Badge({ count }: { count: number }) {
+  return (
+    <div>
+      {count && <span>{count}</span>}
+    </div>
+  )
+}
+
+// When count = 0, renders: <div>0</div>
+// When count = 5, renders: <div><span>5</span></div>
+```
+
+**Correct (renders nothing when count is 0):**
+
+```tsx
+function Badge({ count }: { count: number }) {
+  return (
+    <div>
+      {count > 0 ? <span>{count}</span> : null}
+    </div>
+  )
+}
+
+// When count = 0, renders: <div></div>
+// When count = 5, renders: <div><span>5</span></div>
+```
diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md b/.claude/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md
new file mode 100644
index 0000000000..5cf0e79b69
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md
@@ -0,0 +1,82 @@
+---
+title: Prevent Hydration Mismatch Without Flickering
+impact: MEDIUM
+impactDescription: avoids visual flicker and hydration errors
+tags: rendering, ssr, hydration, localStorage, flicker
+---
+
+## Prevent Hydration Mismatch Without Flickering
+
+When rendering content that depends on client-side storage (localStorage, cookies), avoid both SSR breakage and post-hydration flickering by injecting a synchronous script that updates the DOM before React hydrates.
+
+**Incorrect (breaks SSR):**
+
+```tsx
+function ThemeWrapper({ children }: { children: ReactNode }) {
+ // localStorage is not available on server - throws error
+ const theme = localStorage.getItem('theme') || 'light'
+
+  return (
+    <div data-theme={theme}>{children}</div>
+  )
+}
+```
+
+The usual workaround (initializing state to a default like `light` and reading localStorage in a `useEffect`) avoids the server error, but the component first renders the default value and then updates after hydration, causing a visible flash of incorrect content.
+
+**Correct (no flicker, no hydration mismatch):**
+
+```tsx
+function ThemeWrapper({ children }: { children: ReactNode }) {
+ return (
+    <>
+      <div id="theme-wrapper" data-theme="light">
+        {children}
+      </div>
+      <script
+        dangerouslySetInnerHTML={{
+          __html: `document.getElementById('theme-wrapper').dataset.theme = localStorage.getItem('theme') || 'light'`,
+        }}
+      />
+    </>
+ )
+}
+```
+
+The inline script executes synchronously before showing the element, ensuring the DOM already has the correct value. No flickering, no hydration mismatch.
+
+This pattern is especially useful for theme toggles, user preferences, authentication states, and any client-only data that should render immediately without flashing default values.
diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-svg-precision.md b/.claude/skills/vercel-react-best-practices/rules/rendering-svg-precision.md
new file mode 100644
index 0000000000..6d77128603
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/rendering-svg-precision.md
@@ -0,0 +1,28 @@
+---
+title: Optimize SVG Precision
+impact: LOW
+impactDescription: reduces file size
+tags: rendering, svg, optimization, svgo
+---
+
+## Optimize SVG Precision
+
+Reduce SVG coordinate precision to decrease file size. The optimal precision depends on the viewBox size, but coordinates can usually be rounded to one or two decimal places with no visible quality loss.
+
+**Incorrect (excessive precision):**
+
+```svg
+<!-- illustrative path data with excessive precision -->
+<path d="M12.48291015625 3.91827392578125 L14.19023132324219 8.68212890625 Z" />
+```
+
+**Correct (1 decimal place):**
+
+```svg
+<!-- same path rounded to 1 decimal place -->
+<path d="M12.5 3.9 L14.2 8.7 Z" />
+```
+
+**Automate with SVGO:**
+
+```bash
+npx svgo --precision=1 --multipass icon.svg
+```
diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-defer-reads.md b/.claude/skills/vercel-react-best-practices/rules/rerender-defer-reads.md
new file mode 100644
index 0000000000..e867c95f02
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/rerender-defer-reads.md
@@ -0,0 +1,39 @@
+---
+title: Defer State Reads to Usage Point
+impact: MEDIUM
+impactDescription: avoids unnecessary subscriptions
+tags: rerender, searchParams, localStorage, optimization
+---
+
+## Defer State Reads to Usage Point
+
+Don't subscribe to dynamic state (searchParams, localStorage) if you only read it inside callbacks.
+
+**Incorrect (subscribes to all searchParams changes):**
+
+```tsx
+function ShareButton({ chatId }: { chatId: string }) {
+ const searchParams = useSearchParams()
+
+ const handleShare = () => {
+ const ref = searchParams.get('ref')
+ shareChat(chatId, { ref })
+ }
+
+  return <button onClick={handleShare}>Share</button>
+}
+```
+
+**Correct (reads on demand, no subscription):**
+
+```tsx
+function ShareButton({ chatId }: { chatId: string }) {
+ const handleShare = () => {
+ const params = new URLSearchParams(window.location.search)
+ const ref = params.get('ref')
+ shareChat(chatId, { ref })
+ }
+
+  return <button onClick={handleShare}>Share</button>
+}
+```
diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-dependencies.md b/.claude/skills/vercel-react-best-practices/rules/rerender-dependencies.md
new file mode 100644
index 0000000000..47a4d92685
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/rerender-dependencies.md
@@ -0,0 +1,45 @@
+---
+title: Narrow Effect Dependencies
+impact: LOW
+impactDescription: minimizes effect re-runs
+tags: rerender, useEffect, dependencies, optimization
+---
+
+## Narrow Effect Dependencies
+
+Specify primitive dependencies instead of objects to minimize effect re-runs.
+
+**Incorrect (re-runs on any user field change):**
+
+```tsx
+useEffect(() => {
+ console.log(user.id)
+}, [user])
+```
+
+**Correct (re-runs only when id changes):**
+
+```tsx
+useEffect(() => {
+ console.log(user.id)
+}, [user.id])
+```
+
+**For derived state, compute outside effect:**
+
+```tsx
+// Incorrect: runs on width=767, 766, 765...
+useEffect(() => {
+ if (width < 768) {
+ enableMobileMode()
+ }
+}, [width])
+
+// Correct: runs only on boolean transition
+const isMobile = width < 768
+useEffect(() => {
+ if (isMobile) {
+ enableMobileMode()
+ }
+}, [isMobile])
+```
diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-derived-state.md b/.claude/skills/vercel-react-best-practices/rules/rerender-derived-state.md
new file mode 100644
index 0000000000..e5c899f6c0
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/rerender-derived-state.md
@@ -0,0 +1,29 @@
+---
+title: Subscribe to Derived State
+impact: MEDIUM
+impactDescription: reduces re-render frequency
+tags: rerender, derived-state, media-query, optimization
+---
+
+## Subscribe to Derived State
+
+Subscribe to derived boolean state instead of continuous values to reduce re-render frequency.
+
+**Incorrect (re-renders on every pixel change):**
+
+```tsx
+function Sidebar() {
+ const width = useWindowWidth() // updates continuously
+ const isMobile = width < 768
+  return <nav className={isMobile ? 'compact' : 'expanded'} />
+}
+```
+
+**Correct (re-renders only when boolean changes):**
+
+```tsx
+function Sidebar() {
+ const isMobile = useMediaQuery('(max-width: 767px)')
+  return <nav className={isMobile ? 'compact' : 'expanded'} />
+}
+```
diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md b/.claude/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md
new file mode 100644
index 0000000000..b004ef45e3
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md
@@ -0,0 +1,74 @@
+---
+title: Use Functional setState Updates
+impact: MEDIUM
+impactDescription: prevents stale closures and unnecessary callback recreations
+tags: react, hooks, useState, useCallback, callbacks, closures
+---
+
+## Use Functional setState Updates
+
+When updating state based on the current state value, use the functional update form of setState instead of directly referencing the state variable. This prevents stale closures, eliminates unnecessary dependencies, and creates stable callback references.
+
+**Incorrect (requires state as dependency):**
+
+```tsx
+function TodoList() {
+ const [items, setItems] = useState(initialItems)
+
+ // Callback must depend on items, recreated on every items change
+ const addItems = useCallback((newItems: Item[]) => {
+ setItems([...items, ...newItems])
+ }, [items]) // ❌ items dependency causes recreations
+
+ // Risk of stale closure if dependency is forgotten
+ const removeItem = useCallback((id: string) => {
+ setItems(items.filter(item => item.id !== id))
+ }, []) // ❌ Missing items dependency - will use stale items!
+
+  return <ItemList items={items} onAdd={addItems} onRemove={removeItem} />
+}
+```
+
+The first callback is recreated every time `items` changes, which can cause child components to re-render unnecessarily. The second callback has a stale closure bug—it will always reference the initial `items` value.
+
+**Correct (stable callbacks, no stale closures):**
+
+```tsx
+function TodoList() {
+ const [items, setItems] = useState(initialItems)
+
+ // Stable callback, never recreated
+ const addItems = useCallback((newItems: Item[]) => {
+ setItems(curr => [...curr, ...newItems])
+ }, []) // ✅ No dependencies needed
+
+ // Always uses latest state, no stale closure risk
+ const removeItem = useCallback((id: string) => {
+ setItems(curr => curr.filter(item => item.id !== id))
+ }, []) // ✅ Safe and stable
+
+  return <ItemList items={items} onAdd={addItems} onRemove={removeItem} />
+}
+```
+
+**Benefits:**
+
+1. **Stable callback references** - Callbacks don't need to be recreated when state changes
+2. **No stale closures** - Always operates on the latest state value
+3. **Fewer dependencies** - Simplifies dependency arrays and reduces the chance of dependency mistakes
+4. **Prevents bugs** - Eliminates the most common source of React closure bugs
+
+**When to use functional updates:**
+
+- Any setState that depends on the current state value
+- Inside useCallback/useMemo when state is needed
+- Event handlers that reference state
+- Async operations that update state
+
+**When direct updates are fine:**
+
+- Setting state to a static value: `setCount(0)`
+- Setting state from props/arguments only: `setName(newName)`
+- State doesn't depend on previous value
+
+**Note:** If your project has [React Compiler](https://react.dev/learn/react-compiler) enabled, the compiler can automatically optimize some cases, but functional updates are still recommended for correctness and to prevent stale closure bugs.
diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md b/.claude/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md
new file mode 100644
index 0000000000..4ecb350fba
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md
@@ -0,0 +1,58 @@
+---
+title: Use Lazy State Initialization
+impact: MEDIUM
+impactDescription: wasted computation on every render
+tags: react, hooks, useState, performance, initialization
+---
+
+## Use Lazy State Initialization
+
+Pass a function to `useState` for expensive initial values. Without the function form, the initializer runs on every render even though the value is only used once.
+
+**Incorrect (runs on every render):**
+
+```tsx
+function FilteredList({ items }: { items: Item[] }) {
+ // buildSearchIndex() runs on EVERY render, even after initialization
+ const [searchIndex, setSearchIndex] = useState(buildSearchIndex(items))
+ const [query, setQuery] = useState('')
+
+ // When query changes, buildSearchIndex runs again unnecessarily
+  return <SearchResults index={searchIndex} query={query} onQueryChange={setQuery} />
+}
+
+function UserProfile() {
+ // JSON.parse runs on every render
+ const [settings, setSettings] = useState(
+ JSON.parse(localStorage.getItem('settings') || '{}')
+ )
+
+  return <SettingsPanel settings={settings} onChange={setSettings} />
+}
+```
+
+**Correct (runs only once):**
+
+```tsx
+function FilteredList({ items }: { items: Item[] }) {
+ // buildSearchIndex() runs ONLY on initial render
+ const [searchIndex, setSearchIndex] = useState(() => buildSearchIndex(items))
+ const [query, setQuery] = useState('')
+
+  return <SearchResults index={searchIndex} query={query} onQueryChange={setQuery} />
+}
+
+function UserProfile() {
+ // JSON.parse runs only on initial render
+ const [settings, setSettings] = useState(() => {
+ const stored = localStorage.getItem('settings')
+ return stored ? JSON.parse(stored) : {}
+ })
+
+  return <SettingsPanel settings={settings} onChange={setSettings} />
+}
+```
+
+Use lazy initialization when computing initial values from localStorage/sessionStorage, building data structures (indexes, maps), reading from the DOM, or performing heavy transformations.
+
+For simple primitives (`useState(0)`), direct references (`useState(props.value)`), or cheap literals (`useState({})`), the function form is unnecessary.
diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-memo.md b/.claude/skills/vercel-react-best-practices/rules/rerender-memo.md
new file mode 100644
index 0000000000..f8982ab612
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/rerender-memo.md
@@ -0,0 +1,44 @@
+---
+title: Extract to Memoized Components
+impact: MEDIUM
+impactDescription: enables early returns
+tags: rerender, memo, useMemo, optimization
+---
+
+## Extract to Memoized Components
+
+Extract expensive work into memoized components to enable early returns before computation.
+
+**Incorrect (computes avatar even when loading):**
+
+```tsx
+function Profile({ user, loading }: Props) {
+ const avatar = useMemo(() => {
+ const id = computeAvatarId(user)
+    return <Avatar id={id} />
+  }, [user])
+
+  if (loading) return <Spinner />
+  return <div>{avatar}</div>
+}
+```
+
+**Correct (skips computation when loading):**
+
+```tsx
+const UserAvatar = memo(function UserAvatar({ user }: { user: User }) {
+ const id = useMemo(() => computeAvatarId(user), [user])
+  return <Avatar id={id} />
+})
+
+function Profile({ user, loading }: Props) {
+  if (loading) return <Spinner />
+  return (
+    <div>
+      <UserAvatar user={user} />
+    </div>
+  )
+}
+```
+
+**Note:** If your project has [React Compiler](https://react.dev/learn/react-compiler) enabled, manual memoization with `memo()` and `useMemo()` is not necessary. The compiler automatically optimizes re-renders.
diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-transitions.md b/.claude/skills/vercel-react-best-practices/rules/rerender-transitions.md
new file mode 100644
index 0000000000..d99f43f764
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/rerender-transitions.md
@@ -0,0 +1,40 @@
+---
+title: Use Transitions for Non-Urgent Updates
+impact: MEDIUM
+impactDescription: maintains UI responsiveness
+tags: rerender, transitions, startTransition, performance
+---
+
+## Use Transitions for Non-Urgent Updates
+
+Mark frequent, non-urgent state updates as transitions to maintain UI responsiveness.
+
+**Incorrect (blocks UI on every scroll):**
+
+```tsx
+function ScrollTracker() {
+ const [scrollY, setScrollY] = useState(0)
+ useEffect(() => {
+ const handler = () => setScrollY(window.scrollY)
+ window.addEventListener('scroll', handler, { passive: true })
+ return () => window.removeEventListener('scroll', handler)
+ }, [])
+}
+```
+
+**Correct (non-blocking updates):**
+
+```tsx
+import { startTransition } from 'react'
+
+function ScrollTracker() {
+ const [scrollY, setScrollY] = useState(0)
+ useEffect(() => {
+ const handler = () => {
+ startTransition(() => setScrollY(window.scrollY))
+ }
+ window.addEventListener('scroll', handler, { passive: true })
+ return () => window.removeEventListener('scroll', handler)
+ }, [])
+}
+```
diff --git a/.claude/skills/vercel-react-best-practices/rules/server-after-nonblocking.md b/.claude/skills/vercel-react-best-practices/rules/server-after-nonblocking.md
new file mode 100644
index 0000000000..e8f5b260f5
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/server-after-nonblocking.md
@@ -0,0 +1,73 @@
+---
+title: Use after() for Non-Blocking Operations
+impact: MEDIUM
+impactDescription: faster response times
+tags: server, async, logging, analytics, side-effects
+---
+
+## Use after() for Non-Blocking Operations
+
+Use Next.js's `after()` to schedule work that should execute after a response is sent. This prevents logging, analytics, and other side effects from blocking the response.
+
+**Incorrect (blocks response):**
+
+```tsx
+import { logUserAction } from '@/app/utils'
+
+export async function POST(request: Request) {
+ // Perform mutation
+ await updateDatabase(request)
+
+ // Logging blocks the response
+ const userAgent = request.headers.get('user-agent') || 'unknown'
+ await logUserAction({ userAgent })
+
+ return new Response(JSON.stringify({ status: 'success' }), {
+ status: 200,
+ headers: { 'Content-Type': 'application/json' }
+ })
+}
+```
+
+**Correct (non-blocking):**
+
+```tsx
+import { after } from 'next/server'
+import { headers, cookies } from 'next/headers'
+import { logUserAction } from '@/app/utils'
+
+export async function POST(request: Request) {
+ // Perform mutation
+ await updateDatabase(request)
+
+ // Log after response is sent
+ after(async () => {
+ const userAgent = (await headers()).get('user-agent') || 'unknown'
+ const sessionCookie = (await cookies()).get('session-id')?.value || 'anonymous'
+
+ logUserAction({ sessionCookie, userAgent })
+ })
+
+ return new Response(JSON.stringify({ status: 'success' }), {
+ status: 200,
+ headers: { 'Content-Type': 'application/json' }
+ })
+}
+```
+
+The response is sent immediately while logging happens in the background.
+
+**Common use cases:**
+
+- Analytics tracking
+- Audit logging
+- Sending notifications
+- Cache invalidation
+- Cleanup tasks
+
+**Important notes:**
+
+- `after()` runs even if the response fails or redirects
+- Works in Server Actions, Route Handlers, and Server Components
+
+Reference: [https://nextjs.org/docs/app/api-reference/functions/after](https://nextjs.org/docs/app/api-reference/functions/after)
diff --git a/.claude/skills/vercel-react-best-practices/rules/server-cache-lru.md b/.claude/skills/vercel-react-best-practices/rules/server-cache-lru.md
new file mode 100644
index 0000000000..ef6938aa53
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/server-cache-lru.md
@@ -0,0 +1,41 @@
+---
+title: Cross-Request LRU Caching
+impact: HIGH
+impactDescription: caches across requests
+tags: server, cache, lru, cross-request
+---
+
+## Cross-Request LRU Caching
+
+`React.cache()` only works within one request. For data shared across sequential requests (user clicks button A then button B), use an LRU cache.
+
+**Implementation:**
+
+```typescript
+import { LRUCache } from 'lru-cache'
+
+const cache = new LRUCache({
+ max: 1000,
+ ttl: 5 * 60 * 1000 // 5 minutes
+})
+
+export async function getUser(id: string) {
+ const cached = cache.get(id)
+ if (cached) return cached
+
+ const user = await db.user.findUnique({ where: { id } })
+ cache.set(id, user)
+ return user
+}
+
+// Request 1: DB query, result cached
+// Request 2: cache hit, no DB query
+```
+
+Use when sequential user actions hit multiple endpoints needing the same data within seconds.
+
+**With Vercel's [Fluid Compute](https://vercel.com/docs/fluid-compute):** LRU caching is especially effective because multiple concurrent requests can share the same function instance and cache. This means the cache persists across requests without needing external storage like Redis.
+
+**In traditional serverless:** Each invocation runs in isolation, so consider Redis for cross-process caching.
+
+Reference: [https://github.com/isaacs/node-lru-cache](https://github.com/isaacs/node-lru-cache)
diff --git a/.claude/skills/vercel-react-best-practices/rules/server-cache-react.md b/.claude/skills/vercel-react-best-practices/rules/server-cache-react.md
new file mode 100644
index 0000000000..87c9ca3316
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/server-cache-react.md
@@ -0,0 +1,76 @@
+---
+title: Per-Request Deduplication with React.cache()
+impact: MEDIUM
+impactDescription: deduplicates within request
+tags: server, cache, react-cache, deduplication
+---
+
+## Per-Request Deduplication with React.cache()
+
+Use `React.cache()` for server-side request deduplication. Authentication and database queries benefit most.
+
+**Usage:**
+
+```typescript
+import { cache } from 'react'
+
+export const getCurrentUser = cache(async () => {
+ const session = await auth()
+ if (!session?.user?.id) return null
+ return await db.user.findUnique({
+ where: { id: session.user.id }
+ })
+})
+```
+
+Within a single request, multiple calls to `getCurrentUser()` execute the query only once.
+
+**Avoid inline objects as arguments:**
+
+`React.cache()` uses shallow equality (`Object.is`) to determine cache hits. Inline objects create new references each call, preventing cache hits.
+
+**Incorrect (always cache miss):**
+
+```typescript
+const getUser = cache(async (params: { uid: number }) => {
+ return await db.user.findUnique({ where: { id: params.uid } })
+})
+
+// Each call creates new object, never hits cache
+getUser({ uid: 1 })
+getUser({ uid: 1 }) // Cache miss, runs query again
+```
+
+**Correct (cache hit):**
+
+```typescript
+const getUser = cache(async (uid: number) => {
+ return await db.user.findUnique({ where: { id: uid } })
+})
+
+// Primitive args use value equality
+getUser(1)
+getUser(1) // Cache hit, returns cached result
+```
+
+If you must pass objects, pass the same reference:
+
+```typescript
+const params = { uid: 1 }
+getUser(params) // Query runs
+getUser(params) // Cache hit (same reference)
+```
+
+**Next.js-Specific Note:**
+
+In Next.js, the `fetch` API is automatically extended with request memoization. Requests with the same URL and options are automatically deduplicated within a single request, so you don't need `React.cache()` for `fetch` calls. However, `React.cache()` is still essential for other async tasks:
+
+- Database queries (Prisma, Drizzle, etc.)
+- Heavy computations
+- Authentication checks
+- File system operations
+- Any non-fetch async work
+
+Use `React.cache()` to deduplicate these operations across your component tree.
+
+Reference: [React.cache documentation](https://react.dev/reference/react/cache)
diff --git a/.claude/skills/vercel-react-best-practices/rules/server-parallel-fetching.md b/.claude/skills/vercel-react-best-practices/rules/server-parallel-fetching.md
new file mode 100644
index 0000000000..1affc835a6
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/server-parallel-fetching.md
@@ -0,0 +1,83 @@
+---
+title: Parallel Data Fetching with Component Composition
+impact: CRITICAL
+impactDescription: eliminates server-side waterfalls
+tags: server, rsc, parallel-fetching, composition
+---
+
+## Parallel Data Fetching with Component Composition
+
+React Server Components execute sequentially within a tree. Restructure with composition to parallelize data fetching.
+
+**Incorrect (Sidebar waits for Page's fetch to complete):**
+
+```tsx
+export default async function Page() {
+ const header = await fetchHeader()
+  return (
+    <Layout>
+      <Header data={header} />
+      <Sidebar />
+    </Layout>
+  )
+}
+```
+
+**Correct (Page does not await; Header and Sidebar fetch their own data in parallel):**
+
+```tsx
+async function Header() {
+  const data = await fetchHeader() // fetch moved into the component that needs it
+  return <header>{data.title}</header>
+}
+
+export default function Page() {
+  return (
+    <Layout>
+      <Header />
+      <Sidebar />
+    </Layout>
+  )
+}
+```
diff --git a/.claude/skills/vercel-react-best-practices/rules/server-serialization.md b/.claude/skills/vercel-react-best-practices/rules/server-serialization.md
new file mode 100644
index 0000000000..39c5c4164c
--- /dev/null
+++ b/.claude/skills/vercel-react-best-practices/rules/server-serialization.md
@@ -0,0 +1,38 @@
+---
+title: Minimize Serialization at RSC Boundaries
+impact: HIGH
+impactDescription: reduces data transfer size
+tags: server, rsc, serialization, props
+---
+
+## Minimize Serialization at RSC Boundaries
+
+The React Server/Client boundary serializes all object properties into strings and embeds them in the HTML response and subsequent RSC requests. This serialized data directly impacts page weight and load time, so **size matters a lot**. Only pass fields that the client actually uses.
+
+**Incorrect (serializes all 50 fields):**
+
+```tsx
+async function Page() {
+ const user = await fetchUser() // 50 fields
+  return <Profile user={user} />
+}
+
+'use client'
+function Profile({ user }: { user: User }) {
+  return <div>{user.name}</div> // uses 1 field
+}
+```
+
+**Correct (serializes only 1 field):**
+
+```tsx
+async function Page() {
+ const user = await fetchUser()
+  return <Profile name={user.name} />
+}
+
+'use client'
+function Profile({ name }: { name: string }) {
+  return <div>{name}</div>
+}
+```
diff --git a/.github/labeler.yml b/.github/labeler.yml
new file mode 100644
index 0000000000..d1d324d381
--- /dev/null
+++ b/.github/labeler.yml
@@ -0,0 +1,3 @@
+web:
+ - changed-files:
+ - any-glob-to-any-file: 'web/**'
diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml
index 5413f83c27..ff006324bb 100644
--- a/.github/workflows/autofix.yml
+++ b/.github/workflows/autofix.yml
@@ -16,14 +16,14 @@ jobs:
- name: Check Docker Compose inputs
id: docker-compose-changes
- uses: tj-actions/changed-files@v46
+ uses: tj-actions/changed-files@v47
with:
files: |
docker/generate_docker_compose
docker/.env.example
docker/docker-compose-template.yaml
docker/docker-compose.yaml
- - uses: actions/setup-python@v5
+ - uses: actions/setup-python@v6
with:
python-version: "3.11"
@@ -82,6 +82,6 @@ jobs:
# mdformat breaks YAML front matter in markdown files. Add --exclude for directories containing YAML front matter.
- name: mdformat
run: |
- uvx --python 3.13 mdformat . --exclude ".claude/skills/**/SKILL.md"
+ uvx --python 3.13 mdformat . --exclude ".claude/skills/**"
- uses: autofix-ci/action@635ffb0c9798bd160680f18fd73371e355b85f27
diff --git a/.github/workflows/build-push.yml b/.github/workflows/build-push.yml
index bbf89236de..704d896192 100644
--- a/.github/workflows/build-push.yml
+++ b/.github/workflows/build-push.yml
@@ -112,7 +112,7 @@ jobs:
context: "web"
steps:
- name: Download digests
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v7
with:
path: /tmp/digests
pattern: digests-${{ matrix.context }}-*
diff --git a/.github/workflows/deploy-agent-dev.yml b/.github/workflows/deploy-agent-dev.yml
index dff48b5510..dd759f7ba5 100644
--- a/.github/workflows/deploy-agent-dev.yml
+++ b/.github/workflows/deploy-agent-dev.yml
@@ -19,7 +19,7 @@ jobs:
github.event.workflow_run.head_branch == 'deploy/agent-dev'
steps:
- name: Deploy to server
- uses: appleboy/ssh-action@v0.1.8
+ uses: appleboy/ssh-action@v1
with:
host: ${{ secrets.AGENT_DEV_SSH_HOST }}
username: ${{ secrets.SSH_USER }}
diff --git a/.github/workflows/deploy-dev.yml b/.github/workflows/deploy-dev.yml
index cd1c86e668..38fa0b9a7f 100644
--- a/.github/workflows/deploy-dev.yml
+++ b/.github/workflows/deploy-dev.yml
@@ -16,7 +16,7 @@ jobs:
github.event.workflow_run.head_branch == 'deploy/dev'
steps:
- name: Deploy to server
- uses: appleboy/ssh-action@v0.1.8
+ uses: appleboy/ssh-action@v1
with:
host: ${{ secrets.SSH_HOST }}
username: ${{ secrets.SSH_USER }}
diff --git a/.github/workflows/deploy-hitl.yml b/.github/workflows/deploy-hitl.yml
new file mode 100644
index 0000000000..7d5f0a22e7
--- /dev/null
+++ b/.github/workflows/deploy-hitl.yml
@@ -0,0 +1,29 @@
+name: Deploy HITL
+
+on:
+ workflow_run:
+ workflows: ["Build and Push API & Web"]
+ branches:
+ - "feat/hitl-frontend"
+ - "feat/hitl-backend"
+ types:
+ - completed
+
+jobs:
+ deploy:
+ runs-on: ubuntu-latest
+ if: |
+ github.event.workflow_run.conclusion == 'success' &&
+ (
+ github.event.workflow_run.head_branch == 'feat/hitl-frontend' ||
+ github.event.workflow_run.head_branch == 'feat/hitl-backend'
+ )
+ steps:
+ - name: Deploy to server
+ uses: appleboy/ssh-action@v1
+ with:
+ host: ${{ secrets.HITL_SSH_HOST }}
+ username: ${{ secrets.SSH_USER }}
+ key: ${{ secrets.SSH_PRIVATE_KEY }}
+ script: |
+ ${{ vars.SSH_SCRIPT || secrets.SSH_SCRIPT }}
diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml
new file mode 100644
index 0000000000..06782b53c1
--- /dev/null
+++ b/.github/workflows/labeler.yml
@@ -0,0 +1,14 @@
+name: "Pull Request Labeler"
+on:
+ pull_request_target:
+
+jobs:
+ labeler:
+ permissions:
+ contents: read
+ pull-requests: write
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/labeler@v6
+ with:
+ sync-labels: true
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 1870b1f670..b6df1d7e93 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -18,7 +18,7 @@ jobs:
pull-requests: write
steps:
- - uses: actions/stale@v5
+ - uses: actions/stale@v10
with:
days-before-issue-stale: 15
days-before-issue-close: 3
diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml
index 462ece303e..5551030f1e 100644
--- a/.github/workflows/style.yml
+++ b/.github/workflows/style.yml
@@ -65,6 +65,9 @@ jobs:
defaults:
run:
working-directory: ./web
+ permissions:
+ checks: write
+ pull-requests: read
steps:
- name: Checkout code
@@ -90,7 +93,7 @@ jobs:
uses: actions/setup-node@v6
if: steps.changed-files.outputs.any_changed == 'true'
with:
- node-version: 22
+ node-version: 24
cache: pnpm
cache-dependency-path: ./web/pnpm-lock.yaml
@@ -103,7 +106,21 @@ jobs:
if: steps.changed-files.outputs.any_changed == 'true'
working-directory: ./web
run: |
- pnpm run lint
+ pnpm run lint:ci
+ # pnpm run lint:report
+ # continue-on-error: true
+
+ # - name: Annotate Code
+ # if: steps.changed-files.outputs.any_changed == 'true' && github.event_name == 'pull_request'
+ # uses: DerLev/eslint-annotations@51347b3a0abfb503fc8734d5ae31c4b151297fae
+ # with:
+ # eslint-report: web/eslint_report.json
+ # github-token: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Web tsslint
+ if: steps.changed-files.outputs.any_changed == 'true'
+ working-directory: ./web
+ run: pnpm run lint:tss
- name: Web type check
if: steps.changed-files.outputs.any_changed == 'true'
@@ -115,11 +132,6 @@ jobs:
working-directory: ./web
run: pnpm run knip
- - name: Web build check
- if: steps.changed-files.outputs.any_changed == 'true'
- working-directory: ./web
- run: pnpm run build
-
superlinter:
name: SuperLinter
runs-on: ubuntu-latest
diff --git a/.github/workflows/tool-test-sdks.yaml b/.github/workflows/tool-test-sdks.yaml
index 0259ef2232..ec392cb3b2 100644
--- a/.github/workflows/tool-test-sdks.yaml
+++ b/.github/workflows/tool-test-sdks.yaml
@@ -16,10 +16,6 @@ jobs:
name: unit test for Node.js SDK
runs-on: ubuntu-latest
- strategy:
- matrix:
- node-version: [16, 18, 20, 22]
-
defaults:
run:
working-directory: sdks/nodejs-client
@@ -29,10 +25,10 @@ jobs:
with:
persist-credentials: false
- - name: Use Node.js ${{ matrix.node-version }}
+ - name: Use Node.js
uses: actions/setup-node@v6
with:
- node-version: ${{ matrix.node-version }}
+ node-version: 24
cache: ''
cache-dependency-path: 'pnpm-lock.yaml'
diff --git a/.github/workflows/translate-i18n-claude.yml b/.github/workflows/translate-i18n-claude.yml
index 003e7ffc6e..8344af9890 100644
--- a/.github/workflows/translate-i18n-claude.yml
+++ b/.github/workflows/translate-i18n-claude.yml
@@ -57,7 +57,7 @@ jobs:
- name: Set up Node.js
uses: actions/setup-node@v6
with:
- node-version: 'lts/*'
+ node-version: 24
cache: pnpm
cache-dependency-path: ./web/pnpm-lock.yaml
diff --git a/.github/workflows/trigger-i18n-sync.yml b/.github/workflows/trigger-i18n-sync.yml
index de093c9235..66a29453b4 100644
--- a/.github/workflows/trigger-i18n-sync.yml
+++ b/.github/workflows/trigger-i18n-sync.yml
@@ -21,7 +21,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
fetch-depth: 0
diff --git a/.github/workflows/web-tests.yml b/.github/workflows/web-tests.yml
index 0fd1d5d22b..191ce56aaa 100644
--- a/.github/workflows/web-tests.yml
+++ b/.github/workflows/web-tests.yml
@@ -31,7 +31,7 @@ jobs:
- name: Setup Node.js
uses: actions/setup-node@v6
with:
- node-version: 22
+ node-version: 24
cache: pnpm
cache-dependency-path: ./web/pnpm-lock.yaml
@@ -366,3 +366,48 @@ jobs:
path: web/coverage
retention-days: 30
if-no-files-found: error
+
+ web-build:
+ name: Web Build
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: ./web
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v6
+ with:
+ persist-credentials: false
+
+ - name: Check changed files
+ id: changed-files
+ uses: tj-actions/changed-files@v47
+ with:
+ files: |
+ web/**
+ .github/workflows/web-tests.yml
+
+ - name: Install pnpm
+ uses: pnpm/action-setup@v4
+ with:
+ package_json_file: web/package.json
+ run_install: false
+
+ - name: Setup NodeJS
+ uses: actions/setup-node@v6
+ if: steps.changed-files.outputs.any_changed == 'true'
+ with:
+ node-version: 24
+ cache: pnpm
+ cache-dependency-path: ./web/pnpm-lock.yaml
+
+ - name: Web dependencies
+ if: steps.changed-files.outputs.any_changed == 'true'
+ working-directory: ./web
+ run: pnpm install --frozen-lockfile
+
+ - name: Web build check
+ if: steps.changed-files.outputs.any_changed == 'true'
+ working-directory: ./web
+ run: pnpm run build
diff --git a/AGENTS.md b/AGENTS.md
index 782861ad36..deab7c8629 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -12,12 +12,8 @@ The codebase is split into:
## Backend Workflow
+- Read `api/AGENTS.md` for details
- Run backend CLI commands through `uv run --project api <command>`.
-
-- Before submission, all backend modifications must pass local checks: `make lint`, `make type-check`, and `uv run --project api --dev dev/pytest/pytest_unit_tests.sh`.
-
-- Use Makefile targets for linting and formatting; `make lint` and `make type-check` cover the required checks.
-
- Integration tests are CI-only and are not expected to run in the local environment.
## Frontend Workflow
diff --git a/Makefile b/Makefile
index 60c32948b9..e92a7b1314 100644
--- a/Makefile
+++ b/Makefile
@@ -61,7 +61,8 @@ check:
lint:
@echo "🔧 Running ruff format, check with fixes, import linter, and dotenv-linter..."
- @uv run --project api --dev sh -c 'ruff format ./api && ruff check --fix ./api'
+ @uv run --project api --dev ruff format ./api
+ @uv run --project api --dev ruff check --fix ./api
@uv run --directory api --dev lint-imports
@uv run --project api --dev dotenv-linter ./api/.env.example ./web/.env.example
@echo "✅ Linting complete"
@@ -73,7 +74,12 @@ type-check:
test:
@echo "🧪 Running backend unit tests..."
- @uv run --project api --dev dev/pytest/pytest_unit_tests.sh
+ @if [ -n "$(TARGET_TESTS)" ]; then \
+ echo "Target: $(TARGET_TESTS)"; \
+ uv run --project api --dev pytest $(TARGET_TESTS); \
+ else \
+ uv run --project api --dev dev/pytest/pytest_unit_tests.sh; \
+ fi
@echo "✅ Tests complete"
# Build Docker images
@@ -125,7 +131,7 @@ help:
@echo " make check - Check code with ruff"
@echo " make lint - Format, fix, and lint code (ruff, imports, dotenv)"
@echo " make type-check - Run type checking with basedpyright"
- @echo " make test - Run backend unit tests"
+ @echo " make test - Run backend unit tests (or TARGET_TESTS=./api/tests/)"
@echo ""
@echo "Docker Build Targets:"
@echo " make build-web - Build web Docker image"
diff --git a/agent-notes/.gitkeep b/agent-notes/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/api/.env.example b/api/.env.example
index 44d770ed70..15981c14b8 100644
--- a/api/.env.example
+++ b/api/.env.example
@@ -417,6 +417,8 @@ SMTP_USERNAME=123
SMTP_PASSWORD=abc
SMTP_USE_TLS=true
SMTP_OPPORTUNISTIC_TLS=false
+# Optional: override the local hostname used for SMTP HELO/EHLO
+SMTP_LOCAL_HOSTNAME=
# Sendgid configuration
SENDGRID_API_KEY=
# Sentry configuration
@@ -589,6 +591,7 @@ ENABLE_CLEAN_UNUSED_DATASETS_TASK=false
ENABLE_CREATE_TIDB_SERVERLESS_TASK=false
ENABLE_UPDATE_TIDB_SERVERLESS_STATUS_TASK=false
ENABLE_CLEAN_MESSAGES=false
+ENABLE_WORKFLOW_RUN_CLEANUP_TASK=false
ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK=false
ENABLE_DATASETS_QUEUE_MONITOR=false
ENABLE_CHECK_UPGRADABLE_PLUGIN_TASK=true
@@ -712,3 +715,4 @@ ANNOTATION_IMPORT_MAX_CONCURRENT=5
SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD=21
SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE=1000
SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS=30
+
diff --git a/api/AGENTS.md b/api/AGENTS.md
index 17398ec4b8..6ce419828b 100644
--- a/api/AGENTS.md
+++ b/api/AGENTS.md
@@ -1,62 +1,236 @@
-# Agent Skill Index
+# API Agent Guide
+
+## Agent Notes (must-check)
+
+Before you start work on any backend file under `api/`, you MUST check whether a related note exists under:
+
+- `agent-notes/<path>/<filename>.md`
+
+Rules:
+
+- **Path mapping**: for a target file `<path>/<filename>.py`, the note must be `agent-notes/<path>/<filename>.py.md` (same folder structure, same filename, plus `.md`). For example, `controllers/console/datasets/datasets_document.py` maps to `agent-notes/controllers/console/datasets/datasets_document.py.md`.
+- **Before working**:
+ - If the note exists, read it first and follow any constraints/decisions recorded there.
+ - If the note conflicts with the current code, or references an "origin" file/path that has been deleted, renamed, or migrated, treat the **code as the single source of truth** and update the note to match reality.
+ - If the note does not exist, create it with a short architecture/intent summary and any relevant invariants/edge cases.
+- **During working**:
+ - Keep the note in sync as you discover constraints, make decisions, or change approach.
+ - If you move/rename a file, migrate its note to the new mapped path (and fix any outdated references inside the note).
+ - Record non-obvious edge cases, trade-offs, and the test/verification plan as you go (not just at the end).
+ - Keep notes **coherent**: integrate new findings into the relevant sections and rewrite for clarity; avoid append-only “recent fix” / changelog-style additions unless the note is explicitly intended to be a changelog.
+- **When finishing work**:
+ - Update the related note(s) to reflect what changed, why, and any new edge cases/tests.
+ - If a file is deleted, remove or clearly deprecate the corresponding note so it cannot be mistaken as current guidance.
+ - Keep notes concise and accurate; they are meant to prevent repeated rediscovery.
+
+## Skill Index
Start with the section that best matches your need. Each entry lists the problems it solves plus key files/concepts so you know what to expect before opening it.
-______________________________________________________________________
+### Platform Foundations
-## Platform Foundations
-
-- **[Infrastructure Overview](agent_skills/infra.md)**\
- When to read this:
+#### [Infrastructure Overview](agent_skills/infra.md)
+- **When to read this**
- You need to understand where a feature belongs in the architecture.
- You’re wiring storage, Redis, vector stores, or OTEL.
- - You’re about to add CLI commands or async jobs.\
- What it covers: configuration stack (`configs/app_config.py`, remote settings), storage entry points (`extensions/ext_storage.py`, `core/file/file_manager.py`), Redis conventions (`extensions/ext_redis.py`), plugin runtime topology, vector-store factory (`core/rag/datasource/vdb/*`), observability hooks, SSRF proxy usage, and core CLI commands.
+ - You’re about to add CLI commands or async jobs.
+- **What it covers**
+ - Configuration stack (`configs/app_config.py`, remote settings)
+ - Storage entry points (`extensions/ext_storage.py`, `core/file/file_manager.py`)
+ - Redis conventions (`extensions/ext_redis.py`)
+ - Plugin runtime topology
+ - Vector-store factory (`core/rag/datasource/vdb/*`)
+ - Observability hooks
+ - SSRF proxy usage
+ - Core CLI commands
-- **[Coding Style](agent_skills/coding_style.md)**\
- When to read this:
+### Plugin & Extension Development
- - You’re writing or reviewing backend code and need the authoritative checklist.
- - You’re unsure about Pydantic validators, SQLAlchemy session usage, or logging patterns.
- - You want the exact lint/type/test commands used in PRs.\
- Includes: Ruff & BasedPyright commands, no-annotation policy, session examples (`with Session(db.engine, ...)`), `@field_validator` usage, logging expectations, and the rule set for file size, helpers, and package management.
-
-______________________________________________________________________
-
-## Plugin & Extension Development
-
-- **[Plugin Systems](agent_skills/plugin.md)**\
- When to read this:
+#### [Plugin Systems](agent_skills/plugin.md)
+- **When to read this**
- You’re building or debugging a marketplace plugin.
- - You need to know how manifests, providers, daemons, and migrations fit together.\
- What it covers: plugin manifests (`core/plugin/entities/plugin.py`), installation/upgrade flows (`services/plugin/plugin_service.py`, CLI commands), runtime adapters (`core/plugin/impl/*` for tool/model/datasource/trigger/endpoint/agent), daemon coordination (`core/plugin/entities/plugin_daemon.py`), and how provider registries surface capabilities to the rest of the platform.
+ - You need to know how manifests, providers, daemons, and migrations fit together.
+- **What it covers**
+ - Plugin manifests (`core/plugin/entities/plugin.py`)
+ - Installation/upgrade flows (`services/plugin/plugin_service.py`, CLI commands)
+ - Runtime adapters (`core/plugin/impl/*` for tool/model/datasource/trigger/endpoint/agent)
+ - Daemon coordination (`core/plugin/entities/plugin_daemon.py`)
+ - How provider registries surface capabilities to the rest of the platform
-- **[Plugin OAuth](agent_skills/plugin_oauth.md)**\
- When to read this:
+#### [Plugin OAuth](agent_skills/plugin_oauth.md)
+- **When to read this**
- You must integrate OAuth for a plugin or datasource.
- - You’re handling credential encryption or refresh flows.\
- Topics: credential storage, encryption helpers (`core/helper/provider_encryption.py`), OAuth client bootstrap (`services/plugin/oauth_service.py`, `services/plugin/plugin_parameter_service.py`), and how console/API layers expose the flows.
+ - You’re handling credential encryption or refresh flows.
+- **Topics**
+ - Credential storage
+ - Encryption helpers (`core/helper/provider_encryption.py`)
+ - OAuth client bootstrap (`services/plugin/oauth_service.py`, `services/plugin/plugin_parameter_service.py`)
+ - How console/API layers expose the flows
-______________________________________________________________________
+### Workflow Entry & Execution
-## Workflow Entry & Execution
+#### [Trigger Concepts](agent_skills/trigger.md)
-- **[Trigger Concepts](agent_skills/trigger.md)**\
- When to read this:
+- **When to read this**
- You’re debugging why a workflow didn’t start.
- You’re adding a new trigger type or hook.
- - You need to trace async execution, draft debugging, or webhook/schedule pipelines.\
- Details: Start-node taxonomy, webhook & schedule internals (`core/workflow/nodes/trigger_*`, `services/trigger/*`), async orchestration (`services/async_workflow_service.py`, Celery queues), debug event bus, and storage/logging interactions.
+ - You need to trace async execution, draft debugging, or webhook/schedule pipelines.
+- **Details**
+ - Start-node taxonomy
+ - Webhook & schedule internals (`core/workflow/nodes/trigger_*`, `services/trigger/*`)
+ - Async orchestration (`services/async_workflow_service.py`, Celery queues)
+ - Debug event bus
+ - Storage/logging interactions
-______________________________________________________________________
+## General Reminders
-## Additional Notes for Agents
-
-- All skill docs assume you follow the coding style guide—run Ruff/BasedPyright/tests listed there before submitting changes.
+- All skill docs assume you follow the coding style rules below—run the lint/type/test commands before submitting changes.
- When you cannot find an answer in these briefs, search the codebase using the paths referenced (e.g., `core/plugin/impl/tool.py`, `services/dataset_service.py`).
- If you run into cross-cutting concerns (tenancy, configuration, storage), check the infrastructure guide first; it links to most supporting modules.
- Keep multi-tenancy and configuration central: everything flows through `configs.dify_config` and `tenant_id`.
- When touching plugins or triggers, consult both the system overview and the specialised doc to ensure you adjust lifecycle, storage, and observability consistently.
+
+## Coding Style
+
+This is the default standard for backend code in this repo. Follow it for new code and use it as the checklist when reviewing changes.
+
+### Linting & Formatting
+
+- Use Ruff for formatting and linting (follow `.ruff.toml`).
+- Keep each line under 120 characters (including spaces).
+
+### Naming Conventions
+
+- Use `snake_case` for variables and functions.
+- Use `PascalCase` for classes.
+- Use `UPPER_CASE` for constants.
+
+### Typing & Class Layout
+
+- Code should usually include type annotations that match the repo’s current Python version (avoid untyped public APIs and “mystery” values).
+- Prefer modern typing forms (e.g. `list[str]`, `dict[str, int]`) and avoid `Any` unless there’s a strong reason.
+- For classes, declare member variables at the top of the class body (before `__init__`) so the class shape is obvious at a glance:
+
+```python
+from datetime import datetime
+
+
+class Example:
+ user_id: str
+ created_at: datetime
+
+ def __init__(self, user_id: str, created_at: datetime) -> None:
+ self.user_id = user_id
+ self.created_at = created_at
+```
+
+### General Rules
+
+- Use Pydantic v2 conventions.
+- Use `uv` for Python package management in this repo (usually with `--project api`).
+- Prefer simple functions over small “utility classes” for lightweight helpers.
+- Avoid implementing dunder methods unless it’s clearly needed and matches existing patterns.
+- Never start long-running services as part of agent work (`uv run app.py`, `flask run`, etc.); running tests is allowed.
+- Keep files below ~800 lines; split when necessary.
+- Keep code readable and explicit—avoid clever hacks.
+
+### Architecture & Boundaries
+
+- Mirror the layered architecture: controller → service → core/domain.
+- Reuse existing helpers in `core/`, `services/`, and `libs/` before creating new abstractions.
+- Optimise for observability: deterministic control flow, clear logging, actionable errors.
+
+### Logging & Errors
+
+- Never use `print`; use a module-level logger:
+ - `logger = logging.getLogger(__name__)`
+- Include tenant/app/workflow identifiers in log context when relevant.
+- Raise domain-specific exceptions (`services/errors`, `core/errors`) and translate them into HTTP responses in controllers.
+- Log retryable events at `warning`, terminal failures at `error`.
+
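+A minimal sketch of the logging and error-translation rules above; `WorkflowNotFoundError` and the dict-backed lookup are illustrative stand-ins, not actual repo classes:
+
+```python
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class WorkflowNotFoundError(Exception):
+    """Domain-level error; in this repo it would live under services/errors and map to a 404 in the controller."""
+
+
+def publish_workflow(tenant_id: str, workflow_id: str, workflows: dict[str, str]) -> None:
+    # Include tenant/workflow identifiers in the log context so failures are traceable.
+    if workflow_id not in workflows:
+        logger.warning("workflow not found, tenant_id=%s workflow_id=%s", tenant_id, workflow_id)
+        raise WorkflowNotFoundError(f"workflow {workflow_id} not found")
+    logger.info("workflow published, tenant_id=%s workflow_id=%s", tenant_id, workflow_id)
+```
+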
+### SQLAlchemy Patterns
+
+- Models inherit from `models.base.TypeBase`; do not create ad-hoc metadata or engines.
+- Open sessions with context managers:
+
+```python
+from sqlalchemy.orm import Session
+
+with Session(db.engine, expire_on_commit=False) as session:
+ stmt = select(Workflow).where(
+ Workflow.id == workflow_id,
+ Workflow.tenant_id == tenant_id,
+ )
+ workflow = session.execute(stmt).scalar_one_or_none()
+```
+
+- Prefer SQLAlchemy expressions; avoid raw SQL unless necessary.
+- Always scope queries by `tenant_id` and protect write paths with safeguards (`FOR UPDATE`, row counts, etc.).
+- Introduce repository abstractions only for very large tables (e.g., workflow executions) or when alternative storage strategies are required.
+
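+A sketch of the tenant-scoping and row-locking rules above, reusing the `Workflow`/`Session` shapes from the earlier example (the `status` column is assumed for illustration):
+
+```python
+from sqlalchemy import select
+from sqlalchemy.orm import Session
+
+
+def mark_workflow_published(workflow_id: str, tenant_id: str) -> bool:
+    with Session(db.engine, expire_on_commit=False) as session:
+        stmt = (
+            select(Workflow)
+            .where(
+                Workflow.id == workflow_id,
+                Workflow.tenant_id == tenant_id,  # always scope by tenant
+            )
+            .with_for_update()  # lock the row before mutating it
+        )
+        workflow = session.execute(stmt).scalar_one_or_none()
+        if workflow is None:
+            return False
+        workflow.status = "published"  # assumed column, shown only to illustrate a guarded write
+        session.commit()
+        return True
+```
+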
+### Storage & External I/O
+
+- Access storage via `extensions.ext_storage.storage`.
+- Use `core.helper.ssrf_proxy` for outbound HTTP fetches.
+- Background tasks that touch storage must be idempotent, and should log relevant object identifiers.
+
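+A hedged sketch of the storage and outbound-HTTP rules above; it assumes `storage.exists`/`storage.save` and an httpx-style `ssrf_proxy.get(...)`, so verify the helper signatures before relying on them:
+
+```python
+import logging
+
+from core.helper import ssrf_proxy
+from extensions.ext_storage import storage
+
+logger = logging.getLogger(__name__)
+
+
+def mirror_remote_file(tenant_id: str, url: str, object_key: str) -> None:
+    """Fetch a remote file through the SSRF proxy and persist it; safe to re-run."""
+    if storage.exists(object_key):  # idempotency guard (method name assumed)
+        logger.info("object already mirrored, tenant_id=%s key=%s", tenant_id, object_key)
+        return
+
+    response = ssrf_proxy.get(url)  # never fetch the URL directly
+    response.raise_for_status()
+    storage.save(object_key, response.content)  # assumed signature: (key, bytes)
+    logger.info("object mirrored, tenant_id=%s key=%s", tenant_id, object_key)
+```
+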
+### Pydantic Usage
+
+- Define DTOs with Pydantic v2 models and forbid extras by default.
+- Use `@field_validator` / `@model_validator` for domain rules.
+
+Example:
+
+```python
+from pydantic import BaseModel, ConfigDict, HttpUrl, field_validator
+
+
+class TriggerConfig(BaseModel):
+ endpoint: HttpUrl
+ secret: str
+
+ model_config = ConfigDict(extra="forbid")
+
+ @field_validator("secret")
+ def ensure_secret_prefix(cls, value: str) -> str:
+ if not value.startswith("dify_"):
+ raise ValueError("secret must start with dify_")
+ return value
+```
+
+### Generics & Protocols
+
+- Use `typing.Protocol` to define behavioural contracts (e.g., cache interfaces).
+- Apply generics (`TypeVar`, `Generic`) for reusable utilities like caches or providers.
+- Validate dynamic inputs at runtime when generics cannot enforce safety alone.
+
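+A small illustration of the Protocol/generics guidance (the cache names are illustrative):
+
+```python
+from typing import Generic, Protocol, TypeVar
+
+T = TypeVar("T")
+
+
+class Cache(Protocol[T]):
+    """Behavioural contract: anything with get/set of T can be used as a cache."""
+
+    def get(self, key: str) -> T | None: ...
+
+    def set(self, key: str, value: T) -> None: ...
+
+
+class InMemoryCache(Generic[T]):
+    def __init__(self) -> None:
+        self._data: dict[str, T] = {}
+
+    def get(self, key: str) -> T | None:
+        return self._data.get(key)
+
+    def set(self, key: str, value: T) -> None:
+        self._data[key] = value
+
+
+def get_or_set(cache: Cache[str], key: str, value: str) -> str:
+    # InMemoryCache[str]() satisfies Cache[str] structurally; no inheritance needed.
+    cached = cache.get(key)
+    if cached is not None:
+        return cached
+    cache.set(key, value)
+    return value
+```
+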
+### Tooling & Checks
+
+Quick checks while iterating:
+
+- Format: `make format`
+- Lint (includes auto-fix): `make lint`
+- Type check: `make type-check`
+- Targeted tests: `make test TARGET_TESTS=./api/tests/`
+
+Before opening a PR / submitting:
+
+- `make lint`
+- `make type-check`
+- `make test`
+
+### Controllers & Services
+
+- Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic.
+- Services: coordinate repositories, providers, background tasks; keep side effects explicit.
+- Document non-obvious behaviour with concise comments.
+
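+A schematic controller/service split following the rules above; the payload model, service, and in-memory "repository" are illustrative rather than actual repo classes:
+
+```python
+from pydantic import BaseModel, ConfigDict
+
+
+class RenameAppPayload(BaseModel):
+    name: str
+
+    model_config = ConfigDict(extra="forbid")
+
+
+class AppService:
+    """Service layer: owns the business rule and the explicit side effect."""
+
+    def __init__(self) -> None:
+        self._apps: dict[str, str] = {}  # stand-in for repository access
+
+    def rename_app(self, tenant_id: str, app_id: str, name: str) -> None:
+        # Real code would load the row scoped by tenant_id and persist the change.
+        self._apps[f"{tenant_id}:{app_id}"] = name
+
+
+def rename_app_controller(service: AppService, tenant_id: str, app_id: str, body: dict[str, str]) -> dict[str, str]:
+    """Controller layer: parse input, call the service, serialise the response. No business logic."""
+    payload = RenameAppPayload.model_validate(body)
+    service.rename_app(tenant_id=tenant_id, app_id=app_id, name=payload.name)
+    return {"result": "success"}
+```
+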
+### Miscellaneous
+
+- Use `configs.dify_config` for configuration—never read environment variables directly.
+- Maintain tenant awareness end-to-end; `tenant_id` must flow through every layer touching shared resources.
+- Queue async work through `services/async_workflow_service`; implement tasks under `tasks/` with explicit queue selection.
+- Keep experimental scripts under `dev/`; do not ship them in production builds.
diff --git a/api/agent-notes/controllers/console/datasets/datasets_document.py.md b/api/agent-notes/controllers/console/datasets/datasets_document.py.md
new file mode 100644
index 0000000000..b100249981
--- /dev/null
+++ b/api/agent-notes/controllers/console/datasets/datasets_document.py.md
@@ -0,0 +1,52 @@
+## Purpose
+
+`api/controllers/console/datasets/datasets_document.py` contains the console (authenticated) APIs for managing dataset documents (list/create/update/delete, processing controls, estimates, etc.).
+
+## Storage model (uploaded files)
+
+- For local file uploads into a knowledge base, the binary is stored via `extensions.ext_storage.storage` under the key:
+ - `upload_files/<tenant_id>/<uuid>.<extension>`
+- File metadata is stored in the `upload_files` table (`UploadFile` model), keyed by `UploadFile.id`.
+- Dataset `Document` records reference the uploaded file via:
+ - `Document.data_source_info.upload_file_id`
+
+## Download endpoint
+
+- `GET /datasets/<dataset_id>/documents/<document_id>/download`
+
+ - Only supported when `Document.data_source_type == "upload_file"`.
+ - Performs dataset permission + tenant checks via `DocumentResource.get_document(...)`.
+ - Delegates `Document -> UploadFile` validation and signed URL generation to `DocumentService.get_document_download_url(...)`.
+ - Applies `cloud_edition_billing_rate_limit_check("knowledge")` to match other KB operations.
+ - Response body is **only**: `{ "url": "<signed-url>" }`.
+
+- `POST /datasets/<dataset_id>/documents/download-zip`
+
+ - Accepts `{ "document_ids": ["..."] }` (upload-file only).
+ - Returns `application/zip` as a single attachment download.
+ - Rationale: browsers often block multiple automatic downloads; a ZIP avoids that limitation.
+ - Applies `cloud_edition_billing_rate_limit_check("knowledge")`.
+ - Delegates dataset permission checks, document/upload-file validation, and download-name generation to
+ `DocumentService.prepare_document_batch_download_zip(...)` before streaming the ZIP.
+
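+A hypothetical client-side sketch of the two endpoints above; the `/console/api` prefix and bearer-token auth are assumptions to verify against the actual deployment:
+
+```python
+import httpx
+
+BASE_URL = "https://example.com/console/api"  # assumed console API prefix
+HEADERS = {"Authorization": "Bearer <console-access-token>"}  # placeholder credentials
+
+
+def get_document_download_url(dataset_id: str, document_id: str) -> str:
+    resp = httpx.get(
+        f"{BASE_URL}/datasets/{dataset_id}/documents/{document_id}/download",
+        headers=HEADERS,
+    )
+    resp.raise_for_status()
+    return resp.json()["url"]  # body is only {"url": "<signed-url>"}
+
+
+def download_documents_as_zip(dataset_id: str, document_ids: list[str], out_path: str) -> None:
+    resp = httpx.post(
+        f"{BASE_URL}/datasets/{dataset_id}/documents/download-zip",
+        headers=HEADERS,
+        json={"document_ids": document_ids},
+    )
+    resp.raise_for_status()
+    with open(out_path, "wb") as f:
+        f.write(resp.content)  # application/zip attachment
+```
+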
+## Verification plan
+
+- Upload a document from a local file into a dataset.
+- Call the download endpoint and confirm it returns a signed URL.
+- Open the URL and confirm:
+ - Response headers force download (`Content-Disposition`), and
+ - Downloaded bytes match the uploaded file.
+- Select multiple uploaded-file documents and download as ZIP; confirm all selected files exist in the archive.
+
+## Shared helper
+
+- `DocumentService.get_document_download_url(document)` resolves the `UploadFile` and signs a download URL.
+- `DocumentService.prepare_document_batch_download_zip(...)` performs dataset permission checks, batches
+ document + upload file lookups, preserves request order, and generates the client-visible ZIP filename.
+- Internal helpers now live in `DocumentService` (`_get_upload_file_id_for_upload_file_document(...)`,
+ `_get_upload_file_for_upload_file_document(...)`, `_get_upload_files_by_document_id_for_zip_download(...)`).
+- ZIP packing is handled by `FileService.build_upload_files_zip_tempfile(...)`, which also:
+ - sanitizes entry names to avoid path traversal, and
+ - deduplicates names while preserving extensions (e.g., `doc.txt` → `doc (1).txt`).
+ Streaming the response and deferring cleanup is handled by the route via `send_file(path, ...)` + `ExitStack` +
+ `response.call_on_close(...)` (the file is deleted when the response is closed).
diff --git a/api/agent-notes/services/dataset_service.py.md b/api/agent-notes/services/dataset_service.py.md
new file mode 100644
index 0000000000..b68ef345f5
--- /dev/null
+++ b/api/agent-notes/services/dataset_service.py.md
@@ -0,0 +1,18 @@
+## Purpose
+
+`api/services/dataset_service.py` hosts dataset/document service logic used by console and API controllers.
+
+## Batch document operations
+
+- Batch document workflows should avoid N+1 database queries by using set-based lookups.
+- Tenant checks must be enforced consistently across dataset/document operations.
+- `DocumentService.get_documents_by_ids(...)` fetches documents for a dataset using `id.in_(...)`.
+- `FileService.get_upload_files_by_ids(...)` performs tenant-scoped batch lookup for `UploadFile` (dedupes ids with `set(...)`).
+- `DocumentService.get_document_download_url(...)` and `prepare_document_batch_download_zip(...)` handle
+ dataset/document permission checks plus `Document -> UploadFile` validation for download endpoints.
+
+## Verification plan
+
+- Exercise document list and download endpoints that use the service helpers.
+- Confirm batch download uses constant query count for documents + upload files.
+- Request a ZIP with a missing document id and confirm a 404 is returned.
diff --git a/api/agent-notes/services/file_service.py.md b/api/agent-notes/services/file_service.py.md
new file mode 100644
index 0000000000..cf394a1c05
--- /dev/null
+++ b/api/agent-notes/services/file_service.py.md
@@ -0,0 +1,35 @@
+## Purpose
+
+`api/services/file_service.py` owns business logic around `UploadFile` objects: upload validation, storage persistence,
+previews/generators, and deletion.
+
+## Key invariants
+
+- All storage I/O goes through `extensions.ext_storage.storage`.
+- Uploaded file keys follow: `upload_files/<tenant_id>/<uuid>.<extension>`.
+- Upload validation is enforced in `FileService.upload_file(...)` (blocked extensions, size limits, dataset-only types).
+
+## Batch lookup helpers
+
+- `FileService.get_upload_files_by_ids(tenant_id, upload_file_ids)` is the canonical tenant-scoped batch loader for
+ `UploadFile`.
+
+## Dataset document download helpers
+
+The dataset document download/ZIP endpoints now delegate “Document → UploadFile” validation and permission checks to
+`DocumentService` (`api/services/dataset_service.py`). `FileService` stays focused on generic `UploadFile` operations
+(uploading, previews, deletion), plus generic ZIP serving.
+
+### ZIP serving
+
+- `FileService.build_upload_files_zip_tempfile(...)` builds a ZIP from `UploadFile` objects and yields a seeked
+ tempfile **path** so callers can stream it (e.g., `send_file(path, ...)`) without hitting "read of closed file"
+ issues from file-handle lifecycle during streamed responses.
+- Flask `send_file(...)` and the `ExitStack`/`call_on_close(...)` cleanup pattern are handled in the route layer.
+
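+A sketch of the route-layer streaming/cleanup pattern described above; the service call comes from this note, while the surrounding wiring (argument types, helper name) is illustrative:
+
+```python
+from contextlib import ExitStack
+
+from flask import send_file
+
+from services.file_service import FileService
+
+
+def stream_zip_response(upload_files: list, download_name: str):  # elements are UploadFile rows
+    stack = ExitStack()
+    # Assumed to be a context manager yielding the path of a ready-to-read ZIP tempfile.
+    zip_path = stack.enter_context(FileService.build_upload_files_zip_tempfile(upload_files))
+    response = send_file(
+        zip_path,
+        mimetype="application/zip",
+        as_attachment=True,
+        download_name=download_name,
+    )
+    # Defer tempfile cleanup until the streamed response is fully closed.
+    response.call_on_close(stack.close)
+    return response
+```
+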
+## Verification plan
+
+- Unit: `api/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py`
+ - Verify signed URL generation for upload-file documents and ZIP download behavior for multiple documents.
+- Unit: `api/tests/unit_tests/services/test_file_service_zip_and_lookup.py`
+ - Verify ZIP packing produces a valid, openable archive and preserves file content.
diff --git a/api/agent-notes/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py.md b/api/agent-notes/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py.md
new file mode 100644
index 0000000000..8f78dacde8
--- /dev/null
+++ b/api/agent-notes/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py.md
@@ -0,0 +1,28 @@
+## Purpose
+
+Unit tests for the console dataset document download endpoint:
+
+- `GET /datasets/<dataset_id>/documents/<document_id>/download`
+
+## Testing approach
+
+- Uses `Flask.test_request_context()` and calls the `Resource.get(...)` method directly.
+- Monkeypatches console decorators (`login_required`, `setup_required`, rate limit) to no-ops to keep the test focused.
+- Mocks:
+ - `DatasetService.get_dataset` / `check_dataset_permission`
+ - `DocumentService.get_document` for single-file download tests
+ - `DocumentService.get_documents_by_ids` + `FileService.get_upload_files_by_ids` for ZIP download tests
+ - `FileService.get_upload_files_by_ids` for `UploadFile` lookups in single-file tests
+ - `services.dataset_service.file_helpers.get_signed_file_url` to return a deterministic URL
+- Document mocks include `id` fields so batch lookups can map documents by id.
+
+## Covered cases
+
+- Success returns `{ "url": "<signed-url>" }` for upload-file documents.
+- 404 when document is not `upload_file`.
+- 404 when `upload_file_id` is missing.
+- 404 when referenced `UploadFile` row does not exist.
+- 403 when document tenant does not match current tenant.
+- Batch ZIP download returns `application/zip` for upload-file documents.
+- Batch ZIP download rejects non-upload-file documents.
+- Batch ZIP download uses a random `.zip` attachment name (`download_name`), so tests only assert the suffix.
diff --git a/api/agent-notes/tests/unit_tests/services/test_file_service_zip_and_lookup.py.md b/api/agent-notes/tests/unit_tests/services/test_file_service_zip_and_lookup.py.md
new file mode 100644
index 0000000000..dbcdf26f10
--- /dev/null
+++ b/api/agent-notes/tests/unit_tests/services/test_file_service_zip_and_lookup.py.md
@@ -0,0 +1,18 @@
+## Purpose
+
+Unit tests for `api/services/file_service.py` helper methods that are not covered by higher-level controller tests.
+
+## What’s covered
+
+- `FileService.build_upload_files_zip_tempfile(...)`
+ - ZIP entry name sanitization (no directory components / traversal)
+ - name deduplication while preserving extensions
+ - writing streamed bytes from `storage.load(...)` into ZIP entries
+ - yields a tempfile path so callers can open/stream the ZIP without holding a live file handle
+- `FileService.get_upload_files_by_ids(...)`
+ - returns `{}` for empty id lists
+ - returns an id-keyed mapping for non-empty lists
+
+## Notes
+
+- These tests intentionally stub `storage.load` and `db.session.scalars(...).all()` to avoid needing a real DB/storage; a minimal stubbing sketch follows.
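+
+A minimal sketch of that stubbing approach, assuming `file_service` imports the `storage` facade and
+`db` at module level (patch targets and attribute shapes are assumptions, not the exact test code):
+
+```python
+from types import SimpleNamespace
+from unittest.mock import patch
+
+
+def test_lookup_and_zip_stubs():
+    upload_file = SimpleNamespace(id="f1", key="upload_files/t1/f1.txt", name="notes.txt")
+
+    with (
+        patch("services.file_service.storage") as mock_storage,
+        patch("services.file_service.db") as mock_db,
+    ):
+        # Bytes that build_upload_files_zip_tempfile(...) would stream into ZIP entries.
+        mock_storage.load.return_value = b"hello"
+        # Shape of the batch lookup query: db.session.scalars(...).all() -> [UploadFile, ...]
+        mock_db.session.scalars.return_value.all.return_value = [upload_file]
+
+        # The real tests then call the FileService helpers and open the produced ZIP path
+        # with zipfile.ZipFile to assert entry names and contents.
+        assert mock_db.session.scalars("stmt-placeholder").all() == [upload_file]
+        assert mock_storage.load(upload_file.key) == b"hello"
+```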
diff --git a/api/agent_skills/coding_style.md b/api/agent_skills/coding_style.md
deleted file mode 100644
index a2b66f0bd5..0000000000
--- a/api/agent_skills/coding_style.md
+++ /dev/null
@@ -1,115 +0,0 @@
-## Linter
-
-- Always follow `.ruff.toml`.
-- Run `uv run ruff check --fix --unsafe-fixes`.
-- Keep each line under 100 characters (including spaces).
-
-## Code Style
-
-- `snake_case` for variables and functions.
-- `PascalCase` for classes.
-- `UPPER_CASE` for constants.
-
-## Rules
-
-- Use Pydantic v2 standard.
-- Use `uv` for package management.
-- Do not override dunder methods like `__init__`, `__iadd__`, etc.
-- Never launch services (`uv run app.py`, `flask run`, etc.); running tests under `tests/` is allowed.
-- Prefer simple functions over classes for lightweight helpers.
-- Keep files below 800 lines; split when necessary.
-- Keep code readable—no clever hacks.
-- Never use `print`; log with `logger = logging.getLogger(__name__)`.
-
-## Guiding Principles
-
-- Mirror the project’s layered architecture: controller → service → core/domain.
-- Reuse existing helpers in `core/`, `services/`, and `libs/` before creating new abstractions.
-- Optimise for observability: deterministic control flow, clear logging, actionable errors.
-
-## SQLAlchemy Patterns
-
-- Models inherit from `models.base.Base`; never create ad-hoc metadata or engines.
-
-- Open sessions with context managers:
-
- ```python
- from sqlalchemy.orm import Session
-
- with Session(db.engine, expire_on_commit=False) as session:
- stmt = select(Workflow).where(
- Workflow.id == workflow_id,
- Workflow.tenant_id == tenant_id,
- )
- workflow = session.execute(stmt).scalar_one_or_none()
- ```
-
-- Use SQLAlchemy expressions; avoid raw SQL unless necessary.
-
-- Introduce repository abstractions only for very large tables (e.g., workflow executions) to support alternative storage strategies.
-
-- Always scope queries by `tenant_id` and protect write paths with safeguards (`FOR UPDATE`, row counts, etc.).
-
-## Storage & External IO
-
-- Access storage via `extensions.ext_storage.storage`.
-- Use `core.helper.ssrf_proxy` for outbound HTTP fetches.
-- Background tasks that touch storage must be idempotent and log the relevant object identifiers.
-
-## Pydantic Usage
-
-- Define DTOs with Pydantic v2 models and forbid extras by default.
-
-- Use `@field_validator` / `@model_validator` for domain rules.
-
-- Example:
-
- ```python
- from pydantic import BaseModel, ConfigDict, HttpUrl, field_validator
-
- class TriggerConfig(BaseModel):
- endpoint: HttpUrl
- secret: str
-
- model_config = ConfigDict(extra="forbid")
-
- @field_validator("secret")
- def ensure_secret_prefix(cls, value: str) -> str:
- if not value.startswith("dify_"):
- raise ValueError("secret must start with dify_")
- return value
- ```
-
-## Generics & Protocols
-
-- Use `typing.Protocol` to define behavioural contracts (e.g., cache interfaces).
-- Apply generics (`TypeVar`, `Generic`) for reusable utilities like caches or providers.
-- Validate dynamic inputs at runtime when generics cannot enforce safety alone.
-
-## Error Handling & Logging
-
-- Raise domain-specific exceptions (`services/errors`, `core/errors`) and translate to HTTP responses in controllers.
-- Declare `logger = logging.getLogger(__name__)` at module top.
-- Include tenant/app/workflow identifiers in log context.
-- Log retryable events at `warning`, terminal failures at `error`.
-
-## Tooling & Checks
-
-- Format/lint: `uv run --project api --dev ruff format ./api` and `uv run --project api --dev ruff check --fix --unsafe-fixes ./api`.
-- Type checks: `uv run --directory api --dev basedpyright`.
-- Tests: `uv run --project api --dev dev/pytest/pytest_unit_tests.sh`.
-- Run all of the above before submitting your work.
-
-## Controllers & Services
-
-- Controllers: parse input via Pydantic, invoke services, return serialised responses; no business logic.
-- Services: coordinate repositories, providers, background tasks; keep side effects explicit.
-- Avoid repositories unless necessary; direct SQLAlchemy usage is preferred for typical tables.
-- Document non-obvious behaviour with concise comments.
-
-## Miscellaneous
-
-- Use `configs.dify_config` for configuration—never read environment variables directly.
-- Maintain tenant awareness end-to-end; `tenant_id` must flow through every layer touching shared resources.
-- Queue async work through `services/async_workflow_service`; implement tasks under `tasks/` with explicit queue selection.
-- Keep experimental scripts under `dev/`; do not ship them in production builds.
diff --git a/api/app_factory.py b/api/app_factory.py
index f827842d68..1fb01d2e91 100644
--- a/api/app_factory.py
+++ b/api/app_factory.py
@@ -71,6 +71,8 @@ def create_app() -> DifyApp:
def initialize_extensions(app: DifyApp):
+ # Initialize Flask context capture for workflow execution
+ from context.flask_app_context import init_flask_context
from extensions import (
ext_app_metrics,
ext_blueprints,
@@ -100,6 +102,8 @@ def initialize_extensions(app: DifyApp):
ext_warnings,
)
+ init_flask_context()
+
extensions = [
ext_timezone,
ext_logging,
diff --git a/api/commands.py b/api/commands.py
index 7ebf5b4874..aa7b731a27 100644
--- a/api/commands.py
+++ b/api/commands.py
@@ -1,7 +1,9 @@
import base64
+import datetime
import json
import logging
import secrets
+import time
from typing import Any
import click
@@ -34,7 +36,7 @@ from libs.rsa import generate_key_pair
from models import Tenant
from models.dataset import Dataset, DatasetCollectionBinding, DatasetMetadata, DatasetMetadataBinding, DocumentSegment
from models.dataset import Document as DatasetDocument
-from models.model import Account, App, AppAnnotationSetting, AppMode, Conversation, MessageAnnotation, UploadFile
+from models.model import App, AppAnnotationSetting, AppMode, Conversation, MessageAnnotation, UploadFile
from models.oauth import DatasourceOauthParamConfig, DatasourceProvider
from models.provider import Provider, ProviderModel
from models.provider_ids import DatasourceProviderID, ToolProviderID
@@ -45,6 +47,9 @@ from services.clear_free_plan_tenant_expired_logs import ClearFreePlanTenantExpi
from services.plugin.data_migration import PluginDataMigration
from services.plugin.plugin_migration import PluginMigration
from services.plugin.plugin_service import PluginService
+from services.retention.conversation.messages_clean_policy import create_message_clean_policy
+from services.retention.conversation.messages_clean_service import MessagesCleanService
+from services.retention.workflow_run.clear_free_plan_expired_workflow_run_logs import WorkflowRunCleanup
from tasks.remove_app_and_related_data_task import delete_draft_variables_batch
logger = logging.getLogger(__name__)
@@ -62,8 +67,10 @@ def reset_password(email, new_password, password_confirm):
if str(new_password).strip() != str(password_confirm).strip():
click.echo(click.style("Passwords do not match.", fg="red"))
return
+ normalized_email = email.strip().lower()
+
with sessionmaker(db.engine, expire_on_commit=False).begin() as session:
- account = session.query(Account).where(Account.email == email).one_or_none()
+ account = AccountService.get_account_by_email_with_case_fallback(email.strip(), session=session)
if not account:
click.echo(click.style(f"Account not found for email: {email}", fg="red"))
@@ -84,7 +91,7 @@ def reset_password(email, new_password, password_confirm):
base64_password_hashed = base64.b64encode(password_hashed).decode()
account.password = base64_password_hashed
account.password_salt = base64_salt
- AccountService.reset_login_error_rate_limit(email)
+ AccountService.reset_login_error_rate_limit(normalized_email)
click.echo(click.style("Password reset successfully.", fg="green"))
@@ -100,20 +107,22 @@ def reset_email(email, new_email, email_confirm):
if str(new_email).strip() != str(email_confirm).strip():
click.echo(click.style("New emails do not match.", fg="red"))
return
+ normalized_new_email = new_email.strip().lower()
+
with sessionmaker(db.engine, expire_on_commit=False).begin() as session:
- account = session.query(Account).where(Account.email == email).one_or_none()
+ account = AccountService.get_account_by_email_with_case_fallback(email.strip(), session=session)
if not account:
click.echo(click.style(f"Account not found for email: {email}", fg="red"))
return
try:
- email_validate(new_email)
+ email_validate(normalized_new_email)
except:
click.echo(click.style(f"Invalid email: {new_email}", fg="red"))
return
- account.email = new_email
+ account.email = normalized_new_email
click.echo(click.style("Email updated successfully.", fg="green"))
@@ -658,7 +667,7 @@ def create_tenant(email: str, language: str | None = None, name: str | None = No
return
# Create account
- email = email.strip()
+ email = email.strip().lower()
if "@" not in email:
click.echo(click.style("Invalid email address.", fg="red"))
@@ -852,6 +861,95 @@ def clear_free_plan_tenant_expired_logs(days: int, batch: int, tenant_ids: list[
click.echo(click.style("Clear free plan tenant expired logs completed.", fg="green"))
+@click.command("clean-workflow-runs", help="Clean expired workflow runs and related data for free tenants.")
+@click.option(
+ "--before-days",
+ "--days",
+ default=30,
+ show_default=True,
+ type=click.IntRange(min=0),
+ help="Delete workflow runs created before N days ago.",
+)
+@click.option("--batch-size", default=200, show_default=True, help="Batch size for selecting workflow runs.")
+@click.option(
+ "--from-days-ago",
+ default=None,
+ type=click.IntRange(min=0),
+ help="Lower bound in days ago (older). Must be paired with --to-days-ago.",
+)
+@click.option(
+ "--to-days-ago",
+ default=None,
+ type=click.IntRange(min=0),
+ help="Upper bound in days ago (newer). Must be paired with --from-days-ago.",
+)
+@click.option(
+ "--start-from",
+ type=click.DateTime(formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S"]),
+ default=None,
+ help="Optional lower bound (inclusive) for created_at; must be paired with --end-before.",
+)
+@click.option(
+ "--end-before",
+ type=click.DateTime(formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S"]),
+ default=None,
+ help="Optional upper bound (exclusive) for created_at; must be paired with --start-from.",
+)
+@click.option(
+ "--dry-run",
+ is_flag=True,
+ help="Preview cleanup results without deleting any workflow run data.",
+)
+def clean_workflow_runs(
+ before_days: int,
+ batch_size: int,
+ from_days_ago: int | None,
+ to_days_ago: int | None,
+ start_from: datetime.datetime | None,
+ end_before: datetime.datetime | None,
+ dry_run: bool,
+):
+ """
+ Clean workflow runs and related workflow data for free tenants.
+ """
+ if (start_from is None) ^ (end_before is None):
+ raise click.UsageError("--start-from and --end-before must be provided together.")
+
+ if (from_days_ago is None) ^ (to_days_ago is None):
+ raise click.UsageError("--from-days-ago and --to-days-ago must be provided together.")
+
+ if from_days_ago is not None and to_days_ago is not None:
+ if start_from or end_before:
+ raise click.UsageError("Choose either day offsets or explicit dates, not both.")
+ if from_days_ago <= to_days_ago:
+ raise click.UsageError("--from-days-ago must be greater than --to-days-ago.")
+ now = datetime.datetime.now()
+ start_from = now - datetime.timedelta(days=from_days_ago)
+ end_before = now - datetime.timedelta(days=to_days_ago)
+ before_days = 0
+
+ start_time = datetime.datetime.now(datetime.UTC)
+ click.echo(click.style(f"Starting workflow run cleanup at {start_time.isoformat()}.", fg="white"))
+
+ WorkflowRunCleanup(
+ days=before_days,
+ batch_size=batch_size,
+ start_from=start_from,
+ end_before=end_before,
+ dry_run=dry_run,
+ ).run()
+
+ end_time = datetime.datetime.now(datetime.UTC)
+ elapsed = end_time - start_time
+ click.echo(
+ click.style(
+ f"Workflow run cleanup completed. start={start_time.isoformat()} "
+ f"end={end_time.isoformat()} duration={elapsed}",
+ fg="green",
+ )
+ )
+
+
@click.option("-f", "--force", is_flag=True, help="Skip user confirmation and force the command to execute.")
@click.command("clear-orphaned-file-records", help="Clear orphaned file records.")
def clear_orphaned_file_records(force: bool):
@@ -2111,3 +2209,79 @@ def migrate_oss(
except Exception as e:
db.session.rollback()
click.echo(click.style(f"Failed to update DB storage_type: {str(e)}", fg="red"))
+
+
+@click.command("clean-expired-messages", help="Clean expired messages.")
+@click.option(
+ "--start-from",
+ type=click.DateTime(formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S"]),
+ required=True,
+ help="Lower bound (inclusive) for created_at.",
+)
+@click.option(
+ "--end-before",
+ type=click.DateTime(formats=["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S"]),
+ required=True,
+ help="Upper bound (exclusive) for created_at.",
+)
+@click.option("--batch-size", default=1000, show_default=True, help="Batch size for selecting messages.")
+@click.option(
+ "--graceful-period",
+ default=21,
+ show_default=True,
+ help="Graceful period in days after subscription expiration; ignored when billing is disabled.",
+)
+@click.option("--dry-run", is_flag=True, default=False, help="Preview which messages would be cleaned without deleting them.")
+def clean_expired_messages(
+ batch_size: int,
+ graceful_period: int,
+ start_from: datetime.datetime,
+ end_before: datetime.datetime,
+ dry_run: bool,
+):
+ """
+ Clean expired messages and related data for tenants based on clean policy.
+ """
+ click.echo(click.style("clean_messages: start clean messages.", fg="green"))
+
+ start_at = time.perf_counter()
+
+ try:
+ # Create policy based on billing configuration
+ # NOTE: graceful_period will be ignored when billing is disabled.
+ policy = create_message_clean_policy(graceful_period_days=graceful_period)
+
+ # Create and run the cleanup service
+ service = MessagesCleanService.from_time_range(
+ policy=policy,
+ start_from=start_from,
+ end_before=end_before,
+ batch_size=batch_size,
+ dry_run=dry_run,
+ )
+ stats = service.run()
+
+ end_at = time.perf_counter()
+ click.echo(
+ click.style(
+ f"clean_messages: completed successfully\n"
+ f" - Latency: {end_at - start_at:.2f}s\n"
+ f" - Batches processed: {stats['batches']}\n"
+ f" - Total messages scanned: {stats['total_messages']}\n"
+ f" - Messages filtered: {stats['filtered_messages']}\n"
+ f" - Messages deleted: {stats['total_deleted']}",
+ fg="green",
+ )
+ )
+ except Exception as e:
+ end_at = time.perf_counter()
+ logger.exception("clean_messages failed")
+ click.echo(
+ click.style(
+ f"clean_messages: failed after {end_at - start_at:.2f}s - {str(e)}",
+ fg="red",
+ )
+ )
+ raise
+
+ click.echo(click.style("messages cleanup completed.", fg="green"))
diff --git a/api/configs/feature/__init__.py b/api/configs/feature/__init__.py
index 6a04171d2d..cf71a33fa8 100644
--- a/api/configs/feature/__init__.py
+++ b/api/configs/feature/__init__.py
@@ -949,6 +949,12 @@ class MailConfig(BaseSettings):
default=False,
)
+ SMTP_LOCAL_HOSTNAME: str | None = Field(
+ description="Override the local hostname used in SMTP HELO/EHLO. "
+ "Useful behind NAT or when the default hostname causes rejections.",
+ default=None,
+ )
+
EMAIL_SEND_IP_LIMIT_PER_MINUTE: PositiveInt = Field(
description="Maximum number of emails allowed to be sent from the same IP address in a minute",
default=50,
@@ -1101,6 +1107,10 @@ class CeleryScheduleTasksConfig(BaseSettings):
description="Enable clean messages task",
default=False,
)
+ ENABLE_WORKFLOW_RUN_CLEANUP_TASK: bool = Field(
+ description="Enable scheduled workflow run cleanup task",
+ default=False,
+ )
ENABLE_MAIL_CLEAN_DOCUMENT_NOTIFY_TASK: bool = Field(
description="Enable mail clean document notify task",
default=False,
diff --git a/api/configs/middleware/storage/volcengine_tos_storage_config.py b/api/configs/middleware/storage/volcengine_tos_storage_config.py
index be01f2dc36..2a35300401 100644
--- a/api/configs/middleware/storage/volcengine_tos_storage_config.py
+++ b/api/configs/middleware/storage/volcengine_tos_storage_config.py
@@ -4,7 +4,7 @@ from pydantic_settings import BaseSettings
class VolcengineTOSStorageConfig(BaseSettings):
"""
- Configuration settings for Volcengine Tinder Object Storage (TOS)
+ Configuration settings for Volcengine Torch Object Storage (TOS)
"""
VOLCENGINE_TOS_BUCKET_NAME: str | None = Field(
diff --git a/api/context/__init__.py b/api/context/__init__.py
new file mode 100644
index 0000000000..aebf9750ce
--- /dev/null
+++ b/api/context/__init__.py
@@ -0,0 +1,74 @@
+"""
+Core Context - Framework-agnostic context management.
+
+This module provides context management that is independent of any specific
+web framework. Framework-specific implementations register their context
+capture functions at application initialization time.
+
+This ensures the workflow layer remains completely decoupled from Flask
+or any other web framework.
+"""
+
+import contextvars
+from collections.abc import Callable
+
+from core.workflow.context.execution_context import (
+ ExecutionContext,
+ IExecutionContext,
+ NullAppContext,
+)
+
+# Global capturer function - set by framework-specific modules
+_capturer: Callable[[], IExecutionContext] | None = None
+
+
+def register_context_capturer(capturer: Callable[[], IExecutionContext]) -> None:
+ """
+ Register a context capture function.
+
+ This should be called by framework-specific modules (e.g., Flask)
+ during application initialization.
+
+ Args:
+ capturer: Function that captures current context and returns IExecutionContext
+ """
+ global _capturer
+ _capturer = capturer
+
+
+def capture_current_context() -> IExecutionContext:
+ """
+ Capture current execution context.
+
+ This function uses the registered context capturer. If no capturer
+ is registered, it returns a minimal context with only contextvars
+ (suitable for non-framework environments like tests or standalone scripts).
+
+ Returns:
+ IExecutionContext with captured context
+ """
+ if _capturer is None:
+ # No framework registered - return minimal context
+ return ExecutionContext(
+ app_context=NullAppContext(),
+ context_vars=contextvars.copy_context(),
+ )
+
+ return _capturer()
+
+
+def reset_context_provider() -> None:
+ """
+ Reset the context capturer.
+
+ This is primarily useful for testing to ensure a clean state.
+ """
+ global _capturer
+ _capturer = None
+
+
+__all__ = [
+ "capture_current_context",
+ "register_context_capturer",
+ "reset_context_provider",
+]
diff --git a/api/context/flask_app_context.py b/api/context/flask_app_context.py
new file mode 100644
index 0000000000..4b693cd91f
--- /dev/null
+++ b/api/context/flask_app_context.py
@@ -0,0 +1,198 @@
+"""
+Flask App Context - Flask implementation of AppContext interface.
+"""
+
+import contextvars
+from collections.abc import Generator
+from contextlib import contextmanager
+from typing import Any, final
+
+from flask import Flask, current_app, g
+
+from context import register_context_capturer
+from core.workflow.context.execution_context import (
+ AppContext,
+ IExecutionContext,
+)
+
+
+@final
+class FlaskAppContext(AppContext):
+ """
+ Flask implementation of AppContext.
+
+ This adapts Flask's app context to the AppContext interface.
+ """
+
+ def __init__(self, flask_app: Flask) -> None:
+ """
+ Initialize Flask app context.
+
+ Args:
+ flask_app: The Flask application instance
+ """
+ self._flask_app = flask_app
+
+ def get_config(self, key: str, default: Any = None) -> Any:
+ """Get configuration value from Flask app config."""
+ return self._flask_app.config.get(key, default)
+
+ def get_extension(self, name: str) -> Any:
+ """Get Flask extension by name."""
+ return self._flask_app.extensions.get(name)
+
+ @contextmanager
+ def enter(self) -> Generator[None, None, None]:
+ """Enter Flask app context."""
+ with self._flask_app.app_context():
+ yield
+
+ @property
+ def flask_app(self) -> Flask:
+ """Get the underlying Flask app instance."""
+ return self._flask_app
+
+
+def capture_flask_context(user: Any = None) -> IExecutionContext:
+ """
+ Capture current Flask execution context.
+
+ This function captures the Flask app context and contextvars from the
+ current environment. It should be called from within a Flask request or
+ app context.
+
+ Args:
+ user: Optional user object to include in context
+
+ Returns:
+ IExecutionContext with captured Flask context
+
+ Raises:
+ RuntimeError: If called outside Flask context
+ """
+ # Get Flask app instance
+ flask_app = current_app._get_current_object() # type: ignore
+
+ # Save current user if available
+ saved_user = user
+ if saved_user is None:
+ # Check for user in g (flask-login)
+ if hasattr(g, "_login_user"):
+ saved_user = g._login_user
+
+ # Capture contextvars
+ context_vars = contextvars.copy_context()
+
+ return FlaskExecutionContext(
+ flask_app=flask_app,
+ context_vars=context_vars,
+ user=saved_user,
+ )
+
+
+@final
+class FlaskExecutionContext:
+ """
+ Flask-specific execution context.
+
+ This is a specialized version of ExecutionContext that includes Flask app
+ context. It provides the same interface as ExecutionContext but with
+ Flask-specific implementation.
+ """
+
+ def __init__(
+ self,
+ flask_app: Flask,
+ context_vars: contextvars.Context,
+ user: Any = None,
+ ) -> None:
+ """
+ Initialize Flask execution context.
+
+ Args:
+ flask_app: Flask application instance
+ context_vars: Python contextvars
+ user: Optional user object
+ """
+ self._app_context = FlaskAppContext(flask_app)
+ self._context_vars = context_vars
+ self._user = user
+ self._flask_app = flask_app
+
+ @property
+ def app_context(self) -> FlaskAppContext:
+ """Get Flask app context."""
+ return self._app_context
+
+ @property
+ def context_vars(self) -> contextvars.Context:
+ """Get context variables."""
+ return self._context_vars
+
+ @property
+ def user(self) -> Any:
+ """Get user object."""
+ return self._user
+
+ def __enter__(self) -> "FlaskExecutionContext":
+ """Enter the Flask execution context."""
+ # Restore context variables
+ for var, val in self._context_vars.items():
+ var.set(val)
+
+ # Save current user from g if available
+ saved_user = None
+ if hasattr(g, "_login_user"):
+ saved_user = g._login_user
+
+ # Enter Flask app context
+ self._cm = self._app_context.enter()
+ self._cm.__enter__()
+
+ # Restore user in new app context
+ if saved_user is not None:
+ g._login_user = saved_user
+
+ return self
+
+ def __exit__(self, *args: Any) -> None:
+ """Exit the Flask execution context."""
+ if hasattr(self, "_cm"):
+ self._cm.__exit__(*args)
+
+ @contextmanager
+ def enter(self) -> Generator[None, None, None]:
+ """Enter Flask execution context as context manager."""
+ # Restore context variables
+ for var, val in self._context_vars.items():
+ var.set(val)
+
+ # Save current user from g if available
+ saved_user = None
+ if hasattr(g, "_login_user"):
+ saved_user = g._login_user
+
+ # Enter Flask app context
+ with self._flask_app.app_context():
+ # Restore user in new app context
+ if saved_user is not None:
+ g._login_user = saved_user
+ yield
+
+
+def init_flask_context() -> None:
+ """
+ Initialize Flask context capture by registering the capturer.
+
+ This function should be called during Flask application initialization
+ to register the Flask-specific context capturer with the core context module.
+
+ Example:
+ app = Flask(__name__)
+ init_flask_context() # Register Flask context capturer
+
+ Note:
+ This function does not need the app instance as it uses Flask's
+ `current_app` to get the app when capturing context.
+ """
+ register_context_capturer(capture_flask_context)
diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py
index d66bb7063f..dad184c54b 100644
--- a/api/controllers/console/app/app.py
+++ b/api/controllers/console/app/app.py
@@ -1,4 +1,3 @@
-import re
import uuid
from datetime import datetime
from typing import Any, Literal, TypeAlias
@@ -68,48 +67,6 @@ class AppListQuery(BaseModel):
raise ValueError("Invalid UUID format in tag_ids.") from exc
-# XSS prevention: patterns that could lead to XSS attacks
-# Includes: script tags, iframe tags, javascript: protocol, SVG with onload, etc.
-_XSS_PATTERNS = [
- r"", # Script tags
- r")", # Iframe tags (including self-closing)
- r"javascript:", # JavaScript protocol
- r"
diff --git a/api/templates/register_email_when_account_exist_template_en-US.html b/api/templates/register_email_when_account_exist_template_en-US.html
index ac5042c274..e2bb99c989 100644
--- a/api/templates/register_email_when_account_exist_template_en-US.html
+++ b/api/templates/register_email_when_account_exist_template_en-US.html
@@ -115,7 +115,30 @@
We noticed you tried to sign up, but this email is already registered with an existing account.
Please log in here:
+ If the button doesn't work, copy and paste this link into your browser:
+
+ {{ login_url }}
+
+
+
If you forgot your password, you can reset it here: Reset Password
diff --git a/api/templates/register_email_when_account_exist_template_zh-CN.html b/api/templates/register_email_when_account_exist_template_zh-CN.html
index 326b58343a..6a5bbd135b 100644
--- a/api/templates/register_email_when_account_exist_template_zh-CN.html
+++ b/api/templates/register_email_when_account_exist_template_zh-CN.html
@@ -115,7 +115,30 @@
我们注意到您尝试注册,但此电子邮件已注册。
请在此登录:
diff --git a/api/templates/without-brand/invite_member_mail_template_en-US.html b/api/templates/without-brand/invite_member_mail_template_en-US.html
index f9157284fa..687ece617a 100644
--- a/api/templates/without-brand/invite_member_mail_template_en-US.html
+++ b/api/templates/without-brand/invite_member_mail_template_en-US.html
@@ -92,12 +92,34 @@
platform specifically designed for LLM application development. On {{application_title}}, you can explore,
create, and collaborate to build and operate AI applications.
Click the button below to log in to {{application_title}} and join the workspace.