diff --git a/.agent/skills b/.agent/skills deleted file mode 120000 index 454b8427cd..0000000000 --- a/.agent/skills +++ /dev/null @@ -1 +0,0 @@ -../.claude/skills \ No newline at end of file diff --git a/.agent/skills/component-refactoring b/.agent/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.agent/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.agent/skills/frontend-code-review b/.agent/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.agent/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.agent/skills/frontend-testing b/.agent/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.agent/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.agent/skills/orpc-contract-first b/.agent/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.agent/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.agent/skills/skill-creator b/.agent/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.agent/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.agent/skills/vercel-react-best-practices b/.agent/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.agent/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.agent/skills/web-design-guidelines b/.agent/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.agent/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.claude/skills/component-refactoring/SKILL.md b/.agents/skills/component-refactoring/SKILL.md similarity index 100% rename from .claude/skills/component-refactoring/SKILL.md rename to .agents/skills/component-refactoring/SKILL.md diff --git a/.claude/skills/component-refactoring/references/complexity-patterns.md b/.agents/skills/component-refactoring/references/complexity-patterns.md similarity index 100% rename from .claude/skills/component-refactoring/references/complexity-patterns.md rename to .agents/skills/component-refactoring/references/complexity-patterns.md diff --git a/.claude/skills/component-refactoring/references/component-splitting.md b/.agents/skills/component-refactoring/references/component-splitting.md similarity index 100% rename from .claude/skills/component-refactoring/references/component-splitting.md rename to .agents/skills/component-refactoring/references/component-splitting.md diff --git a/.claude/skills/component-refactoring/references/hook-extraction.md b/.agents/skills/component-refactoring/references/hook-extraction.md similarity index 100% rename from .claude/skills/component-refactoring/references/hook-extraction.md rename to .agents/skills/component-refactoring/references/hook-extraction.md diff --git a/.claude/skills/frontend-code-review/SKILL.md b/.agents/skills/frontend-code-review/SKILL.md similarity index 100% rename from .claude/skills/frontend-code-review/SKILL.md rename to 
.agents/skills/frontend-code-review/SKILL.md diff --git a/.claude/skills/frontend-code-review/references/business-logic.md b/.agents/skills/frontend-code-review/references/business-logic.md similarity index 100% rename from .claude/skills/frontend-code-review/references/business-logic.md rename to .agents/skills/frontend-code-review/references/business-logic.md diff --git a/.claude/skills/frontend-code-review/references/code-quality.md b/.agents/skills/frontend-code-review/references/code-quality.md similarity index 100% rename from .claude/skills/frontend-code-review/references/code-quality.md rename to .agents/skills/frontend-code-review/references/code-quality.md diff --git a/.claude/skills/frontend-code-review/references/performance.md b/.agents/skills/frontend-code-review/references/performance.md similarity index 100% rename from .claude/skills/frontend-code-review/references/performance.md rename to .agents/skills/frontend-code-review/references/performance.md diff --git a/.claude/skills/frontend-testing/SKILL.md b/.agents/skills/frontend-testing/SKILL.md similarity index 100% rename from .claude/skills/frontend-testing/SKILL.md rename to .agents/skills/frontend-testing/SKILL.md diff --git a/.claude/skills/frontend-testing/assets/component-test.template.tsx b/.agents/skills/frontend-testing/assets/component-test.template.tsx similarity index 100% rename from .claude/skills/frontend-testing/assets/component-test.template.tsx rename to .agents/skills/frontend-testing/assets/component-test.template.tsx diff --git a/.claude/skills/frontend-testing/assets/hook-test.template.ts b/.agents/skills/frontend-testing/assets/hook-test.template.ts similarity index 100% rename from .claude/skills/frontend-testing/assets/hook-test.template.ts rename to .agents/skills/frontend-testing/assets/hook-test.template.ts diff --git a/.claude/skills/frontend-testing/assets/utility-test.template.ts b/.agents/skills/frontend-testing/assets/utility-test.template.ts similarity index 100% rename from .claude/skills/frontend-testing/assets/utility-test.template.ts rename to .agents/skills/frontend-testing/assets/utility-test.template.ts diff --git a/.claude/skills/frontend-testing/references/async-testing.md b/.agents/skills/frontend-testing/references/async-testing.md similarity index 100% rename from .claude/skills/frontend-testing/references/async-testing.md rename to .agents/skills/frontend-testing/references/async-testing.md diff --git a/.claude/skills/frontend-testing/references/checklist.md b/.agents/skills/frontend-testing/references/checklist.md similarity index 100% rename from .claude/skills/frontend-testing/references/checklist.md rename to .agents/skills/frontend-testing/references/checklist.md diff --git a/.claude/skills/frontend-testing/references/common-patterns.md b/.agents/skills/frontend-testing/references/common-patterns.md similarity index 100% rename from .claude/skills/frontend-testing/references/common-patterns.md rename to .agents/skills/frontend-testing/references/common-patterns.md diff --git a/.claude/skills/frontend-testing/references/domain-components.md b/.agents/skills/frontend-testing/references/domain-components.md similarity index 100% rename from .claude/skills/frontend-testing/references/domain-components.md rename to .agents/skills/frontend-testing/references/domain-components.md diff --git a/.claude/skills/frontend-testing/references/mocking.md b/.agents/skills/frontend-testing/references/mocking.md similarity index 100% rename from 
.claude/skills/frontend-testing/references/mocking.md rename to .agents/skills/frontend-testing/references/mocking.md diff --git a/.claude/skills/frontend-testing/references/workflow.md b/.agents/skills/frontend-testing/references/workflow.md similarity index 100% rename from .claude/skills/frontend-testing/references/workflow.md rename to .agents/skills/frontend-testing/references/workflow.md diff --git a/.claude/skills/orpc-contract-first/SKILL.md b/.agents/skills/orpc-contract-first/SKILL.md similarity index 100% rename from .claude/skills/orpc-contract-first/SKILL.md rename to .agents/skills/orpc-contract-first/SKILL.md diff --git a/.claude/skills/skill-creator/SKILL.md b/.agents/skills/skill-creator/SKILL.md similarity index 100% rename from .claude/skills/skill-creator/SKILL.md rename to .agents/skills/skill-creator/SKILL.md diff --git a/.claude/skills/skill-creator/references/output-patterns.md b/.agents/skills/skill-creator/references/output-patterns.md similarity index 100% rename from .claude/skills/skill-creator/references/output-patterns.md rename to .agents/skills/skill-creator/references/output-patterns.md diff --git a/.claude/skills/skill-creator/references/workflows.md b/.agents/skills/skill-creator/references/workflows.md similarity index 100% rename from .claude/skills/skill-creator/references/workflows.md rename to .agents/skills/skill-creator/references/workflows.md diff --git a/.claude/skills/skill-creator/scripts/init_skill.py b/.agents/skills/skill-creator/scripts/init_skill.py similarity index 100% rename from .claude/skills/skill-creator/scripts/init_skill.py rename to .agents/skills/skill-creator/scripts/init_skill.py diff --git a/.claude/skills/skill-creator/scripts/package_skill.py b/.agents/skills/skill-creator/scripts/package_skill.py similarity index 100% rename from .claude/skills/skill-creator/scripts/package_skill.py rename to .agents/skills/skill-creator/scripts/package_skill.py diff --git a/.claude/skills/skill-creator/scripts/quick_validate.py b/.agents/skills/skill-creator/scripts/quick_validate.py similarity index 100% rename from .claude/skills/skill-creator/scripts/quick_validate.py rename to .agents/skills/skill-creator/scripts/quick_validate.py diff --git a/.claude/skills/vercel-react-best-practices/AGENTS.md b/.agents/skills/vercel-react-best-practices/AGENTS.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/AGENTS.md rename to .agents/skills/vercel-react-best-practices/AGENTS.md diff --git a/.claude/skills/vercel-react-best-practices/SKILL.md b/.agents/skills/vercel-react-best-practices/SKILL.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/SKILL.md rename to .agents/skills/vercel-react-best-practices/SKILL.md diff --git a/.claude/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md b/.agents/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md rename to .agents/skills/vercel-react-best-practices/rules/advanced-event-handler-refs.md diff --git a/.claude/skills/vercel-react-best-practices/rules/advanced-use-latest.md b/.agents/skills/vercel-react-best-practices/rules/advanced-use-latest.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/advanced-use-latest.md rename to .agents/skills/vercel-react-best-practices/rules/advanced-use-latest.md diff --git 
a/.claude/skills/vercel-react-best-practices/rules/async-api-routes.md b/.agents/skills/vercel-react-best-practices/rules/async-api-routes.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-api-routes.md rename to .agents/skills/vercel-react-best-practices/rules/async-api-routes.md diff --git a/.claude/skills/vercel-react-best-practices/rules/async-defer-await.md b/.agents/skills/vercel-react-best-practices/rules/async-defer-await.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-defer-await.md rename to .agents/skills/vercel-react-best-practices/rules/async-defer-await.md diff --git a/.claude/skills/vercel-react-best-practices/rules/async-dependencies.md b/.agents/skills/vercel-react-best-practices/rules/async-dependencies.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-dependencies.md rename to .agents/skills/vercel-react-best-practices/rules/async-dependencies.md diff --git a/.claude/skills/vercel-react-best-practices/rules/async-parallel.md b/.agents/skills/vercel-react-best-practices/rules/async-parallel.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-parallel.md rename to .agents/skills/vercel-react-best-practices/rules/async-parallel.md diff --git a/.claude/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md b/.agents/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md rename to .agents/skills/vercel-react-best-practices/rules/async-suspense-boundaries.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-barrel-imports.md b/.agents/skills/vercel-react-best-practices/rules/bundle-barrel-imports.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-barrel-imports.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-barrel-imports.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-conditional.md b/.agents/skills/vercel-react-best-practices/rules/bundle-conditional.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-conditional.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-conditional.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-defer-third-party.md b/.agents/skills/vercel-react-best-practices/rules/bundle-defer-third-party.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-defer-third-party.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-defer-third-party.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-dynamic-imports.md b/.agents/skills/vercel-react-best-practices/rules/bundle-dynamic-imports.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-dynamic-imports.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-dynamic-imports.md diff --git a/.claude/skills/vercel-react-best-practices/rules/bundle-preload.md b/.agents/skills/vercel-react-best-practices/rules/bundle-preload.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/bundle-preload.md rename to .agents/skills/vercel-react-best-practices/rules/bundle-preload.md diff --git a/.claude/skills/vercel-react-best-practices/rules/client-event-listeners.md 
b/.agents/skills/vercel-react-best-practices/rules/client-event-listeners.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/client-event-listeners.md rename to .agents/skills/vercel-react-best-practices/rules/client-event-listeners.md diff --git a/.claude/skills/vercel-react-best-practices/rules/client-localstorage-schema.md b/.agents/skills/vercel-react-best-practices/rules/client-localstorage-schema.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/client-localstorage-schema.md rename to .agents/skills/vercel-react-best-practices/rules/client-localstorage-schema.md diff --git a/.claude/skills/vercel-react-best-practices/rules/client-passive-event-listeners.md b/.agents/skills/vercel-react-best-practices/rules/client-passive-event-listeners.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/client-passive-event-listeners.md rename to .agents/skills/vercel-react-best-practices/rules/client-passive-event-listeners.md diff --git a/.claude/skills/vercel-react-best-practices/rules/client-swr-dedup.md b/.agents/skills/vercel-react-best-practices/rules/client-swr-dedup.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/client-swr-dedup.md rename to .agents/skills/vercel-react-best-practices/rules/client-swr-dedup.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-batch-dom-css.md b/.agents/skills/vercel-react-best-practices/rules/js-batch-dom-css.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-batch-dom-css.md rename to .agents/skills/vercel-react-best-practices/rules/js-batch-dom-css.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-cache-function-results.md b/.agents/skills/vercel-react-best-practices/rules/js-cache-function-results.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-cache-function-results.md rename to .agents/skills/vercel-react-best-practices/rules/js-cache-function-results.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-cache-property-access.md b/.agents/skills/vercel-react-best-practices/rules/js-cache-property-access.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-cache-property-access.md rename to .agents/skills/vercel-react-best-practices/rules/js-cache-property-access.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-cache-storage.md b/.agents/skills/vercel-react-best-practices/rules/js-cache-storage.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-cache-storage.md rename to .agents/skills/vercel-react-best-practices/rules/js-cache-storage.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-combine-iterations.md b/.agents/skills/vercel-react-best-practices/rules/js-combine-iterations.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-combine-iterations.md rename to .agents/skills/vercel-react-best-practices/rules/js-combine-iterations.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-early-exit.md b/.agents/skills/vercel-react-best-practices/rules/js-early-exit.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-early-exit.md rename to .agents/skills/vercel-react-best-practices/rules/js-early-exit.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-hoist-regexp.md 
b/.agents/skills/vercel-react-best-practices/rules/js-hoist-regexp.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-hoist-regexp.md rename to .agents/skills/vercel-react-best-practices/rules/js-hoist-regexp.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-index-maps.md b/.agents/skills/vercel-react-best-practices/rules/js-index-maps.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-index-maps.md rename to .agents/skills/vercel-react-best-practices/rules/js-index-maps.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-length-check-first.md b/.agents/skills/vercel-react-best-practices/rules/js-length-check-first.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-length-check-first.md rename to .agents/skills/vercel-react-best-practices/rules/js-length-check-first.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-min-max-loop.md b/.agents/skills/vercel-react-best-practices/rules/js-min-max-loop.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-min-max-loop.md rename to .agents/skills/vercel-react-best-practices/rules/js-min-max-loop.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-set-map-lookups.md b/.agents/skills/vercel-react-best-practices/rules/js-set-map-lookups.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-set-map-lookups.md rename to .agents/skills/vercel-react-best-practices/rules/js-set-map-lookups.md diff --git a/.claude/skills/vercel-react-best-practices/rules/js-tosorted-immutable.md b/.agents/skills/vercel-react-best-practices/rules/js-tosorted-immutable.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/js-tosorted-immutable.md rename to .agents/skills/vercel-react-best-practices/rules/js-tosorted-immutable.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-activity.md b/.agents/skills/vercel-react-best-practices/rules/rendering-activity.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-activity.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-activity.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-animate-svg-wrapper.md b/.agents/skills/vercel-react-best-practices/rules/rendering-animate-svg-wrapper.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-animate-svg-wrapper.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-animate-svg-wrapper.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-conditional-render.md b/.agents/skills/vercel-react-best-practices/rules/rendering-conditional-render.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-conditional-render.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-conditional-render.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-content-visibility.md b/.agents/skills/vercel-react-best-practices/rules/rendering-content-visibility.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-content-visibility.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-content-visibility.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-hoist-jsx.md 
b/.agents/skills/vercel-react-best-practices/rules/rendering-hoist-jsx.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-hoist-jsx.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-hoist-jsx.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md b/.agents/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-hydration-no-flicker.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rendering-svg-precision.md b/.agents/skills/vercel-react-best-practices/rules/rendering-svg-precision.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rendering-svg-precision.md rename to .agents/skills/vercel-react-best-practices/rules/rendering-svg-precision.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-defer-reads.md b/.agents/skills/vercel-react-best-practices/rules/rerender-defer-reads.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-defer-reads.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-defer-reads.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-dependencies.md b/.agents/skills/vercel-react-best-practices/rules/rerender-dependencies.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-dependencies.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-dependencies.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-derived-state.md b/.agents/skills/vercel-react-best-practices/rules/rerender-derived-state.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-derived-state.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-derived-state.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md b/.agents/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-functional-setstate.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md b/.agents/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-lazy-state-init.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-memo.md b/.agents/skills/vercel-react-best-practices/rules/rerender-memo.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-memo.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-memo.md diff --git a/.claude/skills/vercel-react-best-practices/rules/rerender-transitions.md b/.agents/skills/vercel-react-best-practices/rules/rerender-transitions.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/rerender-transitions.md rename to .agents/skills/vercel-react-best-practices/rules/rerender-transitions.md diff --git 
a/.claude/skills/vercel-react-best-practices/rules/server-after-nonblocking.md b/.agents/skills/vercel-react-best-practices/rules/server-after-nonblocking.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-after-nonblocking.md rename to .agents/skills/vercel-react-best-practices/rules/server-after-nonblocking.md diff --git a/.claude/skills/vercel-react-best-practices/rules/server-cache-lru.md b/.agents/skills/vercel-react-best-practices/rules/server-cache-lru.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-cache-lru.md rename to .agents/skills/vercel-react-best-practices/rules/server-cache-lru.md diff --git a/.claude/skills/vercel-react-best-practices/rules/server-cache-react.md b/.agents/skills/vercel-react-best-practices/rules/server-cache-react.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-cache-react.md rename to .agents/skills/vercel-react-best-practices/rules/server-cache-react.md diff --git a/.claude/skills/vercel-react-best-practices/rules/server-parallel-fetching.md b/.agents/skills/vercel-react-best-practices/rules/server-parallel-fetching.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-parallel-fetching.md rename to .agents/skills/vercel-react-best-practices/rules/server-parallel-fetching.md diff --git a/.claude/skills/vercel-react-best-practices/rules/server-serialization.md b/.agents/skills/vercel-react-best-practices/rules/server-serialization.md similarity index 100% rename from .claude/skills/vercel-react-best-practices/rules/server-serialization.md rename to .agents/skills/vercel-react-best-practices/rules/server-serialization.md diff --git a/.agents/skills/web-design-guidelines/SKILL.md b/.agents/skills/web-design-guidelines/SKILL.md new file mode 100644 index 0000000000..ceae92ab31 --- /dev/null +++ b/.agents/skills/web-design-guidelines/SKILL.md @@ -0,0 +1,39 @@ +--- +name: web-design-guidelines +description: Review UI code for Web Interface Guidelines compliance. Use when asked to "review my UI", "check accessibility", "audit design", "review UX", or "check my site against best practices". +metadata: + author: vercel + version: "1.0.0" + argument-hint: +--- + +# Web Interface Guidelines + +Review files for compliance with Web Interface Guidelines. + +## How It Works + +1. Fetch the latest guidelines from the source URL below +2. Read the specified files (or prompt user for files/pattern) +3. Check against all rules in the fetched guidelines +4. Output findings in the terse `file:line` format + +## Guidelines Source + +Fetch fresh guidelines before each review: + +``` +https://raw.githubusercontent.com/vercel-labs/web-interface-guidelines/main/command.md +``` + +Use WebFetch to retrieve the latest rules. The fetched content contains all the rules and output format instructions. + +## Usage + +When a user provides a file or pattern argument: +1. Fetch guidelines from the source URL above +2. Read the specified files +3. Apply all rules from the fetched guidelines +4. Output findings using the format specified in the guidelines + +If no files specified, ask the user which files to review. 
diff --git a/.claude/skills/component-refactoring b/.claude/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.claude/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.claude/skills/frontend-code-review b/.claude/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.claude/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.claude/skills/frontend-testing b/.claude/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.claude/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.claude/skills/orpc-contract-first b/.claude/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.claude/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.claude/skills/skill-creator b/.claude/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.claude/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.claude/skills/vercel-react-best-practices b/.claude/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.claude/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.claude/skills/web-design-guidelines b/.claude/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.claude/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.codex/skills b/.codex/skills deleted file mode 120000 index 454b8427cd..0000000000 --- a/.codex/skills +++ /dev/null @@ -1 +0,0 @@ -../.claude/skills \ No newline at end of file diff --git a/.codex/skills/component-refactoring b/.codex/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.codex/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.codex/skills/frontend-code-review b/.codex/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.codex/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.codex/skills/frontend-testing b/.codex/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.codex/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.codex/skills/orpc-contract-first b/.codex/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.codex/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.codex/skills/skill-creator b/.codex/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.codex/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.codex/skills/vercel-react-best-practices 
b/.codex/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.codex/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.codex/skills/web-design-guidelines b/.codex/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.codex/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.cursor/skills/component-refactoring b/.cursor/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.cursor/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.cursor/skills/frontend-code-review b/.cursor/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.cursor/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.cursor/skills/frontend-testing b/.cursor/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.cursor/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.cursor/skills/orpc-contract-first b/.cursor/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.cursor/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.cursor/skills/skill-creator b/.cursor/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.cursor/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.cursor/skills/vercel-react-best-practices b/.cursor/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.cursor/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.cursor/skills/web-design-guidelines b/.cursor/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.cursor/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.gemini/skills/component-refactoring b/.gemini/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.gemini/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.gemini/skills/frontend-code-review b/.gemini/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.gemini/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.gemini/skills/frontend-testing b/.gemini/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.gemini/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.gemini/skills/orpc-contract-first b/.gemini/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.gemini/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of 
file diff --git a/.gemini/skills/skill-creator b/.gemini/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.gemini/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.gemini/skills/vercel-react-best-practices b/.gemini/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.gemini/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.gemini/skills/web-design-guidelines b/.gemini/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.gemini/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.github/skills/component-refactoring b/.github/skills/component-refactoring new file mode 120000 index 0000000000..53ae67e2f2 --- /dev/null +++ b/.github/skills/component-refactoring @@ -0,0 +1 @@ +../../.agents/skills/component-refactoring \ No newline at end of file diff --git a/.github/skills/frontend-code-review b/.github/skills/frontend-code-review new file mode 120000 index 0000000000..55654ffbd7 --- /dev/null +++ b/.github/skills/frontend-code-review @@ -0,0 +1 @@ +../../.agents/skills/frontend-code-review \ No newline at end of file diff --git a/.github/skills/frontend-testing b/.github/skills/frontend-testing new file mode 120000 index 0000000000..092cec7745 --- /dev/null +++ b/.github/skills/frontend-testing @@ -0,0 +1 @@ +../../.agents/skills/frontend-testing \ No newline at end of file diff --git a/.github/skills/orpc-contract-first b/.github/skills/orpc-contract-first new file mode 120000 index 0000000000..da47b335c7 --- /dev/null +++ b/.github/skills/orpc-contract-first @@ -0,0 +1 @@ +../../.agents/skills/orpc-contract-first \ No newline at end of file diff --git a/.github/skills/skill-creator b/.github/skills/skill-creator new file mode 120000 index 0000000000..b87455490f --- /dev/null +++ b/.github/skills/skill-creator @@ -0,0 +1 @@ +../../.agents/skills/skill-creator \ No newline at end of file diff --git a/.github/skills/vercel-react-best-practices b/.github/skills/vercel-react-best-practices new file mode 120000 index 0000000000..e567923b32 --- /dev/null +++ b/.github/skills/vercel-react-best-practices @@ -0,0 +1 @@ +../../.agents/skills/vercel-react-best-practices \ No newline at end of file diff --git a/.github/skills/web-design-guidelines b/.github/skills/web-design-guidelines new file mode 120000 index 0000000000..886b26ded7 --- /dev/null +++ b/.github/skills/web-design-guidelines @@ -0,0 +1 @@ +../../.agents/skills/web-design-guidelines \ No newline at end of file diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index ff006324bb..4571fd1cd1 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -82,6 +82,6 @@ jobs: # mdformat breaks YAML front matter in markdown files. Add --exclude for directories containing YAML front matter. - name: mdformat run: | - uvx --python 3.13 mdformat . --exclude ".claude/skills/**" + uvx --python 3.13 mdformat . 
--exclude ".agents/skills/**" - uses: autofix-ci/action@635ffb0c9798bd160680f18fd73371e355b85f27 diff --git a/api/.env.example b/api/.env.example index 15981c14b8..c3b1474549 100644 --- a/api/.env.example +++ b/api/.env.example @@ -715,4 +715,5 @@ ANNOTATION_IMPORT_MAX_CONCURRENT=5 SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD=21 SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE=1000 SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS=30 +SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL=90000 diff --git a/api/agent-notes/controllers/console/datasets/datasets_document.py.md b/api/agent-notes/controllers/console/datasets/datasets_document.py.md deleted file mode 100644 index b100249981..0000000000 --- a/api/agent-notes/controllers/console/datasets/datasets_document.py.md +++ /dev/null @@ -1,52 +0,0 @@ -## Purpose - -`api/controllers/console/datasets/datasets_document.py` contains the console (authenticated) APIs for managing dataset documents (list/create/update/delete, processing controls, estimates, etc.). - -## Storage model (uploaded files) - -- For local file uploads into a knowledge base, the binary is stored via `extensions.ext_storage.storage` under the key: - - `upload_files//.` -- File metadata is stored in the `upload_files` table (`UploadFile` model), keyed by `UploadFile.id`. -- Dataset `Document` records reference the uploaded file via: - - `Document.data_source_info.upload_file_id` - -## Download endpoint - -- `GET /datasets//documents//download` - - - Only supported when `Document.data_source_type == "upload_file"`. - - Performs dataset permission + tenant checks via `DocumentResource.get_document(...)`. - - Delegates `Document -> UploadFile` validation and signed URL generation to `DocumentService.get_document_download_url(...)`. - - Applies `cloud_edition_billing_rate_limit_check("knowledge")` to match other KB operations. - - Response body is **only**: `{ "url": "" }`. - -- `POST /datasets//documents/download-zip` - - - Accepts `{ "document_ids": ["..."] }` (upload-file only). - - Returns `application/zip` as a single attachment download. - - Rationale: browsers often block multiple automatic downloads; a ZIP avoids that limitation. - - Applies `cloud_edition_billing_rate_limit_check("knowledge")`. - - Delegates dataset permission checks, document/upload-file validation, and download-name generation to - `DocumentService.prepare_document_batch_download_zip(...)` before streaming the ZIP. - -## Verification plan - -- Upload a document from a local file into a dataset. -- Call the download endpoint and confirm it returns a signed URL. -- Open the URL and confirm: - - Response headers force download (`Content-Disposition`), and - - Downloaded bytes match the uploaded file. -- Select multiple uploaded-file documents and download as ZIP; confirm all selected files exist in the archive. - -## Shared helper - -- `DocumentService.get_document_download_url(document)` resolves the `UploadFile` and signs a download URL. -- `DocumentService.prepare_document_batch_download_zip(...)` performs dataset permission checks, batches - document + upload file lookups, preserves request order, and generates the client-visible ZIP filename. -- Internal helpers now live in `DocumentService` (`_get_upload_file_id_for_upload_file_document(...)`, - `_get_upload_file_for_upload_file_document(...)`, `_get_upload_files_by_document_id_for_zip_download(...)`). 
-- ZIP packing is handled by `FileService.build_upload_files_zip_tempfile(...)`, which also: - - sanitizes entry names to avoid path traversal, and - - deduplicates names while preserving extensions (e.g., `doc.txt` → `doc (1).txt`). - Streaming the response and deferring cleanup is handled by the route via `send_file(path, ...)` + `ExitStack` + - `response.call_on_close(...)` (the file is deleted when the response is closed). diff --git a/api/agent-notes/services/dataset_service.py.md b/api/agent-notes/services/dataset_service.py.md deleted file mode 100644 index b68ef345f5..0000000000 --- a/api/agent-notes/services/dataset_service.py.md +++ /dev/null @@ -1,18 +0,0 @@ -## Purpose - -`api/services/dataset_service.py` hosts dataset/document service logic used by console and API controllers. - -## Batch document operations - -- Batch document workflows should avoid N+1 database queries by using set-based lookups. -- Tenant checks must be enforced consistently across dataset/document operations. -- `DocumentService.get_documents_by_ids(...)` fetches documents for a dataset using `id.in_(...)`. -- `FileService.get_upload_files_by_ids(...)` performs tenant-scoped batch lookup for `UploadFile` (dedupes ids with `set(...)`). -- `DocumentService.get_document_download_url(...)` and `prepare_document_batch_download_zip(...)` handle - dataset/document permission checks plus `Document -> UploadFile` validation for download endpoints. - -## Verification plan - -- Exercise document list and download endpoints that use the service helpers. -- Confirm batch download uses constant query count for documents + upload files. -- Request a ZIP with a missing document id and confirm a 404 is returned. diff --git a/api/agent-notes/services/file_service.py.md b/api/agent-notes/services/file_service.py.md deleted file mode 100644 index cf394a1c05..0000000000 --- a/api/agent-notes/services/file_service.py.md +++ /dev/null @@ -1,35 +0,0 @@ -## Purpose - -`api/services/file_service.py` owns business logic around `UploadFile` objects: upload validation, storage persistence, -previews/generators, and deletion. - -## Key invariants - -- All storage I/O goes through `extensions.ext_storage.storage`. -- Uploaded file keys follow: `upload_files//.`. -- Upload validation is enforced in `FileService.upload_file(...)` (blocked extensions, size limits, dataset-only types). - -## Batch lookup helpers - -- `FileService.get_upload_files_by_ids(tenant_id, upload_file_ids)` is the canonical tenant-scoped batch loader for - `UploadFile`. - -## Dataset document download helpers - -The dataset document download/ZIP endpoints now delegate “Document → UploadFile” validation and permission checks to -`DocumentService` (`api/services/dataset_service.py`). `FileService` stays focused on generic `UploadFile` operations -(uploading, previews, deletion), plus generic ZIP serving. - -### ZIP serving - -- `FileService.build_upload_files_zip_tempfile(...)` builds a ZIP from `UploadFile` objects and yields a seeked - tempfile **path** so callers can stream it (e.g., `send_file(path, ...)`) without hitting "read of closed file" - issues from file-handle lifecycle during streamed responses. -- Flask `send_file(...)` and the `ExitStack`/`call_on_close(...)` cleanup pattern are handled in the route layer. - -## Verification plan - -- Unit: `api/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py` - - Verify signed URL generation for upload-file documents and ZIP download behavior for multiple documents. 
-- Unit: `api/tests/unit_tests/services/test_file_service_zip_and_lookup.py` - - Verify ZIP packing produces a valid, openable archive and preserves file content. diff --git a/api/agent-notes/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py.md b/api/agent-notes/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py.md deleted file mode 100644 index 8f78dacde8..0000000000 --- a/api/agent-notes/tests/unit_tests/controllers/console/datasets/test_datasets_document_download.py.md +++ /dev/null @@ -1,28 +0,0 @@ -## Purpose - -Unit tests for the console dataset document download endpoint: - -- `GET /datasets//documents//download` - -## Testing approach - -- Uses `Flask.test_request_context()` and calls the `Resource.get(...)` method directly. -- Monkeypatches console decorators (`login_required`, `setup_required`, rate limit) to no-ops to keep the test focused. -- Mocks: - - `DatasetService.get_dataset` / `check_dataset_permission` - - `DocumentService.get_document` for single-file download tests - - `DocumentService.get_documents_by_ids` + `FileService.get_upload_files_by_ids` for ZIP download tests - - `FileService.get_upload_files_by_ids` for `UploadFile` lookups in single-file tests - - `services.dataset_service.file_helpers.get_signed_file_url` to return a deterministic URL -- Document mocks include `id` fields so batch lookups can map documents by id. - -## Covered cases - -- Success returns `{ "url": "" }` for upload-file documents. -- 404 when document is not `upload_file`. -- 404 when `upload_file_id` is missing. -- 404 when referenced `UploadFile` row does not exist. -- 403 when document tenant does not match current tenant. -- Batch ZIP download returns `application/zip` for upload-file documents. -- Batch ZIP download rejects non-upload-file documents. -- Batch ZIP download uses a random `.zip` attachment name (`download_name`), so tests only assert the suffix. diff --git a/api/agent-notes/tests/unit_tests/services/test_file_service_zip_and_lookup.py.md b/api/agent-notes/tests/unit_tests/services/test_file_service_zip_and_lookup.py.md deleted file mode 100644 index dbcdf26f10..0000000000 --- a/api/agent-notes/tests/unit_tests/services/test_file_service_zip_and_lookup.py.md +++ /dev/null @@ -1,18 +0,0 @@ -## Purpose - -Unit tests for `api/services/file_service.py` helper methods that are not covered by higher-level controller tests. - -## What’s covered - -- `FileService.build_upload_files_zip_tempfile(...)` - - ZIP entry name sanitization (no directory components / traversal) - - name deduplication while preserving extensions - - writing streamed bytes from `storage.load(...)` into ZIP entries - - yields a tempfile path so callers can open/stream the ZIP without holding a live file handle -- `FileService.get_upload_files_by_ids(...)` - - returns `{}` for empty id lists - - returns an id-keyed mapping for non-empty lists - -## Notes - -- These tests intentionally stub `storage.load` and `db.session.scalars(...).all()` to avoid needing a real DB/storage. diff --git a/api/agent_skills/infra.md b/api/agent_skills/infra.md deleted file mode 100644 index bc36c7bf64..0000000000 --- a/api/agent_skills/infra.md +++ /dev/null @@ -1,96 +0,0 @@ -## Configuration - -- Import `configs.dify_config` for every runtime toggle. Do not read environment variables directly. -- Add new settings to the proper mixin inside `configs/` (deployment, feature, middleware, etc.) so they load through `DifyConfig`. 
-- Remote overrides come from the optional providers in `configs/remote_settings_sources`; keep defaults in code safe when the value is missing. -- Example: logging pulls targets from `extensions/ext_logging.py`, and model provider URLs are assembled in `services/entities/model_provider_entities.py`. - -## Dependencies - -- Runtime dependencies live in `[project].dependencies` inside `pyproject.toml`. Optional clients go into the `storage`, `tools`, or `vdb` groups under `[dependency-groups]`. -- Always pin versions and keep the list alphabetised. Shared tooling (lint, typing, pytest) belongs in the `dev` group. -- When code needs a new package, explain why in the PR and run `uv lock` so the lockfile stays current. - -## Storage & Files - -- Use `extensions.ext_storage.storage` for all blob IO; it already respects the configured backend. -- Convert files for workflows with helpers in `core/file/file_manager.py`; they handle signed URLs and multimodal payloads. -- When writing controller logic, delegate upload quotas and metadata to `services/file_service.py` instead of touching storage directly. -- All outbound HTTP fetches (webhooks, remote files) must go through the SSRF-safe client in `core/helper/ssrf_proxy.py`; it wraps `httpx` with the allow/deny rules configured for the platform. - -## Redis & Shared State - -- Access Redis through `extensions.ext_redis.redis_client`. For locking, reuse `redis_client.lock`. -- Prefer higher-level helpers when available: rate limits use `libs.helper.RateLimiter`, provider metadata uses caches in `core/helper/provider_cache.py`. - -## Models - -- SQLAlchemy models sit in `models/` and inherit from the shared declarative `Base` defined in `models/base.py` (metadata configured via `models/engine.py`). -- `models/__init__.py` exposes grouped aggregates: account/tenant models, app and conversation tables, datasets, providers, workflow runs, triggers, etc. Import from there to avoid deep path churn. -- Follow the DDD boundary: persistence objects live in `models/`, repositories under `repositories/` translate them into domain entities, and services consume those repositories. -- When adding a table, create the model class, register it in `models/__init__.py`, wire a repository if needed, and generate an Alembic migration as described below. - -## Vector Stores - -- Vector client implementations live in `core/rag/datasource/vdb/`, with a common factory in `core/rag/datasource/vdb/vector_factory.py` and enums in `core/rag/datasource/vdb/vector_type.py`. -- Retrieval pipelines call these providers through `core/rag/datasource/retrieval_service.py` and dataset ingestion flows in `services/dataset_service.py`. -- The CLI helper `flask vdb-migrate` orchestrates bulk migrations using routines in `commands.py`; reuse that pattern when adding new backend transitions. -- To add another store, mirror the provider layout, register it with the factory, and include any schema changes in Alembic migrations. - -## Observability & OTEL - -- OpenTelemetry settings live under the observability mixin in `configs/observability`. Toggle exporters and sampling via `dify_config`, not ad-hoc env reads. -- HTTP, Celery, Redis, SQLAlchemy, and httpx instrumentation is initialised in `extensions/ext_app_metrics.py` and `extensions/ext_request_logging.py`; reuse these hooks when adding new workers or entrypoints. -- When creating background tasks or external calls, propagate tracing context with helpers in the existing instrumented clients (e.g. 
use the shared `httpx` session from `core/helper/http_client_pooling.py`). -- If you add a new external integration, ensure spans and metrics are emitted by wiring the appropriate OTEL instrumentation package in `pyproject.toml` and configuring it in `extensions/`. - -## Ops Integrations - -- Langfuse support and other tracing bridges live under `core/ops/opik_trace`. Config toggles sit in `configs/observability`, while exporters are initialised in the OTEL extensions mentioned above. -- External monitoring services should follow this pattern: keep client code in `core/ops`, expose switches via `dify_config`, and hook initialisation in `extensions/ext_app_metrics.py` or sibling modules. -- Before instrumenting new code paths, check whether existing context helpers (e.g. `extensions/ext_request_logging.py`) already capture the necessary metadata. - -## Controllers, Services, Core - -- Controllers only parse HTTP input and call a service method. Keep business rules in `services/`. -- Services enforce tenant rules, quotas, and orchestration, then call into `core/` engines (workflow execution, tools, LLMs). -- When adding a new endpoint, search for an existing service to extend before introducing a new layer. Example: workflow APIs pipe through `services/workflow_service.py` into `core/workflow`. - -## Plugins, Tools, Providers - -- In Dify a plugin is a tenant-installable bundle that declares one or more providers (tool, model, datasource, trigger, endpoint, agent strategy) plus its resource needs and version metadata. The manifest (`core/plugin/entities/plugin.py`) mirrors what you see in the marketplace documentation. -- Installation, upgrades, and migrations are orchestrated by `services/plugin/plugin_service.py` together with helpers such as `services/plugin/plugin_migration.py`. -- Runtime loading happens through the implementations under `core/plugin/impl/*` (tool/model/datasource/trigger/endpoint/agent). These modules normalise plugin providers so that downstream systems (`core/tools/tool_manager.py`, `services/model_provider_service.py`, `services/trigger/*`) can treat builtin and plugin capabilities the same way. -- For remote execution, plugin daemons (`core/plugin/entities/plugin_daemon.py`, `core/plugin/impl/plugin.py`) manage lifecycle hooks, credential forwarding, and background workers that keep plugin processes in sync with the main application. -- Acquire tool implementations through `core/tools/tool_manager.py`; it resolves builtin, plugin, and workflow-as-tool providers uniformly, injecting the right context (tenant, credentials, runtime config). -- To add a new plugin capability, extend the relevant `core/plugin/entities` schema and register the implementation in the matching `core/plugin/impl` module rather than importing the provider directly. - -## Async Workloads - -see `agent_skills/trigger.md` for more detailed documentation. - -- Enqueue background work through `services/async_workflow_service.py`. It routes jobs to the tiered Celery queues defined in `tasks/`. -- Workers boot from `celery_entrypoint.py` and execute functions in `tasks/workflow_execution_tasks.py`, `tasks/trigger_processing_tasks.py`, etc. -- Scheduled workflows poll from `schedule/workflow_schedule_tasks.py`. Follow the same pattern if you need new periodic jobs. - -## Database & Migrations - -- SQLAlchemy models live under `models/` and map directly to migration files in `migrations/versions`. 
-- Generate migrations with `uv run --project api flask db revision --autogenerate -m ""`, then review the diff; never hand-edit the database outside Alembic. -- Apply migrations locally using `uv run --project api flask db upgrade`; production deploys expect the same history. -- If you add tenant-scoped data, confirm the upgrade includes tenant filters or defaults consistent with the service logic touching those tables. - -## CLI Commands - -- Maintenance commands from `commands.py` are registered on the Flask CLI. Run them via `uv run --project api flask `. -- Use the built-in `db` commands from Flask-Migrate for schema operations (`flask db upgrade`, `flask db stamp`, etc.). Only fall back to custom helpers if you need their extra behaviour. -- Custom entries such as `flask reset-password`, `flask reset-email`, and `flask vdb-migrate` handle self-hosted account recovery and vector database migrations. -- Before adding a new command, check whether an existing service can be reused and ensure the command guards edition-specific behaviour (many enforce `SELF_HOSTED`). Document any additions in the PR. -- Ruff helpers are run directly with `uv`: `uv run --project api --dev ruff format ./api` for formatting and `uv run --project api --dev ruff check ./api` (add `--fix` if you want automatic fixes). - -## When You Add Features - -- Check for an existing helper or service before writing a new util. -- Uphold tenancy: every service method should receive the tenant ID from controller wrappers such as `controllers/console/wraps.py`. -- Update or create tests alongside behaviour changes (`tests/unit_tests` for fast coverage, `tests/integration_tests` when touching orchestrations). -- Run `uv run --project api --dev ruff check ./api`, `uv run --directory api --dev basedpyright`, and `uv run --project api --dev dev/pytest/pytest_unit_tests.sh` before submitting changes. diff --git a/api/agent_skills/plugin.md b/api/agent_skills/plugin.md deleted file mode 100644 index 954ddd236b..0000000000 --- a/api/agent_skills/plugin.md +++ /dev/null @@ -1 +0,0 @@ -// TBD diff --git a/api/agent_skills/plugin_oauth.md b/api/agent_skills/plugin_oauth.md deleted file mode 100644 index 954ddd236b..0000000000 --- a/api/agent_skills/plugin_oauth.md +++ /dev/null @@ -1 +0,0 @@ -// TBD diff --git a/api/agent_skills/trigger.md b/api/agent_skills/trigger.md deleted file mode 100644 index f4b076332c..0000000000 --- a/api/agent_skills/trigger.md +++ /dev/null @@ -1,53 +0,0 @@ -## Overview - -Trigger is a collection of nodes that we called `Start` nodes, also, the concept of `Start` is the same as `RootNode` in the workflow engine `core/workflow/graph_engine`, On the other hand, `Start` node is the entry point of workflows, every workflow run always starts from a `Start` node. - -## Trigger nodes - -- `UserInput` -- `Trigger Webhook` -- `Trigger Schedule` -- `Trigger Plugin` - -### UserInput - -Before `Trigger` concept is introduced, it's what we called `Start` node, but now, to avoid confusion, it was renamed to `UserInput` node, has a strong relation with `ServiceAPI` in `controllers/service_api/app` - -1. `UserInput` node introduces a list of arguments that need to be provided by the user, finally it will be converted into variables in the workflow variable pool. -1. `ServiceAPI` accept those arguments, and pass through them into `UserInput` node. -1. 
For its detailed implementation, please refer to `core/workflow/nodes/start` - -### Trigger Webhook - -Inside the Webhook node, Dify provides a UI panel that lets users define an HTTP manifest (`core/workflow/nodes/trigger_webhook/entities.py`.`WebhookData`). Dify also generates a random webhook id for each `Trigger Webhook` node; the implementation lives in `core/trigger/utils/endpoint.py`. `webhook-debug` is the debug mode for webhooks, and you can find it in `controllers/trigger/webhook.py`. - -Finally, requests to the `webhook` endpoint are converted into variables in the workflow variable pool during workflow execution. - -### Trigger Schedule - -The `Trigger Schedule` node lets users define a schedule that triggers the workflow; the detailed manifest is in `core/workflow/nodes/trigger_schedule/entities.py`. A poller and an executor handle millions of schedules, see `docker/entrypoint.sh` / `schedule/workflow_schedule_task.py` for details. - -To achieve this, a `WorkflowSchedulePlan` model was introduced in `models/trigger.py`, and `events/event_handlers/sync_workflow_schedule_when_app_published.py` syncs workflow schedule plans when an app is published. - -### Trigger Plugin - -The `Trigger Plugin` node lets users define their own distributed trigger plugin; whenever a request is received, Dify forwards it to the plugin and waits for the parsed variables. - -1. Requests are saved in storage by `services/trigger/trigger_request_service.py`, referenced by `services/trigger/trigger_service.py`.`TriggerService`.`process_endpoint` -1. Plugins accept those requests and parse variables from them; see `core/plugin/impl/trigger.py` for details. - -Dify also introduces a `subscription` concept: an endpoint address from Dify is bound to a third-party webhook service such as `Github`, `Slack`, `Linear`, `GoogleDrive`, or `Gmail`. Once a subscription is created, Dify continually receives requests from the platform and handles them one by one. - -## Worker Pool / Async Task - -Every event that triggers a new workflow run is handled asynchronously; the unified entrypoint is `services/async_workflow_service.py`.`AsyncWorkflowService`.`trigger_workflow_async`. - -The infrastructure is `celery`, already configured in `docker/entrypoint.sh`; the consumers are in `tasks/async_workflow_tasks.py`, and three queues handle the different user tiers: `PROFESSIONAL_QUEUE`, `TEAM_QUEUE`, `SANDBOX_QUEUE`. - -## Debug Strategy - -Dify divides users into two groups: builders and end users. - -Builders are the users who create workflows; for them, debugging a workflow is a critical part of the development process. As the start node of a workflow, trigger nodes can `listen` to events from `WebhookDebug`, `Schedule`, and `Plugin`; the debugging process is implemented in `controllers/console/app/workflow.py`.`DraftWorkflowTriggerNodeApi`. - -A polling process is a sequence of single `poll` operations; each `poll` fetches events cached in `Redis` and returns `None` if no event is found. In more detail: `core/trigger/debug/event_bus.py` handles the polling process, and `core/trigger/debug/event_selectors.py` selects the event poller based on the trigger type. 
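To make the polling contract described above concrete, here is a minimal sketch of a single non-blocking `poll` step; the Redis key layout and helper name are illustrative assumptions, not the actual implementation in `core/trigger/debug/event_bus.py`.

```python
import json
from typing import Any

from redis import Redis


def poll_debug_event(redis_client: Redis, node_id: str) -> dict[str, Any] | None:
    """Fetch one cached debug event for a trigger node, or None when nothing is queued."""
    # Hypothetical key layout; the real event bus defines its own keys and selectors.
    raw = redis_client.lpop(f"trigger:debug:{node_id}")
    if raw is None:
        return None  # no event yet; the caller simply polls again later
    return json.loads(raw)
```

A debug session would then just call this in a loop (with a short sleep) until an event arrives or the builder cancels listening.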
diff --git a/api/configs/feature/__init__.py b/api/configs/feature/__init__.py index cf71a33fa8..03aff7e6b5 100644 --- a/api/configs/feature/__init__.py +++ b/api/configs/feature/__init__.py @@ -1298,6 +1298,10 @@ class SandboxExpiredRecordsCleanConfig(BaseSettings): description="Retention days for sandbox expired workflow_run records and message records", default=30, ) + SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL: PositiveInt = Field( + description="Lock TTL for sandbox expired records clean task in seconds", + default=90000, + ) class FeatureConfig( diff --git a/api/context/flask_app_context.py b/api/context/flask_app_context.py index 4b693cd91f..360be16beb 100644 --- a/api/context/flask_app_context.py +++ b/api/context/flask_app_context.py @@ -9,7 +9,7 @@ from typing import Any, final from flask import Flask, current_app, g -from context import register_context_capturer +from core.workflow.context import register_context_capturer from core.workflow.context.execution_context import ( AppContext, IExecutionContext, diff --git a/api/core/tools/tool_engine.py b/api/core/tools/tool_engine.py index 13fd579e20..3f57a346cd 100644 --- a/api/core/tools/tool_engine.py +++ b/api/core/tools/tool_engine.py @@ -1,5 +1,6 @@ import contextlib import json +import logging from collections.abc import Generator, Iterable from copy import deepcopy from datetime import UTC, datetime @@ -36,6 +37,8 @@ from extensions.ext_database import db from models.enums import CreatorUserRole from models.model import Message, MessageFile +logger = logging.getLogger(__name__) + class ToolEngine: """ @@ -123,25 +126,31 @@ class ToolEngine: # transform tool invoke message to get LLM friendly message return plain_text, message_files, meta except ToolProviderCredentialValidationError as e: + logger.error(e, exc_info=True) error_response = "Please check your tool provider credentials" agent_tool_callback.on_tool_error(e) except (ToolNotFoundError, ToolNotSupportedError, ToolProviderNotFoundError) as e: error_response = f"there is not a tool named {tool.entity.identity.name}" + logger.error(e, exc_info=True) agent_tool_callback.on_tool_error(e) except ToolParameterValidationError as e: error_response = f"tool parameters validation error: {e}, please check your tool parameters" agent_tool_callback.on_tool_error(e) + logger.error(e, exc_info=True) except ToolInvokeError as e: error_response = f"tool invoke error: {e}" agent_tool_callback.on_tool_error(e) + logger.error(e, exc_info=True) except ToolEngineInvokeError as e: meta = e.meta error_response = f"tool invoke error: {meta.error}" agent_tool_callback.on_tool_error(e) + logger.error(e, exc_info=True) return error_response, [], meta except Exception as e: error_response = f"unknown error: {e}" agent_tool_callback.on_tool_error(e) + logger.error(e, exc_info=True) return error_response, [], ToolInvokeMeta.error_instance(error_response) diff --git a/api/core/tools/workflow_as_tool/tool.py b/api/core/tools/workflow_as_tool/tool.py index 283744b43b..9c1ceff145 100644 --- a/api/core/tools/workflow_as_tool/tool.py +++ b/api/core/tools/workflow_as_tool/tool.py @@ -20,7 +20,6 @@ from core.tools.entities.tool_entities import ( ) from core.tools.errors import ToolInvokeError from factories.file_factory import build_from_mapping -from libs.login import current_user from models import Account, Tenant from models.model import App, EndUser from models.workflow import Workflow @@ -28,21 +27,6 @@ from models.workflow import Workflow logger = logging.getLogger(__name__) -def 
_try_resolve_user_from_request() -> Account | EndUser | None: - """ - Try to resolve user from Flask request context. - - Returns None if not in a request context or if user is not available. - """ - # Note: `current_user` is a LocalProxy. Never compare it with None directly. - # Use _get_current_object() to dereference the proxy - user = getattr(current_user, "_get_current_object", lambda: current_user)() - # Check if we got a valid user object - if user is not None and hasattr(user, "id"): - return user - return None - - class WorkflowTool(Tool): """ Workflow tool. @@ -223,12 +207,6 @@ class WorkflowTool(Tool): Returns: Account | EndUser | None: The resolved user object, or None if resolution fails. """ - # Try to resolve user from request context first - user = _try_resolve_user_from_request() - if user is not None: - return user - - # Fall back to database resolution return self._resolve_user_from_database(user_id=user_id) def _resolve_user_from_database(self, user_id: str) -> Account | EndUser | None: diff --git a/api/core/workflow/context/__init__.py b/api/core/workflow/context/__init__.py index 31e1f2c8d9..1237d6a017 100644 --- a/api/core/workflow/context/__init__.py +++ b/api/core/workflow/context/__init__.py @@ -7,16 +7,28 @@ execution in multi-threaded environments. from core.workflow.context.execution_context import ( AppContext, + ContextProviderNotFoundError, ExecutionContext, IExecutionContext, NullAppContext, capture_current_context, + read_context, + register_context, + register_context_capturer, + reset_context_provider, ) +from core.workflow.context.models import SandboxContext __all__ = [ "AppContext", + "ContextProviderNotFoundError", "ExecutionContext", "IExecutionContext", "NullAppContext", + "SandboxContext", "capture_current_context", + "read_context", + "register_context", + "register_context_capturer", + "reset_context_provider", ] diff --git a/api/core/workflow/context/execution_context.py b/api/core/workflow/context/execution_context.py index 5a4203be93..d951c95d68 100644 --- a/api/core/workflow/context/execution_context.py +++ b/api/core/workflow/context/execution_context.py @@ -4,9 +4,11 @@ Execution Context - Abstracted context management for workflow execution. import contextvars from abc import ABC, abstractmethod -from collections.abc import Generator +from collections.abc import Callable, Generator from contextlib import AbstractContextManager, contextmanager -from typing import Any, Protocol, final, runtime_checkable +from typing import Any, Protocol, TypeVar, final, runtime_checkable + +from pydantic import BaseModel class AppContext(ABC): @@ -204,13 +206,75 @@ class ExecutionContextBuilder: ) +_capturer: Callable[[], IExecutionContext] | None = None + +# Tenant-scoped providers using tuple keys for clarity and constant-time lookup. +# Key mapping: +# (name, tenant_id) -> provider +# - name: namespaced identifier (recommend prefixing, e.g. "workflow.sandbox") +# - tenant_id: tenant identifier string +# Value: +# provider: Callable[[], BaseModel] returning the typed context value +# Type-safety note: +# - This registry cannot enforce that all providers for a given name return the same BaseModel type. +# - Implementors SHOULD provide typed wrappers around register/read (like Go's context best practice), +# e.g. def register_sandbox_ctx(tenant_id: str, p: Callable[[], SandboxContext]) and +# def read_sandbox_ctx(tenant_id: str) -> SandboxContext. 
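# Illustrative sketch (editorial example, not part of this diff): the typed wrappers
# recommended in the note above could look roughly like the following, assuming
# SandboxContext is imported from core/workflow/context/models.py. The wrapper names
# and the "workflow.sandbox" key are assumptions for illustration only.
#
#     from core.workflow.context.models import SandboxContext
#
#     def register_sandbox_context(tenant_id: str, provider: Callable[[], SandboxContext]) -> None:
#         register_context("workflow.sandbox", tenant_id, provider)
#
#     def read_sandbox_context(tenant_id: str) -> SandboxContext:
#         value = read_context("workflow.sandbox", tenant_id=tenant_id)
#         if not isinstance(value, SandboxContext):  # narrow BaseModel to the typed model
#             raise TypeError("'workflow.sandbox' provider returned an unexpected type")
#         return value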
+_tenant_context_providers: dict[tuple[str, str], Callable[[], BaseModel]] = {} + +T = TypeVar("T", bound=BaseModel) + + +class ContextProviderNotFoundError(KeyError): + """Raised when a tenant-scoped context provider is missing for a given (name, tenant_id).""" + + pass + + +def register_context_capturer(capturer: Callable[[], IExecutionContext]) -> None: + """Register a single enterable execution context capturer (e.g., Flask).""" + global _capturer + _capturer = capturer + + +def register_context(name: str, tenant_id: str, provider: Callable[[], BaseModel]) -> None: + """Register a tenant-specific provider for a named context. + + Tip: use a namespaced "name" (e.g., "workflow.sandbox") to avoid key collisions. + Consider adding a typed wrapper for this registration in your feature module. + """ + _tenant_context_providers[(name, tenant_id)] = provider + + +def read_context(name: str, *, tenant_id: str) -> BaseModel: + """ + Read a context value for a specific tenant. + + Raises KeyError if the provider for (name, tenant_id) is not registered. + """ + prov = _tenant_context_providers.get((name, tenant_id)) + if prov is None: + raise ContextProviderNotFoundError(f"Context provider '{name}' not registered for tenant '{tenant_id}'") + return prov() + + def capture_current_context() -> IExecutionContext: """ Capture current execution context from the calling environment. - Returns: - IExecutionContext with captured context + If a capturer is registered (e.g., Flask), use it. Otherwise, return a minimal + context with NullAppContext + copy of current contextvars. """ - from context import capture_current_context + if _capturer is None: + return ExecutionContext( + app_context=NullAppContext(), + context_vars=contextvars.copy_context(), + ) + return _capturer() - return capture_current_context() + +def reset_context_provider() -> None: + """Reset the capturer and all tenant-scoped context providers (primarily for tests).""" + global _capturer + _capturer = None + _tenant_context_providers.clear() diff --git a/api/core/workflow/context/models.py b/api/core/workflow/context/models.py new file mode 100644 index 0000000000..af5a4b2614 --- /dev/null +++ b/api/core/workflow/context/models.py @@ -0,0 +1,13 @@ +from __future__ import annotations + +from pydantic import AnyHttpUrl, BaseModel + + +class SandboxContext(BaseModel): + """Typed context for sandbox integration. All fields optional by design.""" + + sandbox_url: AnyHttpUrl | None = None + sandbox_token: str | None = None # optional, if later needed for auth + + +__all__ = ["SandboxContext"] diff --git a/api/migrations/versions/2025_11_06_1603-9e6fa5cbcd80_make_message_annotation_question_not_.py b/api/migrations/versions/2025_11_06_1603-9e6fa5cbcd80_make_message_annotation_question_not_.py new file mode 100644 index 0000000000..624be1d073 --- /dev/null +++ b/api/migrations/versions/2025_11_06_1603-9e6fa5cbcd80_make_message_annotation_question_not_.py @@ -0,0 +1,60 @@ +"""make message annotation question not nullable + +Revision ID: 9e6fa5cbcd80 +Revises: 03f8dcbc611e +Create Date: 2025-11-06 16:03:54.549378 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = '9e6fa5cbcd80' +down_revision = '288345cd01d1' +branch_labels = None +depends_on = None + + +def upgrade(): + bind = op.get_bind() + message_annotations = sa.table( + "message_annotations", + sa.column("id", sa.String), + sa.column("message_id", sa.String), + sa.column("question", sa.Text), + ) + messages = sa.table( + "messages", + sa.column("id", sa.String), + sa.column("query", sa.Text), + ) + update_question_from_message = ( + sa.update(message_annotations) + .where( + sa.and_( + message_annotations.c.question.is_(None), + message_annotations.c.message_id.isnot(None), + ) + ) + .values( + question=sa.select(sa.func.coalesce(messages.c.query, "")) + .where(messages.c.id == message_annotations.c.message_id) + .scalar_subquery() + ) + ) + bind.execute(update_question_from_message) + + fill_remaining_questions = ( + sa.update(message_annotations) + .where(message_annotations.c.question.is_(None)) + .values(question="") + ) + bind.execute(fill_remaining_questions) + with op.batch_alter_table('message_annotations', schema=None) as batch_op: + batch_op.alter_column('question', existing_type=sa.TEXT(), nullable=False) + + +def downgrade(): + with op.batch_alter_table('message_annotations', schema=None) as batch_op: + batch_op.alter_column('question', existing_type=sa.TEXT(), nullable=True) diff --git a/api/models/model.py b/api/models/model.py index d6a0aa3bb3..72f2d173cc 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -1423,7 +1423,7 @@ class MessageAnnotation(Base): app_id: Mapped[str] = mapped_column(StringUUID) conversation_id: Mapped[str | None] = mapped_column(StringUUID, sa.ForeignKey("conversations.id")) message_id: Mapped[str | None] = mapped_column(StringUUID) - question: Mapped[str | None] = mapped_column(LongText, nullable=True) + question: Mapped[str] = mapped_column(LongText, nullable=False) content: Mapped[str] = mapped_column(LongText, nullable=False) hit_count: Mapped[int] = mapped_column(sa.Integer, nullable=False, server_default=sa.text("0")) account_id: Mapped[str] = mapped_column(StringUUID, nullable=False) diff --git a/api/schedule/clean_messages.py b/api/schedule/clean_messages.py index e85bba8823..be5f483b95 100644 --- a/api/schedule/clean_messages.py +++ b/api/schedule/clean_messages.py @@ -2,9 +2,11 @@ import logging import time import click +from redis.exceptions import LockError import app from configs import dify_config +from extensions.ext_redis import redis_client from services.retention.conversation.messages_clean_policy import create_message_clean_policy from services.retention.conversation.messages_clean_service import MessagesCleanService @@ -31,12 +33,16 @@ def clean_messages(): ) # Create and run the cleanup service - service = MessagesCleanService.from_days( - policy=policy, - days=dify_config.SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS, - batch_size=dify_config.SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE, - ) - stats = service.run() + # lock the task to avoid concurrent execution in case of the future data volume growth + with redis_client.lock( + "retention:clean_messages", timeout=dify_config.SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL, blocking=False + ): + service = MessagesCleanService.from_days( + policy=policy, + days=dify_config.SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS, + batch_size=dify_config.SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE, + ) + stats = service.run() end_at = time.perf_counter() click.echo( @@ -50,6 +56,16 @@ def clean_messages(): fg="green", ) ) + except LockError: + end_at = time.perf_counter() + 
logger.exception("clean_messages: acquire task lock failed, skip current execution") + click.echo( + click.style( + f"clean_messages: skipped (lock already held) - latency: {end_at - start_at:.2f}s", + fg="yellow", + ) + ) + raise except Exception as e: end_at = time.perf_counter() logger.exception("clean_messages failed") diff --git a/api/schedule/clean_workflow_runs_task.py b/api/schedule/clean_workflow_runs_task.py index 9f5bf8e150..ff45a3ddf2 100644 --- a/api/schedule/clean_workflow_runs_task.py +++ b/api/schedule/clean_workflow_runs_task.py @@ -1,11 +1,16 @@ +import logging from datetime import UTC, datetime import click +from redis.exceptions import LockError import app from configs import dify_config +from extensions.ext_redis import redis_client from services.retention.workflow_run.clear_free_plan_expired_workflow_run_logs import WorkflowRunCleanup +logger = logging.getLogger(__name__) + @app.celery.task(queue="retention") def clean_workflow_runs_task() -> None: @@ -25,19 +30,50 @@ def clean_workflow_runs_task() -> None: start_time = datetime.now(UTC) - WorkflowRunCleanup( - days=dify_config.SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS, - batch_size=dify_config.SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE, - start_from=None, - end_before=None, - ).run() + try: + # lock the task to avoid concurrent execution in case of the future data volume growth + with redis_client.lock( + "retention:clean_workflow_runs_task", + timeout=dify_config.SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL, + blocking=False, + ): + WorkflowRunCleanup( + days=dify_config.SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS, + batch_size=dify_config.SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE, + start_from=None, + end_before=None, + ).run() - end_time = datetime.now(UTC) - elapsed = end_time - start_time - click.echo( - click.style( - f"Scheduled workflow run cleanup finished. start={start_time.isoformat()} " - f"end={end_time.isoformat()} duration={elapsed}", - fg="green", + end_time = datetime.now(UTC) + elapsed = end_time - start_time + click.echo( + click.style( + f"Scheduled workflow run cleanup finished. start={start_time.isoformat()} " + f"end={end_time.isoformat()} duration={elapsed}", + fg="green", + ) ) - ) + except LockError: + end_time = datetime.now(UTC) + elapsed = end_time - start_time + logger.exception("clean_workflow_runs_task: acquire task lock failed, skip current execution") + click.echo( + click.style( + f"Scheduled workflow run cleanup skipped (lock already held). " + f"start={start_time.isoformat()} end={end_time.isoformat()} duration={elapsed}", + fg="yellow", + ) + ) + raise + except Exception as e: + end_time = datetime.now(UTC) + elapsed = end_time - start_time + logger.exception("clean_workflow_runs_task failed") + click.echo( + click.style( + f"Scheduled workflow run cleanup failed. 
start={start_time.isoformat()} " + f"end={end_time.isoformat()} duration={elapsed} - {str(e)}", + fg="red", + ) + ) + raise diff --git a/api/services/annotation_service.py b/api/services/annotation_service.py index b73302508a..56e9cc6a00 100644 --- a/api/services/annotation_service.py +++ b/api/services/annotation_service.py @@ -209,8 +209,12 @@ class AppAnnotationService: if not app: raise NotFound("App not found") + question = args.get("question") + if question is None: + raise ValueError("'question' is required") + annotation = MessageAnnotation( - app_id=app.id, content=args["answer"], question=args["question"], account_id=current_user.id + app_id=app.id, content=args["answer"], question=question, account_id=current_user.id ) db.session.add(annotation) db.session.commit() @@ -219,7 +223,7 @@ class AppAnnotationService: if annotation_setting: add_annotation_to_index_task.delay( annotation.id, - args["question"], + question, current_tenant_id, app_id, annotation_setting.collection_binding_id, @@ -244,8 +248,12 @@ class AppAnnotationService: if not annotation: raise NotFound("Annotation not found") + question = args.get("question") + if question is None: + raise ValueError("'question' is required") + annotation.content = args["answer"] - annotation.question = args["question"] + annotation.question = question db.session.commit() # if annotation reply is enabled , add annotation to index diff --git a/api/tests/test_containers_integration_tests/services/test_annotation_service.py b/api/tests/test_containers_integration_tests/services/test_annotation_service.py index 5555400ca6..4f5190e533 100644 --- a/api/tests/test_containers_integration_tests/services/test_annotation_service.py +++ b/api/tests/test_containers_integration_tests/services/test_annotation_service.py @@ -220,6 +220,23 @@ class TestAnnotationService: # Note: In this test, no annotation setting exists, so task should not be called mock_external_service_dependencies["add_task"].delay.assert_not_called() + def test_insert_app_annotation_directly_requires_question( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Question must be provided when inserting annotations directly. 
+ """ + fake = Faker() + app, _ = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + annotation_args = { + "question": None, + "answer": fake.text(max_nb_chars=200), + } + + with pytest.raises(ValueError): + AppAnnotationService.insert_app_annotation_directly(annotation_args, app.id) + def test_insert_app_annotation_directly_app_not_found( self, db_session_with_containers, mock_external_service_dependencies ): diff --git a/api/tests/unit_tests/core/workflow/context/test_execution_context.py b/api/tests/unit_tests/core/workflow/context/test_execution_context.py index 217c39385c..63466cfb5e 100644 --- a/api/tests/unit_tests/core/workflow/context/test_execution_context.py +++ b/api/tests/unit_tests/core/workflow/context/test_execution_context.py @@ -5,6 +5,7 @@ from typing import Any from unittest.mock import MagicMock import pytest +from pydantic import BaseModel from core.workflow.context.execution_context import ( AppContext, @@ -12,6 +13,8 @@ from core.workflow.context.execution_context import ( ExecutionContextBuilder, IExecutionContext, NullAppContext, + read_context, + register_context, ) @@ -256,3 +259,31 @@ class TestCaptureCurrentContext: # Context variables should be captured assert result.context_vars is not None + + +class TestTenantScopedContextRegistry: + def setup_method(self): + from core.workflow.context import reset_context_provider + + reset_context_provider() + + def teardown_method(self): + from core.workflow.context import reset_context_provider + + reset_context_provider() + + def test_tenant_provider_read_ok(self): + class SandboxContext(BaseModel): + base_url: str | None = None + + register_context("workflow.sandbox", "t1", lambda: SandboxContext(base_url="http://t1")) + register_context("workflow.sandbox", "t2", lambda: SandboxContext(base_url="http://t2")) + + assert read_context("workflow.sandbox", tenant_id="t1").base_url == "http://t1" + assert read_context("workflow.sandbox", tenant_id="t2").base_url == "http://t2" + + def test_missing_provider_raises_keyerror(self): + from core.workflow.context import ContextProviderNotFoundError + + with pytest.raises(ContextProviderNotFoundError): + read_context("missing", tenant_id="unknown") diff --git a/docker/.env.example b/docker/.env.example index 627a3a23da..c7246ae11f 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -1518,3 +1518,4 @@ AMPLITUDE_API_KEY= SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD=21 SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE=1000 SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS=30 +SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL=90000 diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 429667e75f..902ca3103c 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -682,6 +682,7 @@ x-shared-env: &shared-api-worker-env SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD: ${SANDBOX_EXPIRED_RECORDS_CLEAN_GRACEFUL_PERIOD:-21} SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE: ${SANDBOX_EXPIRED_RECORDS_CLEAN_BATCH_SIZE:-1000} SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS: ${SANDBOX_EXPIRED_RECORDS_RETENTION_DAYS:-30} + SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL: ${SANDBOX_EXPIRED_RECORDS_CLEAN_TASK_LOCK_TTL:-90000} services: # Init container to fix permissions diff --git a/web/README.md b/web/README.md index 13780eec6c..9c731a081a 100644 --- a/web/README.md +++ b/web/README.md @@ -138,7 +138,7 @@ This will help you determine the testing strategy. See [web/testing/testing.md]( ## Documentation -Visit to view the full documentation. 
+Visit to view the full documentation. ## Community diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx index 81b4f2474e..f07b2932c9 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx @@ -5,7 +5,6 @@ import type { BlockEnum } from '@/app/components/workflow/types' import type { UpdateAppSiteCodeResponse } from '@/models/app' import type { App } from '@/types/app' import type { I18nKeysByPrefix } from '@/types/i18n' -import * as React from 'react' import { useCallback, useMemo } from 'react' import { useTranslation } from 'react-i18next' import { useContext } from 'use-context-selector' @@ -17,7 +16,6 @@ import { ToastContext } from '@/app/components/base/toast' import MCPServiceCard from '@/app/components/tools/mcp/mcp-service-card' import { isTriggerNode } from '@/app/components/workflow/types' import { NEED_REFRESH_APP_LIST_KEY } from '@/config' -import { useDocLink } from '@/context/i18n' import { fetchAppDetail, updateAppSiteAccessToken, @@ -36,7 +34,6 @@ export type ICardViewProps = { const CardView: FC = ({ appId, isInPanel, className }) => { const { t } = useTranslation() - const docLink = useDocLink() const { notify } = useContext(ToastContext) const appDetail = useAppStore(state => state.appDetail) const setAppDetail = useAppStore(state => state.setAppDetail) @@ -59,25 +56,13 @@ const CardView: FC = ({ appId, isInPanel, className }) => { const shouldRenderAppCards = !isWorkflowApp || hasTriggerNode === false const disableAppCards = !shouldRenderAppCards - const triggerDocUrl = docLink('/guides/workflow/node/start') const buildTriggerModeMessage = useCallback((featureName: string) => (
{t('overview.disableTooltip.triggerMode', { ns: 'appOverview', feature: featureName })}
- { - event.stopPropagation() - }} - > - {t('overview.appInfo.enableTooltip.learnMore', { ns: 'appOverview' })} -
- ), [t, triggerDocUrl]) + ), [t]) const disableWebAppTooltip = disableAppCards ? buildTriggerModeMessage(t('overview.appInfo.title', { ns: 'appOverview' })) diff --git a/web/app/components/app/configuration/config-prompt/conversation-history/history-panel.spec.tsx b/web/app/components/app/configuration/config-prompt/conversation-history/history-panel.spec.tsx index 60627e12c2..827986f521 100644 --- a/web/app/components/app/configuration/config-prompt/conversation-history/history-panel.spec.tsx +++ b/web/app/components/app/configuration/config-prompt/conversation-history/history-panel.spec.tsx @@ -1,12 +1,6 @@ import { render, screen } from '@testing-library/react' -import * as React from 'react' import HistoryPanel from './history-panel' -const mockDocLink = vi.fn(() => 'doc-link') -vi.mock('@/context/i18n', () => ({ - useDocLink: () => mockDocLink, -})) - vi.mock('@/app/components/app/configuration/base/operation-btn', () => ({ default: ({ onClick }: { onClick: () => void }) => ( , })) +// Mock utils to control threshold and plan limits +vi.mock('../utils', () => ({ + getPlanVectorSpaceLimitMB: (planType: string) => { + // Return 5 for sandbox (threshold) and 100 for team + if (planType === 'sandbox') + return 5 + if (planType === 'team') + return 100 + return 0 + }, +})) + describe('VectorSpaceFull', () => { const planMock = { type: 'team', @@ -52,6 +64,6 @@ describe('VectorSpaceFull', () => { render() expect(screen.getByText('8')).toBeInTheDocument() - expect(screen.getByText('10MB')).toBeInTheDocument() + expect(screen.getByText('100MB')).toBeInTheDocument() }) }) diff --git a/web/app/components/datasets/create/step-three/index.spec.tsx b/web/app/components/datasets/create/step-three/index.spec.tsx index 43b4916778..74c5912a1b 100644 --- a/web/app/components/datasets/create/step-three/index.spec.tsx +++ b/web/app/components/datasets/create/step-three/index.spec.tsx @@ -190,7 +190,7 @@ describe('StepThree', () => { // Assert const link = screen.getByText('datasetPipeline.addDocuments.stepThree.learnMore') - expect(link).toHaveAttribute('href', 'https://docs.dify.ai/en-US/guides/knowledge-base/integrate-knowledge-within-application') + expect(link).toHaveAttribute('href', 'https://docs.dify.ai/en-US/use-dify/knowledge/integrate-knowledge-within-application') expect(link).toHaveAttribute('target', '_blank') expect(link).toHaveAttribute('rel', 'noreferrer noopener') }) diff --git a/web/app/components/datasets/create/step-three/index.tsx b/web/app/components/datasets/create/step-three/index.tsx index ad26711311..5ab21f6302 100644 --- a/web/app/components/datasets/create/step-three/index.tsx +++ b/web/app/components/datasets/create/step-three/index.tsx @@ -87,7 +87,7 @@ const StepThree = ({ datasetId, datasetName, indexingType, creationCache, retrie
{t('stepThree.sideTipTitle', { ns: 'datasetCreation' })}
{t('stepThree.sideTipContent', { ns: 'datasetCreation' })}
= ({ {t('form.retrievalSetting.learnMore', { ns: 'datasetSettings' })} diff --git a/web/app/components/datasets/create/website/watercrawl/index.spec.tsx b/web/app/components/datasets/create/website/watercrawl/index.spec.tsx index e694537895..4bb8267cea 100644 --- a/web/app/components/datasets/create/website/watercrawl/index.spec.tsx +++ b/web/app/components/datasets/create/website/watercrawl/index.spec.tsx @@ -24,6 +24,11 @@ vi.mock('@/context/modal-context', () => ({ }), })) +// Mock i18n context +vi.mock('@/context/i18n', () => ({ + useDocLink: () => (path?: string) => path ? `https://docs.dify.ai/en${path}` : 'https://docs.dify.ai/en/', +})) + // ============================================================================ // Test Data Factories // ============================================================================ diff --git a/web/app/components/datasets/documents/components/documents-header.tsx b/web/app/components/datasets/documents/components/documents-header.tsx index ed97742fdd..490893d43f 100644 --- a/web/app/components/datasets/documents/components/documents-header.tsx +++ b/web/app/components/datasets/documents/components/documents-header.tsx @@ -121,7 +121,7 @@ const DocumentsHeader: FC = ({ className="flex items-center text-text-accent" target="_blank" rel="noopener noreferrer" - href={docLink('/guides/knowledge-base/integrate-knowledge-within-application')} + href={docLink('/use-dify/knowledge/integrate-knowledge-within-application')} > {t('list.learnMore', { ns: 'datasetDocuments' })} diff --git a/web/app/components/datasets/documents/create-from-pipeline/data-source/online-documents/index.tsx b/web/app/components/datasets/documents/create-from-pipeline/data-source/online-documents/index.tsx index 9b0df231bd..4bdaac895b 100644 --- a/web/app/components/datasets/documents/create-from-pipeline/data-source/online-documents/index.tsx +++ b/web/app/components/datasets/documents/create-from-pipeline/data-source/online-documents/index.tsx @@ -138,7 +138,7 @@ const OnlineDocuments = ({
{ render() // Assert - expect(mockDocLink).toHaveBeenCalledWith('/guides/knowledge-base/knowledge-pipeline/authorize-data-source') + expect(mockDocLink).toHaveBeenCalledWith('/use-dify/knowledge/knowledge-pipeline/authorize-data-source') }) }) diff --git a/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/index.tsx b/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/index.tsx index 508745aaeb..4346a2d0af 100644 --- a/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/index.tsx +++ b/web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/index.tsx @@ -196,7 +196,7 @@ const OnlineDrive = ({
{ // Assert const link = screen.getByRole('link', { name: 'datasetPipeline.addDocuments.stepThree.learnMore' }) - expect(link).toHaveAttribute('href', 'https://docs.dify.ai/en-US/guides/knowledge-base/integrate-knowledge-within-application') + expect(link).toHaveAttribute('href', 'https://docs.dify.ai/en-US/use-dify/knowledge/knowledge-pipeline/authorize-data-source') expect(link).toHaveAttribute('target', '_blank') expect(link).toHaveAttribute('rel', 'noreferrer noopener') }) diff --git a/web/app/components/datasets/documents/create-from-pipeline/processing/index.tsx b/web/app/components/datasets/documents/create-from-pipeline/processing/index.tsx index 97c8937442..283600fa69 100644 --- a/web/app/components/datasets/documents/create-from-pipeline/processing/index.tsx +++ b/web/app/components/datasets/documents/create-from-pipeline/processing/index.tsx @@ -44,7 +44,7 @@ const Processing = ({
{t('stepThree.sideTipTitle', { ns: 'datasetCreation' })}
{t('stepThree.sideTipContent', { ns: 'datasetCreation' })}
= React.memo(({ {variable === 'endpoint' && ( { render() const docLink = screen.getByText('dataset.externalAPIPanelDocumentation') expect(docLink).toBeInTheDocument() - expect(docLink.closest('a')).toHaveAttribute('href', 'https://docs.example.com/guides/knowledge-base/connect-external-knowledge-base') + expect(docLink.closest('a')).toHaveAttribute('href', 'https://docs.example.com/use-dify/knowledge/connect-external-knowledge-base') }) it('should render create button', () => { diff --git a/web/app/components/datasets/external-api/external-api-panel/index.tsx b/web/app/components/datasets/external-api/external-api-panel/index.tsx index a137348626..c37ff20ba7 100644 --- a/web/app/components/datasets/external-api/external-api-panel/index.tsx +++ b/web/app/components/datasets/external-api/external-api-panel/index.tsx @@ -54,7 +54,7 @@ const ExternalAPIPanel: React.FC = ({ onClose }) => {
{t('externalAPIPanelDescription', { ns: 'dataset' })}
diff --git a/web/app/components/datasets/external-knowledge-base/create/InfoPanel.tsx b/web/app/components/datasets/external-knowledge-base/create/InfoPanel.tsx index beb6a3cf71..61b37a0a1d 100644 --- a/web/app/components/datasets/external-knowledge-base/create/InfoPanel.tsx +++ b/web/app/components/datasets/external-knowledge-base/create/InfoPanel.tsx @@ -18,14 +18,14 @@ const InfoPanel = () => { {t('connectDatasetIntro.content.front', { ns: 'dataset' })} - + {t('connectDatasetIntro.content.link', { ns: 'dataset' })} {t('connectDatasetIntro.content.end', { ns: 'dataset' })} diff --git a/web/app/components/datasets/external-knowledge-base/create/index.spec.tsx b/web/app/components/datasets/external-knowledge-base/create/index.spec.tsx index 2fce096cd5..d56833fd36 100644 --- a/web/app/components/datasets/external-knowledge-base/create/index.spec.tsx +++ b/web/app/components/datasets/external-knowledge-base/create/index.spec.tsx @@ -146,7 +146,7 @@ describe('ExternalKnowledgeBaseCreate', () => { renderComponent() const docLink = screen.getByText('dataset.connectHelper.helper4') - expect(docLink).toHaveAttribute('href', 'https://docs.dify.ai/en/guides/knowledge-base/connect-external-knowledge-base') + expect(docLink).toHaveAttribute('href', 'https://docs.dify.ai/en/use-dify/knowledge/connect-external-knowledge-base') expect(docLink).toHaveAttribute('target', '_blank') expect(docLink).toHaveAttribute('rel', 'noopener noreferrer') }) diff --git a/web/app/components/datasets/external-knowledge-base/create/index.tsx b/web/app/components/datasets/external-knowledge-base/create/index.tsx index 1d17b23b43..07b6e71fa6 100644 --- a/web/app/components/datasets/external-knowledge-base/create/index.tsx +++ b/web/app/components/datasets/external-knowledge-base/create/index.tsx @@ -61,7 +61,7 @@ const ExternalKnowledgeBaseCreate: React.FC = {t('connectHelper.helper1', { ns: 'dataset' })} {t('connectHelper.helper2', { ns: 'dataset' })} {t('connectHelper.helper3', { ns: 'dataset' })} - + {t('connectHelper.helper4', { ns: 'dataset' })} diff --git a/web/app/components/datasets/hit-testing/modify-retrieval-modal.tsx b/web/app/components/datasets/hit-testing/modify-retrieval-modal.tsx index d21297fc93..a942c402ed 100644 --- a/web/app/components/datasets/hit-testing/modify-retrieval-modal.tsx +++ b/web/app/components/datasets/hit-testing/modify-retrieval-modal.tsx @@ -96,10 +96,7 @@ const ModifyRetrievalModal: FC = ({ {t('form.retrievalSetting.learnMore', { ns: 'datasetSettings' })} diff --git a/web/app/components/datasets/no-linked-apps-panel.tsx b/web/app/components/datasets/no-linked-apps-panel.tsx index 1b0357bc6a..12e87a7379 100644 --- a/web/app/components/datasets/no-linked-apps-panel.tsx +++ b/web/app/components/datasets/no-linked-apps-panel.tsx @@ -15,7 +15,7 @@ const NoLinkedAppsPanel = () => {
{t('datasetMenus.emptyTip', { ns: 'common' })}
diff --git a/web/app/components/datasets/settings/form/index.tsx b/web/app/components/datasets/settings/form/index.tsx index 32dde39e97..a80efff95f 100644 --- a/web/app/components/datasets/settings/form/index.tsx +++ b/web/app/components/datasets/settings/form/index.tsx @@ -289,7 +289,7 @@ const Form = () => { {t('form.chunkStructure.learnMore', { ns: 'datasetSettings' })} @@ -446,10 +446,7 @@ const Form = () => { {t('form.retrievalSetting.learnMore', { ns: 'datasetSettings' })} diff --git a/web/app/components/header/account-dropdown/index.tsx b/web/app/components/header/account-dropdown/index.tsx index e16c00acd0..07dd0fca3d 100644 --- a/web/app/components/header/account-dropdown/index.tsx +++ b/web/app/components/header/account-dropdown/index.tsx @@ -137,7 +137,7 @@ export default function AppSelector() { diff --git a/web/app/components/header/account-setting/api-based-extension-page/empty.tsx b/web/app/components/header/account-setting/api-based-extension-page/empty.tsx index 38525993fa..d75e66f8d0 100644 --- a/web/app/components/header/account-setting/api-based-extension-page/empty.tsx +++ b/web/app/components/header/account-setting/api-based-extension-page/empty.tsx @@ -17,7 +17,7 @@ const Empty = () => {
{t('apiBasedExtension.title', { ns: 'common' })}
diff --git a/web/app/components/header/account-setting/api-based-extension-page/modal.tsx b/web/app/components/header/account-setting/api-based-extension-page/modal.tsx index d3146d7baa..f35986dbb0 100644 --- a/web/app/components/header/account-setting/api-based-extension-page/modal.tsx +++ b/web/app/components/header/account-setting/api-based-extension-page/modal.tsx @@ -102,7 +102,7 @@ const ApiBasedExtensionModal: FC = ({
{t('detailPanel.endpointsTip', { ns: 'plugin' })}
diff --git a/web/app/components/plugins/plugin-page/debug-info.tsx b/web/app/components/plugins/plugin-page/debug-info.tsx index f62f8a4134..f3eed424f4 100644 --- a/web/app/components/plugins/plugin-page/debug-info.tsx +++ b/web/app/components/plugins/plugin-page/debug-info.tsx @@ -8,8 +8,7 @@ import * as React from 'react' import { useTranslation } from 'react-i18next' import Button from '@/app/components/base/button' import Tooltip from '@/app/components/base/tooltip' -import { getDocsUrl } from '@/app/components/plugins/utils' -import { useLocale } from '@/context/i18n' +import { useDocLink } from '@/context/i18n' import { useDebugKey } from '@/service/use-plugins' import KeyValueItem from '../base/key-value-item' @@ -17,7 +16,7 @@ const i18nPrefix = 'debugInfo' const DebugInfo: FC = () => { const { t } = useTranslation() - const locale = useLocale() + const docLink = useDocLink() const { data: info, isLoading } = useDebugKey() // info.key likes 4580bdb7-b878-471c-a8a4-bfd760263a53 mask the middle part using *. @@ -34,7 +33,7 @@ const DebugInfo: FC = () => { <>
{t(`${i18nPrefix}.title`, { ns: 'plugin' })} - + {t(`${i18nPrefix}.viewDocs`, { ns: 'plugin' })} diff --git a/web/app/components/plugins/plugin-page/index.spec.tsx b/web/app/components/plugins/plugin-page/index.spec.tsx index a3ea7f7125..9b7ada2a87 100644 --- a/web/app/components/plugins/plugin-page/index.spec.tsx +++ b/web/app/components/plugins/plugin-page/index.spec.tsx @@ -24,6 +24,7 @@ vi.mock('@/hooks/use-document-title', () => ({ vi.mock('@/context/i18n', () => ({ useLocale: () => 'en-US', + useDocLink: () => (path: string) => `https://docs.example.com${path}`, })) vi.mock('@/context/global-public-context', () => ({ diff --git a/web/app/components/plugins/plugin-page/index.tsx b/web/app/components/plugins/plugin-page/index.tsx index d852e4d0b8..efb665197a 100644 --- a/web/app/components/plugins/plugin-page/index.tsx +++ b/web/app/components/plugins/plugin-page/index.tsx @@ -15,10 +15,9 @@ import Button from '@/app/components/base/button' import TabSlider from '@/app/components/base/tab-slider' import Tooltip from '@/app/components/base/tooltip' import ReferenceSettingModal from '@/app/components/plugins/reference-setting-modal' -import { getDocsUrl } from '@/app/components/plugins/utils' import { MARKETPLACE_API_PREFIX, SUPPORT_INSTALL_LOCAL_FILE_EXTENSIONS } from '@/config' import { useGlobalPublicStore } from '@/context/global-public-context' -import { useLocale } from '@/context/i18n' +import { useDocLink } from '@/context/i18n' import useDocumentTitle from '@/hooks/use-document-title' import { usePluginInstallation } from '@/hooks/use-query-params' import { fetchBundleInfoFromMarketPlace, fetchManifestFromMarketPlace } from '@/service/plugins' @@ -47,7 +46,7 @@ const PluginPage = ({ marketplace, }: PluginPageProps) => { const { t } = useTranslation() - const locale = useLocale() + const docLink = useDocLink() useDocumentTitle(t('metadata.title', { ns: 'plugin' })) // Use nuqs hook for installation state @@ -175,7 +174,7 @@ const PluginPage = ({
window.open(docLink('/guides/workflow/node/user-input'), '_blank')} + onClick={() => window.open(docLink('/use-dify/nodes/user-input'), '_blank')} > {t('overview.appInfo.enableTooltip.learnMore', { ns: 'appOverview' })}
diff --git a/web/app/components/tools/provider/custom-create-card.tsx b/web/app/components/tools/provider/custom-create-card.tsx index 637d17c3c3..bf86a1f833 100644 --- a/web/app/components/tools/provider/custom-create-card.tsx +++ b/web/app/components/tools/provider/custom-create-card.tsx @@ -2,16 +2,12 @@ import type { CustomCollectionBackend } from '../types' import { RiAddCircleFill, - RiArrowRightUpLine, - RiBookOpenLine, } from '@remixicon/react' -import { useMemo, useState } from 'react' +import { useState } from 'react' import { useTranslation } from 'react-i18next' import Toast from '@/app/components/base/toast' import EditCustomToolModal from '@/app/components/tools/edit-custom-collection-modal' import { useAppContext } from '@/context/app-context' -import { useDocLink, useLocale } from '@/context/i18n' -import { getLanguage } from '@/i18n-config/language' import { createCustomCollection } from '@/service/tools' type Props = { @@ -20,17 +16,8 @@ type Props = { const Contribute = ({ onRefreshData }: Props) => { const { t } = useTranslation() - const locale = useLocale() - const language = getLanguage(locale) const { isCurrentWorkspaceManager } = useAppContext() - const docLink = useDocLink() - const linkUrl = useMemo(() => { - return docLink('/guides/tools#how-to-create-custom-tools', { - 'zh-Hans': '/guides/tools#ru-he-chuang-jian-zi-ding-yi-gong-ju', - }) - }, [language]) - const [isShowEditCollectionToolModal, setIsShowEditCustomCollectionModal] = useState(false) const doCreateCustomToolCollection = async (data: CustomCollectionBackend) => { await createCustomCollection(data) @@ -54,13 +41,6 @@ const Contribute = ({ onRefreshData }: Props) => {
{t('createCustomTool', { ns: 'tools' })}
- )} {isShowEditCollectionToolModal && ( diff --git a/web/app/components/workflow-app/components/workflow-onboarding-modal/index.spec.tsx b/web/app/components/workflow-app/components/workflow-onboarding-modal/index.spec.tsx index 6dac82a642..e6767a802b 100644 --- a/web/app/components/workflow-app/components/workflow-onboarding-modal/index.spec.tsx +++ b/web/app/components/workflow-app/components/workflow-onboarding-modal/index.spec.tsx @@ -126,18 +126,6 @@ describe('WorkflowOnboardingModal', () => { expect(descriptionDiv).toHaveTextContent('workflow.onboarding.aboutStartNode') }) - it('should render learn more link', () => { - // Arrange & Act - renderComponent() - - // Assert - const learnMoreLink = screen.getByText('workflow.onboarding.learnMore') - expect(learnMoreLink).toBeInTheDocument() - expect(learnMoreLink.closest('a')).toHaveAttribute('href', 'https://docs.example.com/guides/workflow/node/start') - expect(learnMoreLink.closest('a')).toHaveAttribute('target', '_blank') - expect(learnMoreLink.closest('a')).toHaveAttribute('rel', 'noopener noreferrer') - }) - it('should render StartNodeSelectionPanel', () => { // Arrange & Act renderComponent() @@ -547,16 +535,6 @@ describe('WorkflowOnboardingModal', () => { expect(heading).toHaveTextContent('workflow.onboarding.title') }) - it('should have external link with proper attributes', () => { - // Arrange & Act - renderComponent() - - // Assert - const link = screen.getByText('workflow.onboarding.learnMore').closest('a') - expect(link).toHaveAttribute('target', '_blank') - expect(link).toHaveAttribute('rel', 'noopener noreferrer') - }) - it('should have keyboard navigation support via ESC key', () => { // Arrange renderComponent({ isShow: true }) @@ -595,16 +573,6 @@ describe('WorkflowOnboardingModal', () => { const title = screen.getByText('workflow.onboarding.title') expect(title).toHaveClass('text-text-primary') }) - - it('should have underlined learn more link', () => { - // Arrange & Act - renderComponent() - - // Assert - const link = screen.getByText('workflow.onboarding.learnMore').closest('a') - expect(link).toHaveClass('underline') - expect(link).toHaveClass('cursor-pointer') - }) }) // Integration Tests @@ -654,9 +622,6 @@ describe('WorkflowOnboardingModal', () => { const heading = container.querySelector('h3') expect(heading).toBeInTheDocument() - // Assert - Description with link - expect(screen.getByText('workflow.onboarding.learnMore').closest('a')).toBeInTheDocument() - // Assert - Selection panel expect(screen.getByTestId('start-node-selection-panel')).toBeInTheDocument() diff --git a/web/app/components/workflow-app/components/workflow-onboarding-modal/index.tsx b/web/app/components/workflow-app/components/workflow-onboarding-modal/index.tsx index 0f92982cf2..0faf43bfd1 100644 --- a/web/app/components/workflow-app/components/workflow-onboarding-modal/index.tsx +++ b/web/app/components/workflow-app/components/workflow-onboarding-modal/index.tsx @@ -8,7 +8,6 @@ import { import { useTranslation } from 'react-i18next' import Modal from '@/app/components/base/modal' import { BlockEnum } from '@/app/components/workflow/types' -import { useDocLink } from '@/context/i18n' import StartNodeSelectionPanel from './start-node-selection-panel' type WorkflowOnboardingModalProps = { @@ -23,7 +22,6 @@ const WorkflowOnboardingModal: FC = ({ onSelectStartNode, }) => { const { t } = useTranslation() - const docLink = useDocLink() const handleSelectUserInput = useCallback(() => { onSelectStartNode(BlockEnum.Start) @@ -63,15 +61,6 @@ const 
WorkflowOnboardingModal: FC = ({
{t('onboarding.description', { ns: 'workflow' })} {' '} - - {t('onboarding.learnMore', { ns: 'workflow' })} - - {' '} {t('onboarding.aboutStartNode', { ns: 'workflow' })}
diff --git a/web/app/components/workflow-app/hooks/use-available-nodes-meta-data.ts b/web/app/components/workflow-app/hooks/use-available-nodes-meta-data.ts index 60f0bf3b28..0c5c1e4a40 100644 --- a/web/app/components/workflow-app/hooks/use-available-nodes-meta-data.ts +++ b/web/app/components/workflow-app/hooks/use-available-nodes-meta-data.ts @@ -1,4 +1,5 @@ import type { AvailableNodesMetaData } from '@/app/components/workflow/hooks-store/store' +import type { DocPathWithoutLang } from '@/types/doc-paths' import { useMemo } from 'react' import { useTranslation } from 'react-i18next' import { WORKFLOW_COMMON_NODES } from '@/app/components/workflow/constants/node' @@ -44,7 +45,7 @@ export const useAvailableNodesMetaData = () => { const { metaData } = node const title = t(`blocks.${metaData.type}`, { ns: 'workflow' }) const description = t(`blocksAbout.${metaData.type}`, { ns: 'workflow' }) - const helpLinkPath = `guides/workflow/node/${metaData.helpLinkUri}` + const helpLinkPath = `/use-dify/nodes/${metaData.helpLinkUri}` as DocPathWithoutLang return { ...node, metaData: { diff --git a/web/app/components/workflow/index.tsx b/web/app/components/workflow/index.tsx index 1543bce714..62516a797d 100644 --- a/web/app/components/workflow/index.tsx +++ b/web/app/components/workflow/index.tsx @@ -95,6 +95,7 @@ import { import SyncingDataModal from './syncing-data-modal' import { ControlMode, + WorkflowRunningStatus, } from './types' import { setupScrollToNodeListener } from './utils/node-navigation' import { WorkflowHistoryProvider } from './workflow-history-store' @@ -231,11 +232,20 @@ export const Workflow: FC = memo(({ const { handleRefreshWorkflowDraft } = useWorkflowRefreshDraft() const handleSyncWorkflowDraftWhenPageClose = useCallback(() => { - if (document.visibilityState === 'hidden') + if (document.visibilityState === 'hidden') { syncWorkflowDraftWhenPageClose() + return + } + + if (document.visibilityState === 'visible') { + const { isListening, workflowRunningData } = workflowStore.getState() + const status = workflowRunningData?.result?.status + // Avoid resetting UI state when user comes back while a run is active or listening for triggers + if (isListening || status === WorkflowRunningStatus.Running) + return - else if (document.visibilityState === 'visible') setTimeout(() => handleRefreshWorkflowDraft(), 500) + } }, [syncWorkflowDraftWhenPageClose, handleRefreshWorkflowDraft, workflowStore]) // Also add beforeunload handler as additional safety net for tab close diff --git a/web/app/components/workflow/nodes/_base/components/agent-strategy.tsx b/web/app/components/workflow/nodes/_base/components/agent-strategy.tsx index 8303681d90..42be3d46e4 100644 --- a/web/app/components/workflow/nodes/_base/components/agent-strategy.tsx +++ b/web/app/components/workflow/nodes/_base/components/agent-strategy.tsx @@ -251,10 +251,7 @@ export const AgentStrategy = memo((props: AgentStrategyProps) => { {' '}
diff --git a/web/app/components/workflow/nodes/_base/components/error-handle/default-value.tsx b/web/app/components/workflow/nodes/_base/components/error-handle/default-value.tsx index 538dce09d0..080fa0f107 100644 --- a/web/app/components/workflow/nodes/_base/components/error-handle/default-value.tsx +++ b/web/app/components/workflow/nodes/_base/components/error-handle/default-value.tsx @@ -5,7 +5,6 @@ import Input from '@/app/components/base/input' import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor' import { CodeLanguage } from '@/app/components/workflow/nodes/code/types' import { VarType } from '@/app/components/workflow/types' -import { useDocLink } from '@/context/i18n' type DefaultValueProps = { forms: DefaultValueForm[] @@ -16,7 +15,6 @@ const DefaultValue = ({ onFormChange, }: DefaultValueProps) => { const { t } = useTranslation() - const docLink = useDocLink() const getFormChangeHandler = useCallback(({ key, type }: DefaultValueForm) => { return (payload: any) => { let value @@ -35,15 +33,6 @@ const DefaultValue = ({
{t('nodes.common.errorHandle.defaultValue.desc', { ns: 'workflow' })}   - - {t('common.learnMore', { ns: 'workflow' })} -
{ diff --git a/web/app/components/workflow/nodes/_base/components/error-handle/fail-branch-card.tsx b/web/app/components/workflow/nodes/_base/components/error-handle/fail-branch-card.tsx index fe267f52c4..49cd44160c 100644 --- a/web/app/components/workflow/nodes/_base/components/error-handle/fail-branch-card.tsx +++ b/web/app/components/workflow/nodes/_base/components/error-handle/fail-branch-card.tsx @@ -19,7 +19,7 @@ const FailBranchCard = () => { {t('nodes.common.errorHandle.failBranch.customizeTip', { ns: 'workflow' })}   diff --git a/web/app/components/workflow/nodes/_base/components/variable/var-reference-popup.tsx b/web/app/components/workflow/nodes/_base/components/variable/var-reference-popup.tsx index 6184bcad9f..26f10b7a1d 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/var-reference-popup.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable/var-reference-popup.tsx @@ -6,7 +6,6 @@ import { useMemo } from 'react' import { useTranslation } from 'react-i18next' import ListEmpty from '@/app/components/base/list-empty' import { useStore } from '@/app/components/workflow/store' -import { useDocLink } from '@/context/i18n' import VarReferenceVars from './var-reference-vars' type Props = { @@ -31,7 +30,7 @@ const VarReferencePopup: FC = ({ const pipelineId = useStore(s => s.pipelineId) const showManageRagInputFields = useMemo(() => !!pipelineId, [pipelineId]) const setShowInputFieldPanel = useStore(s => s.setShowInputFieldPanel) - const docLink = useDocLink() + // max-h-[300px] overflow-y-auto todo: use portal to handle long list return (
= ({ description={( )} /> diff --git a/web/app/components/workflow/nodes/knowledge-base/components/chunk-structure/instruction/index.tsx b/web/app/components/workflow/nodes/knowledge-base/components/chunk-structure/instruction/index.tsx index 77981639cd..73e87ec12b 100644 --- a/web/app/components/workflow/nodes/knowledge-base/components/chunk-structure/instruction/index.tsx +++ b/web/app/components/workflow/nodes/knowledge-base/components/chunk-structure/instruction/index.tsx @@ -31,7 +31,7 @@ const Instruction = ({

{t('nodes.knowledgeBase.chunkStructureTip.message', { ns: 'workflow' })}

{ const { t } = useTranslation() + const docLink = useDocLink() const { options, hybridSearchModeOptions, @@ -61,7 +63,7 @@ const RetrievalSetting = ({ title: t('form.retrievalSetting.title', { ns: 'datasetSettings' }), subTitle: ( diff --git a/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/json-schema-config.tsx b/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/json-schema-config.tsx index e7ac493bd2..b4dac4b58e 100644 --- a/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/json-schema-config.tsx +++ b/web/app/components/workflow/nodes/llm/components/json-schema-config-modal/json-schema-config.tsx @@ -1,14 +1,12 @@ import type { FC } from 'react' import type { SchemaRoot } from '../../types' -import { RiBracesLine, RiCloseLine, RiExternalLinkLine, RiTimelineView } from '@remixicon/react' -import * as React from 'react' +import { RiBracesLine, RiCloseLine, RiTimelineView } from '@remixicon/react' import { useCallback, useState } from 'react' import { useTranslation } from 'react-i18next' import Button from '@/app/components/base/button' import Divider from '@/app/components/base/divider' import Toast from '@/app/components/base/toast' import { JSON_SCHEMA_MAX_DEPTH } from '@/config' -import { useDocLink } from '@/context/i18n' import { SegmentedControl } from '../../../../../base/segmented-control' import { Type } from '../../types' import { @@ -55,7 +53,6 @@ const JsonSchemaConfig: FC = ({ onClose, }) => { const { t } = useTranslation() - const docLink = useDocLink() const [currentTab, setCurrentTab] = useState(SchemaView.VisualEditor) const [jsonSchema, setJsonSchema] = useState(defaultSchema || DEFAULT_SCHEMA) const [json, setJson] = useState(() => JSON.stringify(jsonSchema, null, 2)) @@ -253,15 +250,6 @@ const JsonSchemaConfig: FC = ({
{/* Footer */}
- - {t('nodes.llm.jsonSchema.doc', { ns: 'workflow' })} - -